scsi: lpfc: Convert abort handling to SLI-3 and SLI-4 handlers
[linux-2.6-microblaze.git] / drivers / scsi / lpfc / lpfc_scsi.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  *******************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/export.h>
27 #include <linux/delay.h>
28 #include <asm/unaligned.h>
29 #include <linux/t10-pi.h>
30 #include <linux/crc-t10dif.h>
31 #include <net/checksum.h>
32
33 #include <scsi/scsi.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_eh.h>
36 #include <scsi/scsi_host.h>
37 #include <scsi/scsi_tcq.h>
38 #include <scsi/scsi_transport_fc.h>
39
40 #include "lpfc_version.h"
41 #include "lpfc_hw4.h"
42 #include "lpfc_hw.h"
43 #include "lpfc_sli.h"
44 #include "lpfc_sli4.h"
45 #include "lpfc_nl.h"
46 #include "lpfc_disc.h"
47 #include "lpfc.h"
48 #include "lpfc_scsi.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_crtn.h"
51 #include "lpfc_vport.h"
52
53 #define LPFC_RESET_WAIT  2
54 #define LPFC_ABORT_WAIT  2
55
56 static char *dif_op_str[] = {
57         "PROT_NORMAL",
58         "PROT_READ_INSERT",
59         "PROT_WRITE_STRIP",
60         "PROT_READ_STRIP",
61         "PROT_WRITE_INSERT",
62         "PROT_READ_PASS",
63         "PROT_WRITE_PASS",
64 };
65
66 struct scsi_dif_tuple {
67         __be16 guard_tag;       /* Checksum */
68         __be16 app_tag;         /* Opaque storage */
69         __be32 ref_tag;         /* Target LBA or indirect LBA */
70 };
71
72 static struct lpfc_rport_data *
73 lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
74 {
75         struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
76
77         if (vport->phba->cfg_fof)
78                 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
79         else
80                 return (struct lpfc_rport_data *)sdev->hostdata;
81 }
82
83 static void
84 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
85 static void
86 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
87 static int
88 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
89
90 static inline unsigned
91 lpfc_cmd_blksize(struct scsi_cmnd *sc)
92 {
93         return sc->device->sector_size;
94 }
95
96 #define LPFC_CHECK_PROTECT_GUARD        1
97 #define LPFC_CHECK_PROTECT_REF          2
98 static inline unsigned
99 lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
100 {
101         return 1;
102 }
103
104 static inline unsigned
105 lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
106 {
107         if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
108                 return 0;
109         if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
110                 return 1;
111         return 0;
112 }
113
114 /**
115  * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
116  * @phba: Pointer to HBA object.
117  * @lpfc_cmd: lpfc scsi command object pointer.
118  *
119  * This function is called from the lpfc_prep_task_mgmt_cmd function to
120  * set the last bit in the response sge entry.
121  **/
122 static void
123 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
124                                 struct lpfc_io_buf *lpfc_cmd)
125 {
126         struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
127         if (sgl) {
128                 sgl += 1;
129                 sgl->word2 = le32_to_cpu(sgl->word2);
130                 bf_set(lpfc_sli4_sge_last, sgl, 1);
131                 sgl->word2 = cpu_to_le32(sgl->word2);
132         }
133 }
134
135 /**
136  * lpfc_update_stats - Update statistical data for the command completion
137  * @vport: The virtual port on which this call is executing.
138  * @lpfc_cmd: lpfc scsi command object pointer.
139  *
140  * This function is called when there is a command completion and this
141  * function updates the statistical data for the command completion.
142  **/
143 static void
144 lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
145 {
146         struct lpfc_hba *phba = vport->phba;
147         struct lpfc_rport_data *rdata;
148         struct lpfc_nodelist *pnode;
149         struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
150         unsigned long flags;
151         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
152         unsigned long latency;
153         int i;
154
155         if (!vport->stat_data_enabled ||
156             vport->stat_data_blocked ||
157             (cmd->result))
158                 return;
159
160         latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
161         rdata = lpfc_cmd->rdata;
162         pnode = rdata->pnode;
163
164         spin_lock_irqsave(shost->host_lock, flags);
165         if (!pnode ||
166             !pnode->lat_data ||
167             (phba->bucket_type == LPFC_NO_BUCKET)) {
168                 spin_unlock_irqrestore(shost->host_lock, flags);
169                 return;
170         }
171
172         if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
173                 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
174                         phba->bucket_step;
175                 /* check array subscript bounds */
176                 if (i < 0)
177                         i = 0;
178                 else if (i >= LPFC_MAX_BUCKET_COUNT)
179                         i = LPFC_MAX_BUCKET_COUNT - 1;
180         } else {
181                 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
182                         if (latency <= (phba->bucket_base +
183                                 ((1<<i)*phba->bucket_step)))
184                                 break;
185         }
186
187         pnode->lat_data[i].cmd_count++;
188         spin_unlock_irqrestore(shost->host_lock, flags);
189 }
190
191 /**
192  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
193  * @phba: The Hba for which this call is being executed.
194  *
195  * This routine is called when there is resource error in driver or firmware.
196  * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
197  * posts at most 1 event each second. This routine wakes up worker thread of
198  * @phba to process WORKER_RAM_DOWN_EVENT event.
199  *
200  * This routine should be called with no lock held.
201  **/
202 void
203 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
204 {
205         unsigned long flags;
206         uint32_t evt_posted;
207         unsigned long expires;
208
209         spin_lock_irqsave(&phba->hbalock, flags);
210         atomic_inc(&phba->num_rsrc_err);
211         phba->last_rsrc_error_time = jiffies;
212
213         expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
214         if (time_after(expires, jiffies)) {
215                 spin_unlock_irqrestore(&phba->hbalock, flags);
216                 return;
217         }
218
219         phba->last_ramp_down_time = jiffies;
220
221         spin_unlock_irqrestore(&phba->hbalock, flags);
222
223         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
224         evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
225         if (!evt_posted)
226                 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
227         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
228
229         if (!evt_posted)
230                 lpfc_worker_wake_up(phba);
231         return;
232 }
233
234 /**
235  * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
236  * @phba: The Hba for which this call is being executed.
237  *
238  * This routine is called to  process WORKER_RAMP_DOWN_QUEUE event for worker
239  * thread.This routine reduces queue depth for all scsi device on each vport
240  * associated with @phba.
241  **/
242 void
243 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
244 {
245         struct lpfc_vport **vports;
246         struct Scsi_Host  *shost;
247         struct scsi_device *sdev;
248         unsigned long new_queue_depth;
249         unsigned long num_rsrc_err, num_cmd_success;
250         int i;
251
252         num_rsrc_err = atomic_read(&phba->num_rsrc_err);
253         num_cmd_success = atomic_read(&phba->num_cmd_success);
254
255         /*
256          * The error and success command counters are global per
257          * driver instance.  If another handler has already
258          * operated on this error event, just exit.
259          */
260         if (num_rsrc_err == 0)
261                 return;
262
263         vports = lpfc_create_vport_work_array(phba);
264         if (vports != NULL)
265                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
266                         shost = lpfc_shost_from_vport(vports[i]);
267                         shost_for_each_device(sdev, shost) {
268                                 new_queue_depth =
269                                         sdev->queue_depth * num_rsrc_err /
270                                         (num_rsrc_err + num_cmd_success);
271                                 if (!new_queue_depth)
272                                         new_queue_depth = sdev->queue_depth - 1;
273                                 else
274                                         new_queue_depth = sdev->queue_depth -
275                                                                 new_queue_depth;
276                                 scsi_change_queue_depth(sdev, new_queue_depth);
277                         }
278                 }
279         lpfc_destroy_vport_work_array(phba, vports);
280         atomic_set(&phba->num_rsrc_err, 0);
281         atomic_set(&phba->num_cmd_success, 0);
282 }
283
284 /**
285  * lpfc_scsi_dev_block - set all scsi hosts to block state
286  * @phba: Pointer to HBA context object.
287  *
288  * This function walks vport list and set each SCSI host to block state
289  * by invoking fc_remote_port_delete() routine. This function is invoked
290  * with EEH when device's PCI slot has been permanently disabled.
291  **/
292 void
293 lpfc_scsi_dev_block(struct lpfc_hba *phba)
294 {
295         struct lpfc_vport **vports;
296         struct Scsi_Host  *shost;
297         struct scsi_device *sdev;
298         struct fc_rport *rport;
299         int i;
300
301         vports = lpfc_create_vport_work_array(phba);
302         if (vports != NULL)
303                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
304                         shost = lpfc_shost_from_vport(vports[i]);
305                         shost_for_each_device(sdev, shost) {
306                                 rport = starget_to_rport(scsi_target(sdev));
307                                 fc_remote_port_delete(rport);
308                         }
309                 }
310         lpfc_destroy_vport_work_array(phba, vports);
311 }
312
313 /**
314  * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
315  * @vport: The virtual port for which this call being executed.
316  * @num_to_alloc: The requested number of buffers to allocate.
317  *
318  * This routine allocates a scsi buffer for device with SLI-3 interface spec,
319  * the scsi buffer contains all the necessary information needed to initiate
320  * a SCSI I/O. The non-DMAable buffer region contains information to build
321  * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
322  * and the initial BPL. In addition to allocating memory, the FCP CMND and
323  * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
324  *
325  * Return codes:
326  *   int - number of scsi buffers that were allocated.
327  *   0 = failure, less than num_to_alloc is a partial failure.
328  **/
329 static int
330 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
331 {
332         struct lpfc_hba *phba = vport->phba;
333         struct lpfc_io_buf *psb;
334         struct ulp_bde64 *bpl;
335         IOCB_t *iocb;
336         dma_addr_t pdma_phys_fcp_cmd;
337         dma_addr_t pdma_phys_fcp_rsp;
338         dma_addr_t pdma_phys_sgl;
339         uint16_t iotag;
340         int bcnt, bpl_size;
341
342         bpl_size = phba->cfg_sg_dma_buf_size -
343                 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
344
345         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
346                          "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
347                          num_to_alloc, phba->cfg_sg_dma_buf_size,
348                          (int)sizeof(struct fcp_cmnd),
349                          (int)sizeof(struct fcp_rsp), bpl_size);
350
351         for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
352                 psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
353                 if (!psb)
354                         break;
355
356                 /*
357                  * Get memory from the pci pool to map the virt space to pci
358                  * bus space for an I/O.  The DMA buffer includes space for the
359                  * struct fcp_cmnd, struct fcp_rsp and the number of bde's
360                  * necessary to support the sg_tablesize.
361                  */
362                 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
363                                         GFP_KERNEL, &psb->dma_handle);
364                 if (!psb->data) {
365                         kfree(psb);
366                         break;
367                 }
368
369
370                 /* Allocate iotag for psb->cur_iocbq. */
371                 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
372                 if (iotag == 0) {
373                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
374                                       psb->data, psb->dma_handle);
375                         kfree(psb);
376                         break;
377                 }
378                 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
379
380                 psb->fcp_cmnd = psb->data;
381                 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
382                 psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
383                         sizeof(struct fcp_rsp);
384
385                 /* Initialize local short-hand pointers. */
386                 bpl = (struct ulp_bde64 *)psb->dma_sgl;
387                 pdma_phys_fcp_cmd = psb->dma_handle;
388                 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
389                 pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
390                         sizeof(struct fcp_rsp);
391
392                 /*
393                  * The first two bdes are the FCP_CMD and FCP_RSP. The balance
394                  * are sg list bdes.  Initialize the first two and leave the
395                  * rest for queuecommand.
396                  */
397                 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
398                 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
399                 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
400                 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
401                 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
402
403                 /* Setup the physical region for the FCP RSP */
404                 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
405                 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
406                 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
407                 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
408                 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
409
410                 /*
411                  * Since the IOCB for the FCP I/O is built into this
412                  * lpfc_scsi_buf, initialize it with all known data now.
413                  */
414                 iocb = &psb->cur_iocbq.iocb;
415                 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
416                 if ((phba->sli_rev == 3) &&
417                                 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
418                         /* fill in immediate fcp command BDE */
419                         iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
420                         iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
421                         iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
422                                         unsli3.fcp_ext.icd);
423                         iocb->un.fcpi64.bdl.addrHigh = 0;
424                         iocb->ulpBdeCount = 0;
425                         iocb->ulpLe = 0;
426                         /* fill in response BDE */
427                         iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
428                                                         BUFF_TYPE_BDE_64;
429                         iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
430                                 sizeof(struct fcp_rsp);
431                         iocb->unsli3.fcp_ext.rbde.addrLow =
432                                 putPaddrLow(pdma_phys_fcp_rsp);
433                         iocb->unsli3.fcp_ext.rbde.addrHigh =
434                                 putPaddrHigh(pdma_phys_fcp_rsp);
435                 } else {
436                         iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
437                         iocb->un.fcpi64.bdl.bdeSize =
438                                         (2 * sizeof(struct ulp_bde64));
439                         iocb->un.fcpi64.bdl.addrLow =
440                                         putPaddrLow(pdma_phys_sgl);
441                         iocb->un.fcpi64.bdl.addrHigh =
442                                         putPaddrHigh(pdma_phys_sgl);
443                         iocb->ulpBdeCount = 1;
444                         iocb->ulpLe = 1;
445                 }
446                 iocb->ulpClass = CLASS3;
447                 psb->status = IOSTAT_SUCCESS;
448                 /* Put it back into the SCSI buffer list */
449                 psb->cur_iocbq.context1  = psb;
450                 spin_lock_init(&psb->buf_lock);
451                 lpfc_release_scsi_buf_s3(phba, psb);
452
453         }
454
455         return bcnt;
456 }
457
458 /**
459  * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
460  * @vport: pointer to lpfc vport data structure.
461  *
462  * This routine is invoked by the vport cleanup for deletions and the cleanup
463  * for an ndlp on removal.
464  **/
465 void
466 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
467 {
468         struct lpfc_hba *phba = vport->phba;
469         struct lpfc_io_buf *psb, *next_psb;
470         struct lpfc_sli4_hdw_queue *qp;
471         unsigned long iflag = 0;
472         int idx;
473
474         if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
475                 return;
476
477         spin_lock_irqsave(&phba->hbalock, iflag);
478         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
479                 qp = &phba->sli4_hba.hdwq[idx];
480
481                 spin_lock(&qp->abts_io_buf_list_lock);
482                 list_for_each_entry_safe(psb, next_psb,
483                                          &qp->lpfc_abts_io_buf_list, list) {
484                         if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
485                                 continue;
486
487                         if (psb->rdata && psb->rdata->pnode &&
488                             psb->rdata->pnode->vport == vport)
489                                 psb->rdata = NULL;
490                 }
491                 spin_unlock(&qp->abts_io_buf_list_lock);
492         }
493         spin_unlock_irqrestore(&phba->hbalock, iflag);
494 }
495
496 /**
497  * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
498  * @phba: pointer to lpfc hba data structure.
499  * @axri: pointer to the fcp xri abort wcqe structure.
500  * @idx: index into hdwq
501  *
502  * This routine is invoked by the worker thread to process a SLI4 fast-path
503  * FCP or NVME aborted xri.
504  **/
505 void
506 lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
507                          struct sli4_wcqe_xri_aborted *axri, int idx)
508 {
509         uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
510         uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
511         struct lpfc_io_buf *psb, *next_psb;
512         struct lpfc_sli4_hdw_queue *qp;
513         unsigned long iflag = 0;
514         struct lpfc_iocbq *iocbq;
515         int i;
516         struct lpfc_nodelist *ndlp;
517         int rrq_empty = 0;
518         struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
519
520         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
521                 return;
522
523         qp = &phba->sli4_hba.hdwq[idx];
524         spin_lock_irqsave(&phba->hbalock, iflag);
525         spin_lock(&qp->abts_io_buf_list_lock);
526         list_for_each_entry_safe(psb, next_psb,
527                 &qp->lpfc_abts_io_buf_list, list) {
528                 if (psb->cur_iocbq.sli4_xritag == xri) {
529                         list_del_init(&psb->list);
530                         psb->flags &= ~LPFC_SBUF_XBUSY;
531                         psb->status = IOSTAT_SUCCESS;
532                         if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
533                                 qp->abts_nvme_io_bufs--;
534                                 spin_unlock(&qp->abts_io_buf_list_lock);
535                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
536                                 lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
537                                 return;
538                         }
539                         qp->abts_scsi_io_bufs--;
540                         spin_unlock(&qp->abts_io_buf_list_lock);
541
542                         if (psb->rdata && psb->rdata->pnode)
543                                 ndlp = psb->rdata->pnode;
544                         else
545                                 ndlp = NULL;
546
547                         rrq_empty = list_empty(&phba->active_rrq_list);
548                         spin_unlock_irqrestore(&phba->hbalock, iflag);
549                         if (ndlp) {
550                                 lpfc_set_rrq_active(phba, ndlp,
551                                         psb->cur_iocbq.sli4_lxritag, rxid, 1);
552                                 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
553                         }
554                         lpfc_release_scsi_buf_s4(phba, psb);
555                         if (rrq_empty)
556                                 lpfc_worker_wake_up(phba);
557                         return;
558                 }
559         }
560         spin_unlock(&qp->abts_io_buf_list_lock);
561         for (i = 1; i <= phba->sli.last_iotag; i++) {
562                 iocbq = phba->sli.iocbq_lookup[i];
563
564                 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
565                     (iocbq->iocb_flag & LPFC_IO_LIBDFC))
566                         continue;
567                 if (iocbq->sli4_xritag != xri)
568                         continue;
569                 psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
570                 psb->flags &= ~LPFC_SBUF_XBUSY;
571                 spin_unlock_irqrestore(&phba->hbalock, iflag);
572                 if (!list_empty(&pring->txq))
573                         lpfc_worker_wake_up(phba);
574                 return;
575
576         }
577         spin_unlock_irqrestore(&phba->hbalock, iflag);
578 }
579
580 /**
581  * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
582  * @phba: The HBA for which this call is being executed.
583  * @ndlp: pointer to a node-list data structure.
584  * @cmnd: Pointer to scsi_cmnd data structure.
585  *
586  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
587  * and returns to caller.
588  *
589  * Return codes:
590  *   NULL - Error
591  *   Pointer to lpfc_scsi_buf - Success
592  **/
593 static struct lpfc_io_buf *
594 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
595                      struct scsi_cmnd *cmnd)
596 {
597         struct lpfc_io_buf *lpfc_cmd = NULL;
598         struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
599         unsigned long iflag = 0;
600
601         spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
602         list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
603                          list);
604         if (!lpfc_cmd) {
605                 spin_lock(&phba->scsi_buf_list_put_lock);
606                 list_splice(&phba->lpfc_scsi_buf_list_put,
607                             &phba->lpfc_scsi_buf_list_get);
608                 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
609                 list_remove_head(scsi_buf_list_get, lpfc_cmd,
610                                  struct lpfc_io_buf, list);
611                 spin_unlock(&phba->scsi_buf_list_put_lock);
612         }
613         spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
614
615         if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
616                 atomic_inc(&ndlp->cmd_pending);
617                 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
618         }
619         return  lpfc_cmd;
620 }
621 /**
622  * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
623  * @phba: The HBA for which this call is being executed.
624  * @ndlp: pointer to a node-list data structure.
625  * @cmnd: Pointer to scsi_cmnd data structure.
626  *
627  * This routine removes a scsi buffer from head of @hdwq io_buf_list
628  * and returns to caller.
629  *
630  * Return codes:
631  *   NULL - Error
632  *   Pointer to lpfc_scsi_buf - Success
633  **/
634 static struct lpfc_io_buf *
635 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
636                      struct scsi_cmnd *cmnd)
637 {
638         struct lpfc_io_buf *lpfc_cmd;
639         struct lpfc_sli4_hdw_queue *qp;
640         struct sli4_sge *sgl;
641         dma_addr_t pdma_phys_fcp_rsp;
642         dma_addr_t pdma_phys_fcp_cmd;
643         uint32_t cpu, idx;
644         int tag;
645         struct fcp_cmd_rsp_buf *tmp = NULL;
646
647         cpu = raw_smp_processor_id();
648         if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
649                 tag = blk_mq_unique_tag(cmnd->request);
650                 idx = blk_mq_unique_tag_to_hwq(tag);
651         } else {
652                 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
653         }
654
655         lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
656                                    !phba->cfg_xri_rebalancing);
657         if (!lpfc_cmd) {
658                 qp = &phba->sli4_hba.hdwq[idx];
659                 qp->empty_io_bufs++;
660                 return NULL;
661         }
662
663         /* Setup key fields in buffer that may have been changed
664          * if other protocols used this buffer.
665          */
666         lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
667         lpfc_cmd->prot_seg_cnt = 0;
668         lpfc_cmd->seg_cnt = 0;
669         lpfc_cmd->timeout = 0;
670         lpfc_cmd->flags = 0;
671         lpfc_cmd->start_time = jiffies;
672         lpfc_cmd->waitq = NULL;
673         lpfc_cmd->cpu = cpu;
674 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
675         lpfc_cmd->prot_data_type = 0;
676 #endif
677         tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
678         if (!tmp) {
679                 lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
680                 return NULL;
681         }
682
683         lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
684         lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
685
686         /*
687          * The first two SGEs are the FCP_CMD and FCP_RSP.
688          * The balance are sg list bdes. Initialize the
689          * first two and leave the rest for queuecommand.
690          */
691         sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
692         pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
693         sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
694         sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
695         sgl->word2 = le32_to_cpu(sgl->word2);
696         bf_set(lpfc_sli4_sge_last, sgl, 0);
697         sgl->word2 = cpu_to_le32(sgl->word2);
698         sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
699         sgl++;
700
701         /* Setup the physical region for the FCP RSP */
702         pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
703         sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
704         sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
705         sgl->word2 = le32_to_cpu(sgl->word2);
706         bf_set(lpfc_sli4_sge_last, sgl, 1);
707         sgl->word2 = cpu_to_le32(sgl->word2);
708         sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
709
710         if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
711                 atomic_inc(&ndlp->cmd_pending);
712                 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
713         }
714         return  lpfc_cmd;
715 }
716 /**
717  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
718  * @phba: The HBA for which this call is being executed.
719  * @ndlp: pointer to a node-list data structure.
720  * @cmnd: Pointer to scsi_cmnd data structure.
721  *
722  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
723  * and returns to caller.
724  *
725  * Return codes:
726  *   NULL - Error
727  *   Pointer to lpfc_scsi_buf - Success
728  **/
729 static struct lpfc_io_buf*
730 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
731                   struct scsi_cmnd *cmnd)
732 {
733         return  phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
734 }
735
736 /**
737  * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
738  * @phba: The Hba for which this call is being executed.
739  * @psb: The scsi buffer which is being released.
740  *
741  * This routine releases @psb scsi buffer by adding it to tail of @phba
742  * lpfc_scsi_buf_list list.
743  **/
744 static void
745 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
746 {
747         unsigned long iflag = 0;
748
749         psb->seg_cnt = 0;
750         psb->prot_seg_cnt = 0;
751
752         spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
753         psb->pCmd = NULL;
754         psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
755         list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
756         spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
757 }
758
759 /**
760  * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
761  * @phba: The Hba for which this call is being executed.
762  * @psb: The scsi buffer which is being released.
763  *
764  * This routine releases @psb scsi buffer by adding it to tail of @hdwq
765  * io_buf_list list. For SLI4 XRI's are tied to the scsi buffer
766  * and cannot be reused for at least RA_TOV amount of time if it was
767  * aborted.
768  **/
769 static void
770 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
771 {
772         struct lpfc_sli4_hdw_queue *qp;
773         unsigned long iflag = 0;
774
775         psb->seg_cnt = 0;
776         psb->prot_seg_cnt = 0;
777
778         qp = psb->hdwq;
779         if (psb->flags & LPFC_SBUF_XBUSY) {
780                 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
781                 psb->pCmd = NULL;
782                 list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
783                 qp->abts_scsi_io_bufs++;
784                 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
785         } else {
786                 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
787         }
788 }
789
790 /**
791  * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
792  * @phba: The Hba for which this call is being executed.
793  * @psb: The scsi buffer which is being released.
794  *
795  * This routine releases @psb scsi buffer by adding it to tail of @phba
796  * lpfc_scsi_buf_list list.
797  **/
798 static void
799 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
800 {
801         if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
802                 atomic_dec(&psb->ndlp->cmd_pending);
803
804         psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
805         phba->lpfc_release_scsi_buf(phba, psb);
806 }
807
808 /**
809  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
810  * @data: A pointer to the immediate command data portion of the IOCB.
811  * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
812  *
813  * The routine copies the entire FCP command from @fcp_cmnd to @data while
814  * byte swapping the data to big endian format for transmission on the wire.
815  **/
816 static void
817 lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
818 {
819         int i, j;
820
821         for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
822              i += sizeof(uint32_t), j++) {
823                 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
824         }
825 }
826
827 /**
828  * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
829  * @phba: The Hba for which this call is being executed.
830  * @lpfc_cmd: The scsi buffer which is going to be mapped.
831  *
832  * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
833  * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
834  * through sg elements and format the bde. This routine also initializes all
835  * IOCB fields which are dependent on scsi command request buffer.
836  *
837  * Return codes:
838  *   1 - Error
839  *   0 - Success
840  **/
841 static int
842 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
843 {
844         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
845         struct scatterlist *sgel = NULL;
846         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
847         struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
848         struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
849         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
850         struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
851         dma_addr_t physaddr;
852         uint32_t num_bde = 0;
853         int nseg, datadir = scsi_cmnd->sc_data_direction;
854
855         /*
856          * There are three possibilities here - use scatter-gather segment, use
857          * the single mapping, or neither.  Start the lpfc command prep by
858          * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
859          * data bde entry.
860          */
861         bpl += 2;
862         if (scsi_sg_count(scsi_cmnd)) {
863                 /*
864                  * The driver stores the segment count returned from pci_map_sg
865                  * because this a count of dma-mappings used to map the use_sg
866                  * pages.  They are not guaranteed to be the same for those
867                  * architectures that implement an IOMMU.
868                  */
869
870                 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
871                                   scsi_sg_count(scsi_cmnd), datadir);
872                 if (unlikely(!nseg))
873                         return 1;
874
875                 lpfc_cmd->seg_cnt = nseg;
876                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
877                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
878                                         "9064 BLKGRD: %s: Too many sg segments"
879                                         " from dma_map_sg.  Config %d, seg_cnt"
880                                         " %d\n", __func__, phba->cfg_sg_seg_cnt,
881                                         lpfc_cmd->seg_cnt);
882                         WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
883                         lpfc_cmd->seg_cnt = 0;
884                         scsi_dma_unmap(scsi_cmnd);
885                         return 2;
886                 }
887
888                 /*
889                  * The driver established a maximum scatter-gather segment count
890                  * during probe that limits the number of sg elements in any
891                  * single scsi command.  Just run through the seg_cnt and format
892                  * the bde's.
893                  * When using SLI-3 the driver will try to fit all the BDEs into
894                  * the IOCB. If it can't then the BDEs get added to a BPL as it
895                  * does for SLI-2 mode.
896                  */
897                 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
898                         physaddr = sg_dma_address(sgel);
899                         if (phba->sli_rev == 3 &&
900                             !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
901                             !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
902                             nseg <= LPFC_EXT_DATA_BDE_COUNT) {
903                                 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
904                                 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
905                                 data_bde->addrLow = putPaddrLow(physaddr);
906                                 data_bde->addrHigh = putPaddrHigh(physaddr);
907                                 data_bde++;
908                         } else {
909                                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
910                                 bpl->tus.f.bdeSize = sg_dma_len(sgel);
911                                 bpl->tus.w = le32_to_cpu(bpl->tus.w);
912                                 bpl->addrLow =
913                                         le32_to_cpu(putPaddrLow(physaddr));
914                                 bpl->addrHigh =
915                                         le32_to_cpu(putPaddrHigh(physaddr));
916                                 bpl++;
917                         }
918                 }
919         }
920
921         /*
922          * Finish initializing those IOCB fields that are dependent on the
923          * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
924          * explicitly reinitialized and for SLI-3 the extended bde count is
925          * explicitly reinitialized since all iocb memory resources are reused.
926          */
927         if (phba->sli_rev == 3 &&
928             !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
929             !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
930                 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
931                         /*
932                          * The extended IOCB format can only fit 3 BDE or a BPL.
933                          * This I/O has more than 3 BDE so the 1st data bde will
934                          * be a BPL that is filled in here.
935                          */
936                         physaddr = lpfc_cmd->dma_handle;
937                         data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
938                         data_bde->tus.f.bdeSize = (num_bde *
939                                                    sizeof(struct ulp_bde64));
940                         physaddr += (sizeof(struct fcp_cmnd) +
941                                      sizeof(struct fcp_rsp) +
942                                      (2 * sizeof(struct ulp_bde64)));
943                         data_bde->addrHigh = putPaddrHigh(physaddr);
944                         data_bde->addrLow = putPaddrLow(physaddr);
945                         /* ebde count includes the response bde and data bpl */
946                         iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
947                 } else {
948                         /* ebde count includes the response bde and data bdes */
949                         iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
950                 }
951         } else {
952                 iocb_cmd->un.fcpi64.bdl.bdeSize =
953                         ((num_bde + 2) * sizeof(struct ulp_bde64));
954                 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
955         }
956         fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
957
958         /*
959          * Due to difference in data length between DIF/non-DIF paths,
960          * we need to set word 4 of IOCB here
961          */
962         iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
963         lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
964         return 0;
965 }
966
967 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
968
969 /* Return BG_ERR_INIT if error injection is detected by Initiator */
970 #define BG_ERR_INIT     0x1
971 /* Return BG_ERR_TGT if error injection is detected by Target */
972 #define BG_ERR_TGT      0x2
973 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
974 #define BG_ERR_SWAP     0x10
975 /**
976  * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
977  * error injection
978  **/
979 #define BG_ERR_CHECK    0x20
980
981 /**
982  * lpfc_bg_err_inject - Determine if we should inject an error
983  * @phba: The Hba for which this call is being executed.
984  * @sc: The SCSI command to examine
985  * @reftag: (out) BlockGuard reference tag for transmitted data
986  * @apptag: (out) BlockGuard application tag for transmitted data
987  * @new_guard: (in) Value to replace CRC with if needed
988  *
989  * Returns BG_ERR_* bit mask or 0 if request ignored
990  **/
991 static int
992 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
993                 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
994 {
995         struct scatterlist *sgpe; /* s/g prot entry */
996         struct lpfc_io_buf *lpfc_cmd = NULL;
997         struct scsi_dif_tuple *src = NULL;
998         struct lpfc_nodelist *ndlp;
999         struct lpfc_rport_data *rdata;
1000         uint32_t op = scsi_get_prot_op(sc);
1001         uint32_t blksize;
1002         uint32_t numblks;
1003         sector_t lba;
1004         int rc = 0;
1005         int blockoff = 0;
1006
1007         if (op == SCSI_PROT_NORMAL)
1008                 return 0;
1009
1010         sgpe = scsi_prot_sglist(sc);
1011         lba = scsi_get_lba(sc);
1012
1013         /* First check if we need to match the LBA */
1014         if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1015                 blksize = lpfc_cmd_blksize(sc);
1016                 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1017
1018                 /* Make sure we have the right LBA if one is specified */
1019                 if ((phba->lpfc_injerr_lba < lba) ||
1020                         (phba->lpfc_injerr_lba >= (lba + numblks)))
1021                         return 0;
1022                 if (sgpe) {
1023                         blockoff = phba->lpfc_injerr_lba - lba;
1024                         numblks = sg_dma_len(sgpe) /
1025                                 sizeof(struct scsi_dif_tuple);
1026                         if (numblks < blockoff)
1027                                 blockoff = numblks;
1028                 }
1029         }
1030
1031         /* Next check if we need to match the remote NPortID or WWPN */
1032         rdata = lpfc_rport_data_from_scsi_device(sc->device);
1033         if (rdata && rdata->pnode) {
1034                 ndlp = rdata->pnode;
1035
1036                 /* Make sure we have the right NPortID if one is specified */
1037                 if (phba->lpfc_injerr_nportid  &&
1038                         (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1039                         return 0;
1040
1041                 /*
1042                  * Make sure we have the right WWPN if one is specified.
1043                  * wwn[0] should be a non-zero NAA in a good WWPN.
1044                  */
1045                 if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
1046                         (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1047                                 sizeof(struct lpfc_name)) != 0))
1048                         return 0;
1049         }
1050
1051         /* Setup a ptr to the protection data if the SCSI host provides it */
1052         if (sgpe) {
1053                 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1054                 src += blockoff;
1055                 lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
1056         }
1057
1058         /* Should we change the Reference Tag */
1059         if (reftag) {
1060                 if (phba->lpfc_injerr_wref_cnt) {
1061                         switch (op) {
1062                         case SCSI_PROT_WRITE_PASS:
1063                                 if (src) {
1064                                         /*
1065                                          * For WRITE_PASS, force the error
1066                                          * to be sent on the wire. It should
1067                                          * be detected by the Target.
1068                                          * If blockoff != 0 error will be
1069                                          * inserted in middle of the IO.
1070                                          */
1071
1072                                         lpfc_printf_log(phba, KERN_ERR,
1073                                                         LOG_TRACE_EVENT,
1074                                         "9076 BLKGRD: Injecting reftag error: "
1075                                         "write lba x%lx + x%x oldrefTag x%x\n",
1076                                         (unsigned long)lba, blockoff,
1077                                         be32_to_cpu(src->ref_tag));
1078
1079                                         /*
1080                                          * Save the old ref_tag so we can
1081                                          * restore it on completion.
1082                                          */
1083                                         if (lpfc_cmd) {
1084                                                 lpfc_cmd->prot_data_type =
1085                                                         LPFC_INJERR_REFTAG;
1086                                                 lpfc_cmd->prot_data_segment =
1087                                                         src;
1088                                                 lpfc_cmd->prot_data =
1089                                                         src->ref_tag;
1090                                         }
1091                                         src->ref_tag = cpu_to_be32(0xDEADBEEF);
1092                                         phba->lpfc_injerr_wref_cnt--;
1093                                         if (phba->lpfc_injerr_wref_cnt == 0) {
1094                                                 phba->lpfc_injerr_nportid = 0;
1095                                                 phba->lpfc_injerr_lba =
1096                                                         LPFC_INJERR_LBA_OFF;
1097                                                 memset(&phba->lpfc_injerr_wwpn,
1098                                                   0, sizeof(struct lpfc_name));
1099                                         }
1100                                         rc = BG_ERR_TGT | BG_ERR_CHECK;
1101
1102                                         break;
1103                                 }
1104                                 fallthrough;
1105                         case SCSI_PROT_WRITE_INSERT:
1106                                 /*
1107                                  * For WRITE_INSERT, force the error
1108                                  * to be sent on the wire. It should be
1109                                  * detected by the Target.
1110                                  */
1111                                 /* DEADBEEF will be the reftag on the wire */
1112                                 *reftag = 0xDEADBEEF;
1113                                 phba->lpfc_injerr_wref_cnt--;
1114                                 if (phba->lpfc_injerr_wref_cnt == 0) {
1115                                         phba->lpfc_injerr_nportid = 0;
1116                                         phba->lpfc_injerr_lba =
1117                                         LPFC_INJERR_LBA_OFF;
1118                                         memset(&phba->lpfc_injerr_wwpn,
1119                                                 0, sizeof(struct lpfc_name));
1120                                 }
1121                                 rc = BG_ERR_TGT | BG_ERR_CHECK;
1122
1123                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1124                                         "9078 BLKGRD: Injecting reftag error: "
1125                                         "write lba x%lx\n", (unsigned long)lba);
1126                                 break;
1127                         case SCSI_PROT_WRITE_STRIP:
1128                                 /*
1129                                  * For WRITE_STRIP and WRITE_PASS,
1130                                  * force the error on data
1131                                  * being copied from SLI-Host to SLI-Port.
1132                                  */
1133                                 *reftag = 0xDEADBEEF;
1134                                 phba->lpfc_injerr_wref_cnt--;
1135                                 if (phba->lpfc_injerr_wref_cnt == 0) {
1136                                         phba->lpfc_injerr_nportid = 0;
1137                                         phba->lpfc_injerr_lba =
1138                                                 LPFC_INJERR_LBA_OFF;
1139                                         memset(&phba->lpfc_injerr_wwpn,
1140                                                 0, sizeof(struct lpfc_name));
1141                                 }
1142                                 rc = BG_ERR_INIT;
1143
1144                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1145                                         "9077 BLKGRD: Injecting reftag error: "
1146                                         "write lba x%lx\n", (unsigned long)lba);
1147                                 break;
1148                         }
1149                 }
1150                 if (phba->lpfc_injerr_rref_cnt) {
1151                         switch (op) {
1152                         case SCSI_PROT_READ_INSERT:
1153                         case SCSI_PROT_READ_STRIP:
1154                         case SCSI_PROT_READ_PASS:
1155                                 /*
1156                                  * For READ_STRIP and READ_PASS, force the
1157                                  * error on data being read off the wire. It
1158                                  * should force an IO error to the driver.
1159                                  */
1160                                 *reftag = 0xDEADBEEF;
1161                                 phba->lpfc_injerr_rref_cnt--;
1162                                 if (phba->lpfc_injerr_rref_cnt == 0) {
1163                                         phba->lpfc_injerr_nportid = 0;
1164                                         phba->lpfc_injerr_lba =
1165                                                 LPFC_INJERR_LBA_OFF;
1166                                         memset(&phba->lpfc_injerr_wwpn,
1167                                                 0, sizeof(struct lpfc_name));
1168                                 }
1169                                 rc = BG_ERR_INIT;
1170
1171                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1172                                         "9079 BLKGRD: Injecting reftag error: "
1173                                         "read lba x%lx\n", (unsigned long)lba);
1174                                 break;
1175                         }
1176                 }
1177         }
1178
1179         /* Should we change the Application Tag */
1180         if (apptag) {
1181                 if (phba->lpfc_injerr_wapp_cnt) {
1182                         switch (op) {
1183                         case SCSI_PROT_WRITE_PASS:
1184                                 if (src) {
1185                                         /*
1186                                          * For WRITE_PASS, force the error
1187                                          * to be sent on the wire. It should
1188                                          * be detected by the Target.
1189                                          * If blockoff != 0 error will be
1190                                          * inserted in middle of the IO.
1191                                          */
1192
1193                                         lpfc_printf_log(phba, KERN_ERR,
1194                                                         LOG_TRACE_EVENT,
1195                                         "9080 BLKGRD: Injecting apptag error: "
1196                                         "write lba x%lx + x%x oldappTag x%x\n",
1197                                         (unsigned long)lba, blockoff,
1198                                         be16_to_cpu(src->app_tag));
1199
1200                                         /*
1201                                          * Save the old app_tag so we can
1202                                          * restore it on completion.
1203                                          */
1204                                         if (lpfc_cmd) {
1205                                                 lpfc_cmd->prot_data_type =
1206                                                         LPFC_INJERR_APPTAG;
1207                                                 lpfc_cmd->prot_data_segment =
1208                                                         src;
1209                                                 lpfc_cmd->prot_data =
1210                                                         src->app_tag;
1211                                         }
1212                                         src->app_tag = cpu_to_be16(0xDEAD);
1213                                         phba->lpfc_injerr_wapp_cnt--;
1214                                         if (phba->lpfc_injerr_wapp_cnt == 0) {
1215                                                 phba->lpfc_injerr_nportid = 0;
1216                                                 phba->lpfc_injerr_lba =
1217                                                         LPFC_INJERR_LBA_OFF;
1218                                                 memset(&phba->lpfc_injerr_wwpn,
1219                                                   0, sizeof(struct lpfc_name));
1220                                         }
1221                                         rc = BG_ERR_TGT | BG_ERR_CHECK;
1222                                         break;
1223                                 }
1224                                 fallthrough;
1225                         case SCSI_PROT_WRITE_INSERT:
1226                                 /*
1227                                  * For WRITE_INSERT, force the
1228                                  * error to be sent on the wire. It should be
1229                                  * detected by the Target.
1230                                  */
1231                                 /* DEAD will be the apptag on the wire */
1232                                 *apptag = 0xDEAD;
1233                                 phba->lpfc_injerr_wapp_cnt--;
1234                                 if (phba->lpfc_injerr_wapp_cnt == 0) {
1235                                         phba->lpfc_injerr_nportid = 0;
1236                                         phba->lpfc_injerr_lba =
1237                                                 LPFC_INJERR_LBA_OFF;
1238                                         memset(&phba->lpfc_injerr_wwpn,
1239                                                 0, sizeof(struct lpfc_name));
1240                                 }
1241                                 rc = BG_ERR_TGT | BG_ERR_CHECK;
1242
1243                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1244                                         "0813 BLKGRD: Injecting apptag error: "
1245                                         "write lba x%lx\n", (unsigned long)lba);
1246                                 break;
1247                         case SCSI_PROT_WRITE_STRIP:
1248                                 /*
1249                                  * For WRITE_STRIP and WRITE_PASS,
1250                                  * force the error on data
1251                                  * being copied from SLI-Host to SLI-Port.
1252                                  */
1253                                 *apptag = 0xDEAD;
1254                                 phba->lpfc_injerr_wapp_cnt--;
1255                                 if (phba->lpfc_injerr_wapp_cnt == 0) {
1256                                         phba->lpfc_injerr_nportid = 0;
1257                                         phba->lpfc_injerr_lba =
1258                                                 LPFC_INJERR_LBA_OFF;
1259                                         memset(&phba->lpfc_injerr_wwpn,
1260                                                 0, sizeof(struct lpfc_name));
1261                                 }
1262                                 rc = BG_ERR_INIT;
1263
1264                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1265                                         "0812 BLKGRD: Injecting apptag error: "
1266                                         "write lba x%lx\n", (unsigned long)lba);
1267                                 break;
1268                         }
1269                 }
1270                 if (phba->lpfc_injerr_rapp_cnt) {
1271                         switch (op) {
1272                         case SCSI_PROT_READ_INSERT:
1273                         case SCSI_PROT_READ_STRIP:
1274                         case SCSI_PROT_READ_PASS:
1275                                 /*
1276                                  * For READ_STRIP and READ_PASS, force the
1277                                  * error on data being read off the wire. It
1278                                  * should force an IO error to the driver.
1279                                  */
1280                                 *apptag = 0xDEAD;
1281                                 phba->lpfc_injerr_rapp_cnt--;
1282                                 if (phba->lpfc_injerr_rapp_cnt == 0) {
1283                                         phba->lpfc_injerr_nportid = 0;
1284                                         phba->lpfc_injerr_lba =
1285                                                 LPFC_INJERR_LBA_OFF;
1286                                         memset(&phba->lpfc_injerr_wwpn,
1287                                                 0, sizeof(struct lpfc_name));
1288                                 }
1289                                 rc = BG_ERR_INIT;
1290
1291                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1292                                         "0814 BLKGRD: Injecting apptag error: "
1293                                         "read lba x%lx\n", (unsigned long)lba);
1294                                 break;
1295                         }
1296                 }
1297         }
1298
1299
1300         /* Should we change the Guard Tag */
1301         if (new_guard) {
1302                 if (phba->lpfc_injerr_wgrd_cnt) {
1303                         switch (op) {
1304                         case SCSI_PROT_WRITE_PASS:
1305                                 rc = BG_ERR_CHECK;
1306                                 fallthrough;
1307
1308                         case SCSI_PROT_WRITE_INSERT:
1309                                 /*
1310                                  * For WRITE_INSERT, force the
1311                                  * error to be sent on the wire. It should be
1312                                  * detected by the Target.
1313                                  */
1314                                 phba->lpfc_injerr_wgrd_cnt--;
1315                                 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1316                                         phba->lpfc_injerr_nportid = 0;
1317                                         phba->lpfc_injerr_lba =
1318                                                 LPFC_INJERR_LBA_OFF;
1319                                         memset(&phba->lpfc_injerr_wwpn,
1320                                                 0, sizeof(struct lpfc_name));
1321                                 }
1322
1323                                 rc |= BG_ERR_TGT | BG_ERR_SWAP;
1324                                 /* Signals the caller to swap CRC->CSUM */
1325
1326                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1327                                         "0817 BLKGRD: Injecting guard error: "
1328                                         "write lba x%lx\n", (unsigned long)lba);
1329                                 break;
1330                         case SCSI_PROT_WRITE_STRIP:
1331                                 /*
1332                                  * For WRITE_STRIP and WRITE_PASS,
1333                                  * force the error on data
1334                                  * being copied from SLI-Host to SLI-Port.
1335                                  */
1336                                 phba->lpfc_injerr_wgrd_cnt--;
1337                                 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1338                                         phba->lpfc_injerr_nportid = 0;
1339                                         phba->lpfc_injerr_lba =
1340                                                 LPFC_INJERR_LBA_OFF;
1341                                         memset(&phba->lpfc_injerr_wwpn,
1342                                                 0, sizeof(struct lpfc_name));
1343                                 }
1344
1345                                 rc = BG_ERR_INIT | BG_ERR_SWAP;
1346                                 /* Signals the caller to swap CRC->CSUM */
1347
1348                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1349                                         "0816 BLKGRD: Injecting guard error: "
1350                                         "write lba x%lx\n", (unsigned long)lba);
1351                                 break;
1352                         }
1353                 }
1354                 if (phba->lpfc_injerr_rgrd_cnt) {
1355                         switch (op) {
1356                         case SCSI_PROT_READ_INSERT:
1357                         case SCSI_PROT_READ_STRIP:
1358                         case SCSI_PROT_READ_PASS:
1359                                 /*
1360                                  * For READ_STRIP and READ_PASS, force the
1361                                  * error on data being read off the wire. It
1362                                  * should force an IO error to the driver.
1363                                  */
1364                                 phba->lpfc_injerr_rgrd_cnt--;
1365                                 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1366                                         phba->lpfc_injerr_nportid = 0;
1367                                         phba->lpfc_injerr_lba =
1368                                                 LPFC_INJERR_LBA_OFF;
1369                                         memset(&phba->lpfc_injerr_wwpn,
1370                                                 0, sizeof(struct lpfc_name));
1371                                 }
1372
1373                                 rc = BG_ERR_INIT | BG_ERR_SWAP;
1374                                 /* Signals the caller to swap CRC->CSUM */
1375
1376                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1377                                         "0818 BLKGRD: Injecting guard error: "
1378                                         "read lba x%lx\n", (unsigned long)lba);
1379                         }
1380                 }
1381         }
1382
1383         return rc;
1384 }
1385 #endif
1386
1387 /**
1388  * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1389  * the specified SCSI command.
1390  * @phba: The Hba for which this call is being executed.
1391  * @sc: The SCSI command to examine
1392  * @txop: (out) BlockGuard operation for transmitted data
1393  * @rxop: (out) BlockGuard operation for received data
1394  *
1395  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1396  *
1397  **/
1398 static int
1399 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1400                 uint8_t *txop, uint8_t *rxop)
1401 {
1402         uint8_t ret = 0;
1403
1404         if (lpfc_cmd_guard_csum(sc)) {
1405                 switch (scsi_get_prot_op(sc)) {
1406                 case SCSI_PROT_READ_INSERT:
1407                 case SCSI_PROT_WRITE_STRIP:
1408                         *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1409                         *txop = BG_OP_IN_CSUM_OUT_NODIF;
1410                         break;
1411
1412                 case SCSI_PROT_READ_STRIP:
1413                 case SCSI_PROT_WRITE_INSERT:
1414                         *rxop = BG_OP_IN_CRC_OUT_NODIF;
1415                         *txop = BG_OP_IN_NODIF_OUT_CRC;
1416                         break;
1417
1418                 case SCSI_PROT_READ_PASS:
1419                 case SCSI_PROT_WRITE_PASS:
1420                         *rxop = BG_OP_IN_CRC_OUT_CSUM;
1421                         *txop = BG_OP_IN_CSUM_OUT_CRC;
1422                         break;
1423
1424                 case SCSI_PROT_NORMAL:
1425                 default:
1426                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1427                                 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1428                                         scsi_get_prot_op(sc));
1429                         ret = 1;
1430                         break;
1431
1432                 }
1433         } else {
1434                 switch (scsi_get_prot_op(sc)) {
1435                 case SCSI_PROT_READ_STRIP:
1436                 case SCSI_PROT_WRITE_INSERT:
1437                         *rxop = BG_OP_IN_CRC_OUT_NODIF;
1438                         *txop = BG_OP_IN_NODIF_OUT_CRC;
1439                         break;
1440
1441                 case SCSI_PROT_READ_PASS:
1442                 case SCSI_PROT_WRITE_PASS:
1443                         *rxop = BG_OP_IN_CRC_OUT_CRC;
1444                         *txop = BG_OP_IN_CRC_OUT_CRC;
1445                         break;
1446
1447                 case SCSI_PROT_READ_INSERT:
1448                 case SCSI_PROT_WRITE_STRIP:
1449                         *rxop = BG_OP_IN_NODIF_OUT_CRC;
1450                         *txop = BG_OP_IN_CRC_OUT_NODIF;
1451                         break;
1452
1453                 case SCSI_PROT_NORMAL:
1454                 default:
1455                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1456                                 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1457                                         scsi_get_prot_op(sc));
1458                         ret = 1;
1459                         break;
1460                 }
1461         }
1462
1463         return ret;
1464 }
1465
1466 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1467 /**
1468  * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with
1469  * the specified SCSI command in order to force a guard tag error.
1470  * @phba: The Hba for which this call is being executed.
1471  * @sc: The SCSI command to examine
1472  * @txop: (out) BlockGuard operation for transmitted data
1473  * @rxop: (out) BlockGuard operation for received data
1474  *
1475  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1476  *
1477  **/
1478 static int
1479 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1480                 uint8_t *txop, uint8_t *rxop)
1481 {
1482         uint8_t ret = 0;
1483
1484         if (lpfc_cmd_guard_csum(sc)) {
1485                 switch (scsi_get_prot_op(sc)) {
1486                 case SCSI_PROT_READ_INSERT:
1487                 case SCSI_PROT_WRITE_STRIP:
1488                         *rxop = BG_OP_IN_NODIF_OUT_CRC;
1489                         *txop = BG_OP_IN_CRC_OUT_NODIF;
1490                         break;
1491
1492                 case SCSI_PROT_READ_STRIP:
1493                 case SCSI_PROT_WRITE_INSERT:
1494                         *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1495                         *txop = BG_OP_IN_NODIF_OUT_CSUM;
1496                         break;
1497
1498                 case SCSI_PROT_READ_PASS:
1499                 case SCSI_PROT_WRITE_PASS:
1500                         *rxop = BG_OP_IN_CSUM_OUT_CRC;
1501                         *txop = BG_OP_IN_CRC_OUT_CSUM;
1502                         break;
1503
1504                 case SCSI_PROT_NORMAL:
1505                 default:
1506                         break;
1507
1508                 }
1509         } else {
1510                 switch (scsi_get_prot_op(sc)) {
1511                 case SCSI_PROT_READ_STRIP:
1512                 case SCSI_PROT_WRITE_INSERT:
1513                         *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1514                         *txop = BG_OP_IN_NODIF_OUT_CSUM;
1515                         break;
1516
1517                 case SCSI_PROT_READ_PASS:
1518                 case SCSI_PROT_WRITE_PASS:
1519                         *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1520                         *txop = BG_OP_IN_CSUM_OUT_CSUM;
1521                         break;
1522
1523                 case SCSI_PROT_READ_INSERT:
1524                 case SCSI_PROT_WRITE_STRIP:
1525                         *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1526                         *txop = BG_OP_IN_CSUM_OUT_NODIF;
1527                         break;
1528
1529                 case SCSI_PROT_NORMAL:
1530                 default:
1531                         break;
1532                 }
1533         }
1534
1535         return ret;
1536 }
1537 #endif
1538
1539 /**
1540  * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1541  * @phba: The Hba for which this call is being executed.
1542  * @sc: pointer to scsi command we're working on
1543  * @bpl: pointer to buffer list for protection groups
1544  * @datasegcnt: number of segments of data that have been dma mapped
1545  *
1546  * This function sets up BPL buffer list for protection groups of
1547  * type LPFC_PG_TYPE_NO_DIF
1548  *
1549  * This is usually used when the HBA is instructed to generate
1550  * DIFs and insert them into data stream (or strip DIF from
1551  * incoming data stream)
1552  *
1553  * The buffer list consists of just one protection group described
1554  * below:
1555  *                                +-------------------------+
1556  *   start of prot group  -->     |          PDE_5          |
1557  *                                +-------------------------+
1558  *                                |          PDE_6          |
1559  *                                +-------------------------+
1560  *                                |         Data BDE        |
1561  *                                +-------------------------+
1562  *                                |more Data BDE's ... (opt)|
1563  *                                +-------------------------+
1564  *
1565  *
1566  * Note: Data s/g buffers have been dma mapped
1567  *
1568  * Returns the number of BDEs added to the BPL.
1569  **/
1570 static int
1571 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1572                 struct ulp_bde64 *bpl, int datasegcnt)
1573 {
1574         struct scatterlist *sgde = NULL; /* s/g data entry */
1575         struct lpfc_pde5 *pde5 = NULL;
1576         struct lpfc_pde6 *pde6 = NULL;
1577         dma_addr_t physaddr;
1578         int i = 0, num_bde = 0, status;
1579         int datadir = sc->sc_data_direction;
1580 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1581         uint32_t rc;
1582 #endif
1583         uint32_t checking = 1;
1584         uint32_t reftag;
1585         uint8_t txop, rxop;
1586
1587         status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1588         if (status)
1589                 goto out;
1590
1591         /* extract some info from the scsi command for pde*/
1592         reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1593
1594 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1595         rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1596         if (rc) {
1597                 if (rc & BG_ERR_SWAP)
1598                         lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1599                 if (rc & BG_ERR_CHECK)
1600                         checking = 0;
1601         }
1602 #endif
1603
1604         /* setup PDE5 with what we have */
1605         pde5 = (struct lpfc_pde5 *) bpl;
1606         memset(pde5, 0, sizeof(struct lpfc_pde5));
1607         bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1608
1609         /* Endianness conversion if necessary for PDE5 */
1610         pde5->word0 = cpu_to_le32(pde5->word0);
1611         pde5->reftag = cpu_to_le32(reftag);
1612
1613         /* advance bpl and increment bde count */
1614         num_bde++;
1615         bpl++;
1616         pde6 = (struct lpfc_pde6 *) bpl;
1617
1618         /* setup PDE6 with the rest of the info */
1619         memset(pde6, 0, sizeof(struct lpfc_pde6));
1620         bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1621         bf_set(pde6_optx, pde6, txop);
1622         bf_set(pde6_oprx, pde6, rxop);
1623
1624         /*
1625          * We only need to check the data on READs, for WRITEs
1626          * protection data is automatically generated, not checked.
1627          */
1628         if (datadir == DMA_FROM_DEVICE) {
1629                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1630                         bf_set(pde6_ce, pde6, checking);
1631                 else
1632                         bf_set(pde6_ce, pde6, 0);
1633
1634                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
1635                         bf_set(pde6_re, pde6, checking);
1636                 else
1637                         bf_set(pde6_re, pde6, 0);
1638         }
1639         bf_set(pde6_ai, pde6, 1);
1640         bf_set(pde6_ae, pde6, 0);
1641         bf_set(pde6_apptagval, pde6, 0);
1642
1643         /* Endianness conversion if necessary for PDE6 */
1644         pde6->word0 = cpu_to_le32(pde6->word0);
1645         pde6->word1 = cpu_to_le32(pde6->word1);
1646         pde6->word2 = cpu_to_le32(pde6->word2);
1647
1648         /* advance bpl and increment bde count */
1649         num_bde++;
1650         bpl++;
1651
1652         /* assumption: caller has already run dma_map_sg on command data */
1653         scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1654                 physaddr = sg_dma_address(sgde);
1655                 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1656                 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1657                 bpl->tus.f.bdeSize = sg_dma_len(sgde);
1658                 if (datadir == DMA_TO_DEVICE)
1659                         bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1660                 else
1661                         bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1662                 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1663                 bpl++;
1664                 num_bde++;
1665         }
1666
1667 out:
1668         return num_bde;
1669 }
1670
1671 /**
1672  * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1673  * @phba: The Hba for which this call is being executed.
1674  * @sc: pointer to scsi command we're working on
1675  * @bpl: pointer to buffer list for protection groups
1676  * @datacnt: number of segments of data that have been dma mapped
1677  * @protcnt: number of segment of protection data that have been dma mapped
1678  *
1679  * This function sets up BPL buffer list for protection groups of
1680  * type LPFC_PG_TYPE_DIF
1681  *
1682  * This is usually used when DIFs are in their own buffers,
1683  * separate from the data. The HBA can then by instructed
1684  * to place the DIFs in the outgoing stream.  For read operations,
1685  * The HBA could extract the DIFs and place it in DIF buffers.
1686  *
1687  * The buffer list for this type consists of one or more of the
1688  * protection groups described below:
1689  *                                    +-------------------------+
1690  *   start of first prot group  -->   |          PDE_5          |
1691  *                                    +-------------------------+
1692  *                                    |          PDE_6          |
1693  *                                    +-------------------------+
1694  *                                    |      PDE_7 (Prot BDE)   |
1695  *                                    +-------------------------+
1696  *                                    |        Data BDE         |
1697  *                                    +-------------------------+
1698  *                                    |more Data BDE's ... (opt)|
1699  *                                    +-------------------------+
1700  *   start of new  prot group  -->    |          PDE_5          |
1701  *                                    +-------------------------+
1702  *                                    |          ...            |
1703  *                                    +-------------------------+
1704  *
1705  * Note: It is assumed that both data and protection s/g buffers have been
1706  *       mapped for DMA
1707  *
1708  * Returns the number of BDEs added to the BPL.
1709  **/
1710 static int
1711 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1712                 struct ulp_bde64 *bpl, int datacnt, int protcnt)
1713 {
1714         struct scatterlist *sgde = NULL; /* s/g data entry */
1715         struct scatterlist *sgpe = NULL; /* s/g prot entry */
1716         struct lpfc_pde5 *pde5 = NULL;
1717         struct lpfc_pde6 *pde6 = NULL;
1718         struct lpfc_pde7 *pde7 = NULL;
1719         dma_addr_t dataphysaddr, protphysaddr;
1720         unsigned short curr_data = 0, curr_prot = 0;
1721         unsigned int split_offset;
1722         unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
1723         unsigned int protgrp_blks, protgrp_bytes;
1724         unsigned int remainder, subtotal;
1725         int status;
1726         int datadir = sc->sc_data_direction;
1727         unsigned char pgdone = 0, alldone = 0;
1728         unsigned blksize;
1729 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1730         uint32_t rc;
1731 #endif
1732         uint32_t checking = 1;
1733         uint32_t reftag;
1734         uint8_t txop, rxop;
1735         int num_bde = 0;
1736
1737         sgpe = scsi_prot_sglist(sc);
1738         sgde = scsi_sglist(sc);
1739
1740         if (!sgpe || !sgde) {
1741                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1742                                 "9020 Invalid s/g entry: data=x%px prot=x%px\n",
1743                                 sgpe, sgde);
1744                 return 0;
1745         }
1746
1747         status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1748         if (status)
1749                 goto out;
1750
1751         /* extract some info from the scsi command */
1752         blksize = lpfc_cmd_blksize(sc);
1753         reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1754
1755 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1756         rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1757         if (rc) {
1758                 if (rc & BG_ERR_SWAP)
1759                         lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1760                 if (rc & BG_ERR_CHECK)
1761                         checking = 0;
1762         }
1763 #endif
1764
1765         split_offset = 0;
1766         do {
1767                 /* Check to see if we ran out of space */
1768                 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1769                         return num_bde + 3;
1770
1771                 /* setup PDE5 with what we have */
1772                 pde5 = (struct lpfc_pde5 *) bpl;
1773                 memset(pde5, 0, sizeof(struct lpfc_pde5));
1774                 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1775
1776                 /* Endianness conversion if necessary for PDE5 */
1777                 pde5->word0 = cpu_to_le32(pde5->word0);
1778                 pde5->reftag = cpu_to_le32(reftag);
1779
1780                 /* advance bpl and increment bde count */
1781                 num_bde++;
1782                 bpl++;
1783                 pde6 = (struct lpfc_pde6 *) bpl;
1784
1785                 /* setup PDE6 with the rest of the info */
1786                 memset(pde6, 0, sizeof(struct lpfc_pde6));
1787                 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1788                 bf_set(pde6_optx, pde6, txop);
1789                 bf_set(pde6_oprx, pde6, rxop);
1790
1791                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1792                         bf_set(pde6_ce, pde6, checking);
1793                 else
1794                         bf_set(pde6_ce, pde6, 0);
1795
1796                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
1797                         bf_set(pde6_re, pde6, checking);
1798                 else
1799                         bf_set(pde6_re, pde6, 0);
1800
1801                 bf_set(pde6_ai, pde6, 1);
1802                 bf_set(pde6_ae, pde6, 0);
1803                 bf_set(pde6_apptagval, pde6, 0);
1804
1805                 /* Endianness conversion if necessary for PDE6 */
1806                 pde6->word0 = cpu_to_le32(pde6->word0);
1807                 pde6->word1 = cpu_to_le32(pde6->word1);
1808                 pde6->word2 = cpu_to_le32(pde6->word2);
1809
1810                 /* advance bpl and increment bde count */
1811                 num_bde++;
1812                 bpl++;
1813
1814                 /* setup the first BDE that points to protection buffer */
1815                 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1816                 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
1817
1818                 /* must be integer multiple of the DIF block length */
1819                 BUG_ON(protgroup_len % 8);
1820
1821                 pde7 = (struct lpfc_pde7 *) bpl;
1822                 memset(pde7, 0, sizeof(struct lpfc_pde7));
1823                 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1824
1825                 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1826                 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1827
1828                 protgrp_blks = protgroup_len / 8;
1829                 protgrp_bytes = protgrp_blks * blksize;
1830
1831                 /* check if this pde is crossing the 4K boundary; if so split */
1832                 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
1833                         protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1834                         protgroup_offset += protgroup_remainder;
1835                         protgrp_blks = protgroup_remainder / 8;
1836                         protgrp_bytes = protgrp_blks * blksize;
1837                 } else {
1838                         protgroup_offset = 0;
1839                         curr_prot++;
1840                 }
1841
1842                 num_bde++;
1843
1844                 /* setup BDE's for data blocks associated with DIF data */
1845                 pgdone = 0;
1846                 subtotal = 0; /* total bytes processed for current prot grp */
1847                 while (!pgdone) {
1848                         /* Check to see if we ran out of space */
1849                         if (num_bde >= phba->cfg_total_seg_cnt)
1850                                 return num_bde + 1;
1851
1852                         if (!sgde) {
1853                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1854                                         "9065 BLKGRD:%s Invalid data segment\n",
1855                                                 __func__);
1856                                 return 0;
1857                         }
1858                         bpl++;
1859                         dataphysaddr = sg_dma_address(sgde) + split_offset;
1860                         bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1861                         bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1862
1863                         remainder = sg_dma_len(sgde) - split_offset;
1864
1865                         if ((subtotal + remainder) <= protgrp_bytes) {
1866                                 /* we can use this whole buffer */
1867                                 bpl->tus.f.bdeSize = remainder;
1868                                 split_offset = 0;
1869
1870                                 if ((subtotal + remainder) == protgrp_bytes)
1871                                         pgdone = 1;
1872                         } else {
1873                                 /* must split this buffer with next prot grp */
1874                                 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1875                                 split_offset += bpl->tus.f.bdeSize;
1876                         }
1877
1878                         subtotal += bpl->tus.f.bdeSize;
1879
1880                         if (datadir == DMA_TO_DEVICE)
1881                                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1882                         else
1883                                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1884                         bpl->tus.w = le32_to_cpu(bpl->tus.w);
1885
1886                         num_bde++;
1887                         curr_data++;
1888
1889                         if (split_offset)
1890                                 break;
1891
1892                         /* Move to the next s/g segment if possible */
1893                         sgde = sg_next(sgde);
1894
1895                 }
1896
1897                 if (protgroup_offset) {
1898                         /* update the reference tag */
1899                         reftag += protgrp_blks;
1900                         bpl++;
1901                         continue;
1902                 }
1903
1904                 /* are we done ? */
1905                 if (curr_prot == protcnt) {
1906                         alldone = 1;
1907                 } else if (curr_prot < protcnt) {
1908                         /* advance to next prot buffer */
1909                         sgpe = sg_next(sgpe);
1910                         bpl++;
1911
1912                         /* update the reference tag */
1913                         reftag += protgrp_blks;
1914                 } else {
1915                         /* if we're here, we have a bug */
1916                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1917                                         "9054 BLKGRD: bug in %s\n", __func__);
1918                 }
1919
1920         } while (!alldone);
1921 out:
1922
1923         return num_bde;
1924 }
1925
1926 /**
1927  * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1928  * @phba: The Hba for which this call is being executed.
1929  * @sc: pointer to scsi command we're working on
1930  * @sgl: pointer to buffer list for protection groups
1931  * @datasegcnt: number of segments of data that have been dma mapped
1932  * @lpfc_cmd: lpfc scsi command object pointer.
1933  *
1934  * This function sets up SGL buffer list for protection groups of
1935  * type LPFC_PG_TYPE_NO_DIF
1936  *
1937  * This is usually used when the HBA is instructed to generate
1938  * DIFs and insert them into data stream (or strip DIF from
1939  * incoming data stream)
1940  *
1941  * The buffer list consists of just one protection group described
1942  * below:
1943  *                                +-------------------------+
1944  *   start of prot group  -->     |         DI_SEED         |
1945  *                                +-------------------------+
1946  *                                |         Data SGE        |
1947  *                                +-------------------------+
1948  *                                |more Data SGE's ... (opt)|
1949  *                                +-------------------------+
1950  *
1951  *
1952  * Note: Data s/g buffers have been dma mapped
1953  *
1954  * Returns the number of SGEs added to the SGL.
1955  **/
1956 static int
1957 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1958                 struct sli4_sge *sgl, int datasegcnt,
1959                 struct lpfc_io_buf *lpfc_cmd)
1960 {
1961         struct scatterlist *sgde = NULL; /* s/g data entry */
1962         struct sli4_sge_diseed *diseed = NULL;
1963         dma_addr_t physaddr;
1964         int i = 0, num_sge = 0, status;
1965         uint32_t reftag;
1966         uint8_t txop, rxop;
1967 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1968         uint32_t rc;
1969 #endif
1970         uint32_t checking = 1;
1971         uint32_t dma_len;
1972         uint32_t dma_offset = 0;
1973         struct sli4_hybrid_sgl *sgl_xtra = NULL;
1974         int j;
1975         bool lsp_just_set = false;
1976
1977         status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1978         if (status)
1979                 goto out;
1980
1981         /* extract some info from the scsi command for pde*/
1982         reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1983
1984 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1985         rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1986         if (rc) {
1987                 if (rc & BG_ERR_SWAP)
1988                         lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1989                 if (rc & BG_ERR_CHECK)
1990                         checking = 0;
1991         }
1992 #endif
1993
1994         /* setup DISEED with what we have */
1995         diseed = (struct sli4_sge_diseed *) sgl;
1996         memset(diseed, 0, sizeof(struct sli4_sge_diseed));
1997         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
1998
1999         /* Endianness conversion if necessary */
2000         diseed->ref_tag = cpu_to_le32(reftag);
2001         diseed->ref_tag_tran = diseed->ref_tag;
2002
2003         /*
2004          * We only need to check the data on READs, for WRITEs
2005          * protection data is automatically generated, not checked.
2006          */
2007         if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2008                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
2009                         bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2010                 else
2011                         bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2012
2013                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2014                         bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2015                 else
2016                         bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2017         }
2018
2019         /* setup DISEED with the rest of the info */
2020         bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2021         bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2022
2023         bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2024         bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2025
2026         /* Endianness conversion if necessary for DISEED */
2027         diseed->word2 = cpu_to_le32(diseed->word2);
2028         diseed->word3 = cpu_to_le32(diseed->word3);
2029
2030         /* advance bpl and increment sge count */
2031         num_sge++;
2032         sgl++;
2033
2034         /* assumption: caller has already run dma_map_sg on command data */
2035         sgde = scsi_sglist(sc);
2036         j = 3;
2037         for (i = 0; i < datasegcnt; i++) {
2038                 /* clear it */
2039                 sgl->word2 = 0;
2040
2041                 /* do we need to expand the segment */
2042                 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2043                     ((datasegcnt - 1) != i)) {
2044                         /* set LSP type */
2045                         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2046
2047                         sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2048
2049                         if (unlikely(!sgl_xtra)) {
2050                                 lpfc_cmd->seg_cnt = 0;
2051                                 return 0;
2052                         }
2053                         sgl->addr_lo = cpu_to_le32(putPaddrLow(
2054                                                 sgl_xtra->dma_phys_sgl));
2055                         sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2056                                                 sgl_xtra->dma_phys_sgl));
2057
2058                 } else {
2059                         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2060                 }
2061
2062                 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
2063                         if ((datasegcnt - 1) == i)
2064                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
2065                         physaddr = sg_dma_address(sgde);
2066                         dma_len = sg_dma_len(sgde);
2067                         sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2068                         sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2069
2070                         bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2071                         sgl->word2 = cpu_to_le32(sgl->word2);
2072                         sgl->sge_len = cpu_to_le32(dma_len);
2073
2074                         dma_offset += dma_len;
2075                         sgde = sg_next(sgde);
2076
2077                         sgl++;
2078                         num_sge++;
2079                         lsp_just_set = false;
2080
2081                 } else {
2082                         sgl->word2 = cpu_to_le32(sgl->word2);
2083                         sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2084
2085                         sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2086                         i = i - 1;
2087
2088                         lsp_just_set = true;
2089                 }
2090
2091                 j++;
2092
2093         }
2094
2095 out:
2096         return num_sge;
2097 }
2098
2099 /**
2100  * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2101  * @phba: The Hba for which this call is being executed.
2102  * @sc: pointer to scsi command we're working on
2103  * @sgl: pointer to buffer list for protection groups
2104  * @datacnt: number of segments of data that have been dma mapped
2105  * @protcnt: number of segment of protection data that have been dma mapped
2106  * @lpfc_cmd: lpfc scsi command object pointer.
2107  *
2108  * This function sets up SGL buffer list for protection groups of
2109  * type LPFC_PG_TYPE_DIF
2110  *
2111  * This is usually used when DIFs are in their own buffers,
2112  * separate from the data. The HBA can then by instructed
2113  * to place the DIFs in the outgoing stream.  For read operations,
2114  * The HBA could extract the DIFs and place it in DIF buffers.
2115  *
2116  * The buffer list for this type consists of one or more of the
2117  * protection groups described below:
2118  *                                    +-------------------------+
2119  *   start of first prot group  -->   |         DISEED          |
2120  *                                    +-------------------------+
2121  *                                    |      DIF (Prot SGE)     |
2122  *                                    +-------------------------+
2123  *                                    |        Data SGE         |
2124  *                                    +-------------------------+
2125  *                                    |more Data SGE's ... (opt)|
2126  *                                    +-------------------------+
2127  *   start of new  prot group  -->    |         DISEED          |
2128  *                                    +-------------------------+
2129  *                                    |          ...            |
2130  *                                    +-------------------------+
2131  *
2132  * Note: It is assumed that both data and protection s/g buffers have been
2133  *       mapped for DMA
2134  *
2135  * Returns the number of SGEs added to the SGL.
2136  **/
2137 static int
2138 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2139                 struct sli4_sge *sgl, int datacnt, int protcnt,
2140                 struct lpfc_io_buf *lpfc_cmd)
2141 {
2142         struct scatterlist *sgde = NULL; /* s/g data entry */
2143         struct scatterlist *sgpe = NULL; /* s/g prot entry */
2144         struct sli4_sge_diseed *diseed = NULL;
2145         dma_addr_t dataphysaddr, protphysaddr;
2146         unsigned short curr_data = 0, curr_prot = 0;
2147         unsigned int split_offset;
2148         unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2149         unsigned int protgrp_blks, protgrp_bytes;
2150         unsigned int remainder, subtotal;
2151         int status;
2152         unsigned char pgdone = 0, alldone = 0;
2153         unsigned blksize;
2154         uint32_t reftag;
2155         uint8_t txop, rxop;
2156         uint32_t dma_len;
2157 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2158         uint32_t rc;
2159 #endif
2160         uint32_t checking = 1;
2161         uint32_t dma_offset = 0;
2162         int num_sge = 0, j = 2;
2163         struct sli4_hybrid_sgl *sgl_xtra = NULL;
2164
2165         sgpe = scsi_prot_sglist(sc);
2166         sgde = scsi_sglist(sc);
2167
2168         if (!sgpe || !sgde) {
2169                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2170                                 "9082 Invalid s/g entry: data=x%px prot=x%px\n",
2171                                 sgpe, sgde);
2172                 return 0;
2173         }
2174
2175         status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2176         if (status)
2177                 goto out;
2178
2179         /* extract some info from the scsi command */
2180         blksize = lpfc_cmd_blksize(sc);
2181         reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2182
2183 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2184         rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2185         if (rc) {
2186                 if (rc & BG_ERR_SWAP)
2187                         lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2188                 if (rc & BG_ERR_CHECK)
2189                         checking = 0;
2190         }
2191 #endif
2192
2193         split_offset = 0;
2194         do {
2195                 /* Check to see if we ran out of space */
2196                 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2197                     !(phba->cfg_xpsgl))
2198                         return num_sge + 3;
2199
2200                 /* DISEED and DIF have to be together */
2201                 if (!((j + 1) % phba->border_sge_num) ||
2202                     !((j + 2) % phba->border_sge_num) ||
2203                     !((j + 3) % phba->border_sge_num)) {
2204                         sgl->word2 = 0;
2205
2206                         /* set LSP type */
2207                         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2208
2209                         sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2210
2211                         if (unlikely(!sgl_xtra)) {
2212                                 goto out;
2213                         } else {
2214                                 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2215                                                 sgl_xtra->dma_phys_sgl));
2216                                 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2217                                                        sgl_xtra->dma_phys_sgl));
2218                         }
2219
2220                         sgl->word2 = cpu_to_le32(sgl->word2);
2221                         sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2222
2223                         sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2224                         j = 0;
2225                 }
2226
2227                 /* setup DISEED with what we have */
2228                 diseed = (struct sli4_sge_diseed *) sgl;
2229                 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2230                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2231
2232                 /* Endianness conversion if necessary */
2233                 diseed->ref_tag = cpu_to_le32(reftag);
2234                 diseed->ref_tag_tran = diseed->ref_tag;
2235
2236                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
2237                         bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2238
2239                 } else {
2240                         bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2241                         /*
2242                          * When in this mode, the hardware will replace
2243                          * the guard tag from the host with a
2244                          * newly generated good CRC for the wire.
2245                          * Switch to raw mode here to avoid this
2246                          * behavior. What the host sends gets put on the wire.
2247                          */
2248                         if (txop == BG_OP_IN_CRC_OUT_CRC) {
2249                                 txop = BG_OP_RAW_MODE;
2250                                 rxop = BG_OP_RAW_MODE;
2251                         }
2252                 }
2253
2254
2255                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2256                         bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2257                 else
2258                         bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2259
2260                 /* setup DISEED with the rest of the info */
2261                 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2262                 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2263
2264                 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2265                 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2266
2267                 /* Endianness conversion if necessary for DISEED */
2268                 diseed->word2 = cpu_to_le32(diseed->word2);
2269                 diseed->word3 = cpu_to_le32(diseed->word3);
2270
2271                 /* advance sgl and increment bde count */
2272                 num_sge++;
2273
2274                 sgl++;
2275                 j++;
2276
2277                 /* setup the first BDE that points to protection buffer */
2278                 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2279                 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2280
2281                 /* must be integer multiple of the DIF block length */
2282                 BUG_ON(protgroup_len % 8);
2283
2284                 /* Now setup DIF SGE */
2285                 sgl->word2 = 0;
2286                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2287                 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2288                 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2289                 sgl->word2 = cpu_to_le32(sgl->word2);
2290                 sgl->sge_len = 0;
2291
2292                 protgrp_blks = protgroup_len / 8;
2293                 protgrp_bytes = protgrp_blks * blksize;
2294
2295                 /* check if DIF SGE is crossing the 4K boundary; if so split */
2296                 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2297                         protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2298                         protgroup_offset += protgroup_remainder;
2299                         protgrp_blks = protgroup_remainder / 8;
2300                         protgrp_bytes = protgrp_blks * blksize;
2301                 } else {
2302                         protgroup_offset = 0;
2303                         curr_prot++;
2304                 }
2305
2306                 num_sge++;
2307
2308                 /* setup SGE's for data blocks associated with DIF data */
2309                 pgdone = 0;
2310                 subtotal = 0; /* total bytes processed for current prot grp */
2311
2312                 sgl++;
2313                 j++;
2314
2315                 while (!pgdone) {
2316                         /* Check to see if we ran out of space */
2317                         if ((num_sge >= phba->cfg_total_seg_cnt) &&
2318                             !phba->cfg_xpsgl)
2319                                 return num_sge + 1;
2320
2321                         if (!sgde) {
2322                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2323                                         "9086 BLKGRD:%s Invalid data segment\n",
2324                                                 __func__);
2325                                 return 0;
2326                         }
2327
2328                         if (!((j + 1) % phba->border_sge_num)) {
2329                                 sgl->word2 = 0;
2330
2331                                 /* set LSP type */
2332                                 bf_set(lpfc_sli4_sge_type, sgl,
2333                                        LPFC_SGE_TYPE_LSP);
2334
2335                                 sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2336                                                                  lpfc_cmd);
2337
2338                                 if (unlikely(!sgl_xtra)) {
2339                                         goto out;
2340                                 } else {
2341                                         sgl->addr_lo = cpu_to_le32(
2342                                           putPaddrLow(sgl_xtra->dma_phys_sgl));
2343                                         sgl->addr_hi = cpu_to_le32(
2344                                           putPaddrHigh(sgl_xtra->dma_phys_sgl));
2345                                 }
2346
2347                                 sgl->word2 = cpu_to_le32(sgl->word2);
2348                                 sgl->sge_len = cpu_to_le32(
2349                                                      phba->cfg_sg_dma_buf_size);
2350
2351                                 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2352                         } else {
2353                                 dataphysaddr = sg_dma_address(sgde) +
2354                                                                    split_offset;
2355
2356                                 remainder = sg_dma_len(sgde) - split_offset;
2357
2358                                 if ((subtotal + remainder) <= protgrp_bytes) {
2359                                         /* we can use this whole buffer */
2360                                         dma_len = remainder;
2361                                         split_offset = 0;
2362
2363                                         if ((subtotal + remainder) ==
2364                                                                   protgrp_bytes)
2365                                                 pgdone = 1;
2366                                 } else {
2367                                         /* must split this buffer with next
2368                                          * prot grp
2369                                          */
2370                                         dma_len = protgrp_bytes - subtotal;
2371                                         split_offset += dma_len;
2372                                 }
2373
2374                                 subtotal += dma_len;
2375
2376                                 sgl->word2 = 0;
2377                                 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2378                                                                  dataphysaddr));
2379                                 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2380                                                                  dataphysaddr));
2381                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
2382                                 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2383                                 bf_set(lpfc_sli4_sge_type, sgl,
2384                                        LPFC_SGE_TYPE_DATA);
2385
2386                                 sgl->sge_len = cpu_to_le32(dma_len);
2387                                 dma_offset += dma_len;
2388
2389                                 num_sge++;
2390                                 curr_data++;
2391
2392                                 if (split_offset) {
2393                                         sgl++;
2394                                         j++;
2395                                         break;
2396                                 }
2397
2398                                 /* Move to the next s/g segment if possible */
2399                                 sgde = sg_next(sgde);
2400
2401                                 sgl++;
2402                         }
2403
2404                         j++;
2405                 }
2406
2407                 if (protgroup_offset) {
2408                         /* update the reference tag */
2409                         reftag += protgrp_blks;
2410                         continue;
2411                 }
2412
2413                 /* are we done ? */
2414                 if (curr_prot == protcnt) {
2415                         /* mark the last SGL */
2416                         sgl--;
2417                         bf_set(lpfc_sli4_sge_last, sgl, 1);
2418                         alldone = 1;
2419                 } else if (curr_prot < protcnt) {
2420                         /* advance to next prot buffer */
2421                         sgpe = sg_next(sgpe);
2422
2423                         /* update the reference tag */
2424                         reftag += protgrp_blks;
2425                 } else {
2426                         /* if we're here, we have a bug */
2427                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2428                                         "9085 BLKGRD: bug in %s\n", __func__);
2429                 }
2430
2431         } while (!alldone);
2432
2433 out:
2434
2435         return num_sge;
2436 }
2437
2438 /**
2439  * lpfc_prot_group_type - Get prtotection group type of SCSI command
2440  * @phba: The Hba for which this call is being executed.
2441  * @sc: pointer to scsi command we're working on
2442  *
2443  * Given a SCSI command that supports DIF, determine composition of protection
2444  * groups involved in setting up buffer lists
2445  *
2446  * Returns: Protection group type (with or without DIF)
2447  *
2448  **/
2449 static int
2450 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2451 {
2452         int ret = LPFC_PG_TYPE_INVALID;
2453         unsigned char op = scsi_get_prot_op(sc);
2454
2455         switch (op) {
2456         case SCSI_PROT_READ_STRIP:
2457         case SCSI_PROT_WRITE_INSERT:
2458                 ret = LPFC_PG_TYPE_NO_DIF;
2459                 break;
2460         case SCSI_PROT_READ_INSERT:
2461         case SCSI_PROT_WRITE_STRIP:
2462         case SCSI_PROT_READ_PASS:
2463         case SCSI_PROT_WRITE_PASS:
2464                 ret = LPFC_PG_TYPE_DIF_BUF;
2465                 break;
2466         default:
2467                 if (phba)
2468                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2469                                         "9021 Unsupported protection op:%d\n",
2470                                         op);
2471                 break;
2472         }
2473         return ret;
2474 }
2475
2476 /**
2477  * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2478  * @phba: The Hba for which this call is being executed.
2479  * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2480  *
2481  * Adjust the data length to account for how much data
2482  * is actually on the wire.
2483  *
2484  * returns the adjusted data length
2485  **/
2486 static int
2487 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2488                        struct lpfc_io_buf *lpfc_cmd)
2489 {
2490         struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2491         int fcpdl;
2492
2493         fcpdl = scsi_bufflen(sc);
2494
2495         /* Check if there is protection data on the wire */
2496         if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2497                 /* Read check for protection data */
2498                 if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
2499                         return fcpdl;
2500
2501         } else {
2502                 /* Write check for protection data */
2503                 if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
2504                         return fcpdl;
2505         }
2506
2507         /*
2508          * If we are in DIF Type 1 mode every data block has a 8 byte
2509          * DIF (trailer) attached to it. Must ajust FCP data length
2510          * to account for the protection data.
2511          */
2512         fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2513
2514         return fcpdl;
2515 }
2516
2517 /**
2518  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2519  * @phba: The Hba for which this call is being executed.
2520  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2521  *
2522  * This is the protection/DIF aware version of
2523  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2524  * two functions eventually, but for now, it's here.
2525  * RETURNS 0 - SUCCESS,
2526  *         1 - Failed DMA map, retry.
2527  *         2 - Invalid scsi cmd or prot-type. Do not rety.
2528  **/
2529 static int
2530 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2531                 struct lpfc_io_buf *lpfc_cmd)
2532 {
2533         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2534         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2535         struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2536         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2537         uint32_t num_bde = 0;
2538         int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2539         int prot_group_type = 0;
2540         int fcpdl;
2541         int ret = 1;
2542         struct lpfc_vport *vport = phba->pport;
2543
2544         /*
2545          * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2546          *  fcp_rsp regions to the first data bde entry
2547          */
2548         bpl += 2;
2549         if (scsi_sg_count(scsi_cmnd)) {
2550                 /*
2551                  * The driver stores the segment count returned from pci_map_sg
2552                  * because this a count of dma-mappings used to map the use_sg
2553                  * pages.  They are not guaranteed to be the same for those
2554                  * architectures that implement an IOMMU.
2555                  */
2556                 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2557                                         scsi_sglist(scsi_cmnd),
2558                                         scsi_sg_count(scsi_cmnd), datadir);
2559                 if (unlikely(!datasegcnt))
2560                         return 1;
2561
2562                 lpfc_cmd->seg_cnt = datasegcnt;
2563
2564                 /* First check if data segment count from SCSI Layer is good */
2565                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2566                         WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2567                         ret = 2;
2568                         goto err;
2569                 }
2570
2571                 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2572
2573                 switch (prot_group_type) {
2574                 case LPFC_PG_TYPE_NO_DIF:
2575
2576                         /* Here we need to add a PDE5 and PDE6 to the count */
2577                         if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2578                                 ret = 2;
2579                                 goto err;
2580                         }
2581
2582                         num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2583                                         datasegcnt);
2584                         /* we should have 2 or more entries in buffer list */
2585                         if (num_bde < 2) {
2586                                 ret = 2;
2587                                 goto err;
2588                         }
2589                         break;
2590
2591                 case LPFC_PG_TYPE_DIF_BUF:
2592                         /*
2593                          * This type indicates that protection buffers are
2594                          * passed to the driver, so that needs to be prepared
2595                          * for DMA
2596                          */
2597                         protsegcnt = dma_map_sg(&phba->pcidev->dev,
2598                                         scsi_prot_sglist(scsi_cmnd),
2599                                         scsi_prot_sg_count(scsi_cmnd), datadir);
2600                         if (unlikely(!protsegcnt)) {
2601                                 scsi_dma_unmap(scsi_cmnd);
2602                                 return 1;
2603                         }
2604
2605                         lpfc_cmd->prot_seg_cnt = protsegcnt;
2606
2607                         /*
2608                          * There is a minimun of 4 BPLs used for every
2609                          * protection data segment.
2610                          */
2611                         if ((lpfc_cmd->prot_seg_cnt * 4) >
2612                             (phba->cfg_total_seg_cnt - 2)) {
2613                                 ret = 2;
2614                                 goto err;
2615                         }
2616
2617                         num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2618                                         datasegcnt, protsegcnt);
2619                         /* we should have 3 or more entries in buffer list */
2620                         if ((num_bde < 3) ||
2621                             (num_bde > phba->cfg_total_seg_cnt)) {
2622                                 ret = 2;
2623                                 goto err;
2624                         }
2625                         break;
2626
2627                 case LPFC_PG_TYPE_INVALID:
2628                 default:
2629                         scsi_dma_unmap(scsi_cmnd);
2630                         lpfc_cmd->seg_cnt = 0;
2631
2632                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2633                                         "9022 Unexpected protection group %i\n",
2634                                         prot_group_type);
2635                         return 2;
2636                 }
2637         }
2638
2639         /*
2640          * Finish initializing those IOCB fields that are dependent on the
2641          * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
2642          * reinitialized since all iocb memory resources are used many times
2643          * for transmit, receive, and continuation bpl's.
2644          */
2645         iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2646         iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2647         iocb_cmd->ulpBdeCount = 1;
2648         iocb_cmd->ulpLe = 1;
2649
2650         fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2651         fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2652
2653         /*
2654          * Due to difference in data length between DIF/non-DIF paths,
2655          * we need to set word 4 of IOCB here
2656          */
2657         iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2658
2659         /*
2660          * For First burst, we may need to adjust the initial transfer
2661          * length for DIF
2662          */
2663         if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2664             (fcpdl < vport->cfg_first_burst_size))
2665                 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2666
2667         return 0;
2668 err:
2669         if (lpfc_cmd->seg_cnt)
2670                 scsi_dma_unmap(scsi_cmnd);
2671         if (lpfc_cmd->prot_seg_cnt)
2672                 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2673                              scsi_prot_sg_count(scsi_cmnd),
2674                              scsi_cmnd->sc_data_direction);
2675
2676         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2677                         "9023 Cannot setup S/G List for HBA"
2678                         "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2679                         lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2680                         phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2681                         prot_group_type, num_bde);
2682
2683         lpfc_cmd->seg_cnt = 0;
2684         lpfc_cmd->prot_seg_cnt = 0;
2685         return ret;
2686 }
2687
2688 /*
2689  * This function calcuates the T10 DIF guard tag
2690  * on the specified data using a CRC algorithmn
2691  * using crc_t10dif.
2692  */
2693 static uint16_t
2694 lpfc_bg_crc(uint8_t *data, int count)
2695 {
2696         uint16_t crc = 0;
2697         uint16_t x;
2698
2699         crc = crc_t10dif(data, count);
2700         x = cpu_to_be16(crc);
2701         return x;
2702 }
2703
2704 /*
2705  * This function calcuates the T10 DIF guard tag
2706  * on the specified data using a CSUM algorithmn
2707  * using ip_compute_csum.
2708  */
2709 static uint16_t
2710 lpfc_bg_csum(uint8_t *data, int count)
2711 {
2712         uint16_t ret;
2713
2714         ret = ip_compute_csum(data, count);
2715         return ret;
2716 }
2717
2718 /*
2719  * This function examines the protection data to try to determine
2720  * what type of T10-DIF error occurred.
2721  */
2722 static void
2723 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2724 {
2725         struct scatterlist *sgpe; /* s/g prot entry */
2726         struct scatterlist *sgde; /* s/g data entry */
2727         struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2728         struct scsi_dif_tuple *src = NULL;
2729         uint8_t *data_src = NULL;
2730         uint16_t guard_tag;
2731         uint16_t start_app_tag, app_tag;
2732         uint32_t start_ref_tag, ref_tag;
2733         int prot, protsegcnt;
2734         int err_type, len, data_len;
2735         int chk_ref, chk_app, chk_guard;
2736         uint16_t sum;
2737         unsigned blksize;
2738
2739         err_type = BGS_GUARD_ERR_MASK;
2740         sum = 0;
2741         guard_tag = 0;
2742
2743         /* First check to see if there is protection data to examine */
2744         prot = scsi_get_prot_op(cmd);
2745         if ((prot == SCSI_PROT_READ_STRIP) ||
2746             (prot == SCSI_PROT_WRITE_INSERT) ||
2747             (prot == SCSI_PROT_NORMAL))
2748                 goto out;
2749
2750         /* Currently the driver just supports ref_tag and guard_tag checking */
2751         chk_ref = 1;
2752         chk_app = 0;
2753         chk_guard = 0;
2754
2755         /* Setup a ptr to the protection data provided by the SCSI host */
2756         sgpe = scsi_prot_sglist(cmd);
2757         protsegcnt = lpfc_cmd->prot_seg_cnt;
2758
2759         if (sgpe && protsegcnt) {
2760
2761                 /*
2762                  * We will only try to verify guard tag if the segment
2763                  * data length is a multiple of the blksize.
2764                  */
2765                 sgde = scsi_sglist(cmd);
2766                 blksize = lpfc_cmd_blksize(cmd);
2767                 data_src = (uint8_t *)sg_virt(sgde);
2768                 data_len = sgde->length;
2769                 if ((data_len & (blksize - 1)) == 0)
2770                         chk_guard = 1;
2771
2772                 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2773                 start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
2774                 start_app_tag = src->app_tag;
2775                 len = sgpe->length;
2776                 while (src && protsegcnt) {
2777                         while (len) {
2778
2779                                 /*
2780                                  * First check to see if a protection data
2781                                  * check is valid
2782                                  */
2783                                 if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2784                                     (src->app_tag == T10_PI_APP_ESCAPE)) {
2785                                         start_ref_tag++;
2786                                         goto skipit;
2787                                 }
2788
2789                                 /* First Guard Tag checking */
2790                                 if (chk_guard) {
2791                                         guard_tag = src->guard_tag;
2792                                         if (lpfc_cmd_guard_csum(cmd))
2793                                                 sum = lpfc_bg_csum(data_src,
2794                                                                    blksize);
2795                                         else
2796                                                 sum = lpfc_bg_crc(data_src,
2797                                                                   blksize);
2798                                         if ((guard_tag != sum)) {
2799                                                 err_type = BGS_GUARD_ERR_MASK;
2800                                                 goto out;
2801                                         }
2802                                 }
2803
2804                                 /* Reference Tag checking */
2805                                 ref_tag = be32_to_cpu(src->ref_tag);
2806                                 if (chk_ref && (ref_tag != start_ref_tag)) {
2807                                         err_type = BGS_REFTAG_ERR_MASK;
2808                                         goto out;
2809                                 }
2810                                 start_ref_tag++;
2811
2812                                 /* App Tag checking */
2813                                 app_tag = src->app_tag;
2814                                 if (chk_app && (app_tag != start_app_tag)) {
2815                                         err_type = BGS_APPTAG_ERR_MASK;
2816                                         goto out;
2817                                 }
2818 skipit:
2819                                 len -= sizeof(struct scsi_dif_tuple);
2820                                 if (len < 0)
2821                                         len = 0;
2822                                 src++;
2823
2824                                 data_src += blksize;
2825                                 data_len -= blksize;
2826
2827                                 /*
2828                                  * Are we at the end of the Data segment?
2829                                  * The data segment is only used for Guard
2830                                  * tag checking.
2831                                  */
2832                                 if (chk_guard && (data_len == 0)) {
2833                                         chk_guard = 0;
2834                                         sgde = sg_next(sgde);
2835                                         if (!sgde)
2836                                                 goto out;
2837
2838                                         data_src = (uint8_t *)sg_virt(sgde);
2839                                         data_len = sgde->length;
2840                                         if ((data_len & (blksize - 1)) == 0)
2841                                                 chk_guard = 1;
2842                                 }
2843                         }
2844
2845                         /* Goto the next Protection data segment */
2846                         sgpe = sg_next(sgpe);
2847                         if (sgpe) {
2848                                 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2849                                 len = sgpe->length;
2850                         } else {
2851                                 src = NULL;
2852                         }
2853                         protsegcnt--;
2854                 }
2855         }
2856 out:
2857         if (err_type == BGS_GUARD_ERR_MASK) {
2858                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2859                                         0x10, 0x1);
2860                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2861                               SAM_STAT_CHECK_CONDITION;
2862                 phba->bg_guard_err_cnt++;
2863                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2864                                 "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
2865                                 (unsigned long)scsi_get_lba(cmd),
2866                                 sum, guard_tag);
2867
2868         } else if (err_type == BGS_REFTAG_ERR_MASK) {
2869                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2870                                         0x10, 0x3);
2871                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2872                               SAM_STAT_CHECK_CONDITION;
2873
2874                 phba->bg_reftag_err_cnt++;
2875                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2876                                 "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
2877                                 (unsigned long)scsi_get_lba(cmd),
2878                                 ref_tag, start_ref_tag);
2879
2880         } else if (err_type == BGS_APPTAG_ERR_MASK) {
2881                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2882                                         0x10, 0x2);
2883                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2884                               SAM_STAT_CHECK_CONDITION;
2885
2886                 phba->bg_apptag_err_cnt++;
2887                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2888                                 "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
2889                                 (unsigned long)scsi_get_lba(cmd),
2890                                 app_tag, start_app_tag);
2891         }
2892 }
2893
2894 /*
2895  * This function checks for BlockGuard errors detected by
2896  * the HBA.  In case of errors, the ASC/ASCQ fields in the
2897  * sense buffer will be set accordingly, paired with
2898  * ILLEGAL_REQUEST to signal to the kernel that the HBA
2899  * detected corruption.
2900  *
2901  * Returns:
2902  *  0 - No error found
2903  *  1 - BlockGuard error found
2904  * -1 - Internal error (bad profile, ...etc)
2905  */
2906 static int
2907 lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2908                        struct lpfc_wcqe_complete *wcqe)
2909 {
2910         struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2911         int ret = 0;
2912         u32 status = bf_get(lpfc_wcqe_c_status, wcqe);
2913         u32 bghm = 0;
2914         u32 bgstat = 0;
2915         u64 failing_sector = 0;
2916
2917         if (status == CQE_STATUS_DI_ERROR) {
2918                 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
2919                         bgstat |= BGS_GUARD_ERR_MASK;
2920                 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */
2921                         bgstat |= BGS_APPTAG_ERR_MASK;
2922                 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */
2923                         bgstat |= BGS_REFTAG_ERR_MASK;
2924
2925                 /* Check to see if there was any good data before the error */
2926                 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
2927                         bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
2928                         bghm = wcqe->total_data_placed;
2929                 }
2930
2931                 /*
2932                  * Set ALL the error bits to indicate we don't know what
2933                  * type of error it is.
2934                  */
2935                 if (!bgstat)
2936                         bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
2937                                 BGS_GUARD_ERR_MASK);
2938         }
2939
2940         if (lpfc_bgs_get_guard_err(bgstat)) {
2941                 ret = 1;
2942
2943                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2944                                         0x10, 0x1);
2945                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2946                               SAM_STAT_CHECK_CONDITION;
2947                 phba->bg_guard_err_cnt++;
2948                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2949                                 "9059 BLKGRD: Guard Tag error in cmd"
2950                                 " 0x%x lba 0x%llx blk cnt 0x%x "
2951                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2952                                 (unsigned long long)scsi_get_lba(cmd),
2953                                 blk_rq_sectors(cmd->request), bgstat, bghm);
2954         }
2955
2956         if (lpfc_bgs_get_reftag_err(bgstat)) {
2957                 ret = 1;
2958
2959                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2960                                         0x10, 0x3);
2961                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2962                               SAM_STAT_CHECK_CONDITION;
2963
2964                 phba->bg_reftag_err_cnt++;
2965                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2966                                 "9060 BLKGRD: Ref Tag error in cmd"
2967                                 " 0x%x lba 0x%llx blk cnt 0x%x "
2968                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2969                                 (unsigned long long)scsi_get_lba(cmd),
2970                                 blk_rq_sectors(cmd->request), bgstat, bghm);
2971         }
2972
2973         if (lpfc_bgs_get_apptag_err(bgstat)) {
2974                 ret = 1;
2975
2976                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2977                                         0x10, 0x2);
2978                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2979                               SAM_STAT_CHECK_CONDITION;
2980
2981                 phba->bg_apptag_err_cnt++;
2982                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2983                                 "9062 BLKGRD: App Tag error in cmd"
2984                                 " 0x%x lba 0x%llx blk cnt 0x%x "
2985                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2986                                 (unsigned long long)scsi_get_lba(cmd),
2987                                 blk_rq_sectors(cmd->request), bgstat, bghm);
2988         }
2989
2990         if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2991                 /*
2992                  * setup sense data descriptor 0 per SPC-4 as an information
2993                  * field, and put the failing LBA in it.
2994                  * This code assumes there was also a guard/app/ref tag error
2995                  * indication.
2996                  */
2997                 cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
2998                 cmd->sense_buffer[8] = 0;     /* Information descriptor type */
2999                 cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
3000                 cmd->sense_buffer[10] = 0x80; /* Validity bit */
3001
3002                 /* bghm is a "on the wire" FC frame based count */
3003                 switch (scsi_get_prot_op(cmd)) {
3004                 case SCSI_PROT_READ_INSERT:
3005                 case SCSI_PROT_WRITE_STRIP:
3006                         bghm /= cmd->device->sector_size;
3007                         break;
3008                 case SCSI_PROT_READ_STRIP:
3009                 case SCSI_PROT_WRITE_INSERT:
3010                 case SCSI_PROT_READ_PASS:
3011                 case SCSI_PROT_WRITE_PASS:
3012                         bghm /= (cmd->device->sector_size +
3013                                 sizeof(struct scsi_dif_tuple));
3014                         break;
3015                 }
3016
3017                 failing_sector = scsi_get_lba(cmd);
3018                 failing_sector += bghm;
3019
3020                 /* Descriptor Information */
3021                 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3022         }
3023
3024         if (!ret) {
3025                 /* No error was reported - problem in FW? */
3026                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3027                                 "9068 BLKGRD: Unknown error in cmd"
3028                                 " 0x%x lba 0x%llx blk cnt 0x%x "
3029                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3030                                 (unsigned long long)scsi_get_lba(cmd),
3031                                 blk_rq_sectors(cmd->request), bgstat, bghm);
3032
3033                 /* Calcuate what type of error it was */
3034                 lpfc_calc_bg_err(phba, lpfc_cmd);
3035         }
3036         return ret;
3037 }
3038
3039 /*
3040  * This function checks for BlockGuard errors detected by
3041  * the HBA.  In case of errors, the ASC/ASCQ fields in the
3042  * sense buffer will be set accordingly, paired with
3043  * ILLEGAL_REQUEST to signal to the kernel that the HBA
3044  * detected corruption.
3045  *
3046  * Returns:
3047  *  0 - No error found
3048  *  1 - BlockGuard error found
3049  * -1 - Internal error (bad profile, ...etc)
3050  */
3051 static int
3052 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
3053                   struct lpfc_iocbq *pIocbOut)
3054 {
3055         struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3056         struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
3057         int ret = 0;
3058         uint32_t bghm = bgf->bghm;
3059         uint32_t bgstat = bgf->bgstat;
3060         uint64_t failing_sector = 0;
3061
3062         if (lpfc_bgs_get_invalid_prof(bgstat)) {
3063                 cmd->result = DID_ERROR << 16;
3064                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3065                                 "9072 BLKGRD: Invalid BG Profile in cmd"
3066                                 " 0x%x lba 0x%llx blk cnt 0x%x "
3067                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3068                                 (unsigned long long)scsi_get_lba(cmd),
3069                                 blk_rq_sectors(cmd->request), bgstat, bghm);
3070                 ret = (-1);
3071                 goto out;
3072         }
3073
3074         if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
3075                 cmd->result = DID_ERROR << 16;
3076                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3077                                 "9073 BLKGRD: Invalid BG PDIF Block in cmd"
3078                                 " 0x%x lba 0x%llx blk cnt 0x%x "
3079                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3080                                 (unsigned long long)scsi_get_lba(cmd),
3081                                 blk_rq_sectors(cmd->request), bgstat, bghm);
3082                 ret = (-1);
3083                 goto out;
3084         }
3085
3086         if (lpfc_bgs_get_guard_err(bgstat)) {
3087                 ret = 1;
3088
3089                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3090                                 0x10, 0x1);
3091                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
3092                               SAM_STAT_CHECK_CONDITION;
3093                 phba->bg_guard_err_cnt++;
3094                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3095                                 "9055 BLKGRD: Guard Tag error in cmd"
3096                                 " 0x%x lba 0x%llx blk cnt 0x%x "
3097                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3098                                 (unsigned long long)scsi_get_lba(cmd),
3099                                 blk_rq_sectors(cmd->request), bgstat, bghm);
3100         }
3101
3102         if (lpfc_bgs_get_reftag_err(bgstat)) {
3103                 ret = 1;
3104
3105                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3106                                 0x10, 0x3);
3107                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
3108                               SAM_STAT_CHECK_CONDITION;
3109
3110                 phba->bg_reftag_err_cnt++;
3111                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3112                                 "9056 BLKGRD: Ref Tag error in cmd"
3113                                 " 0x%x lba 0x%llx blk cnt 0x%x "
3114                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3115                                 (unsigned long long)scsi_get_lba(cmd),
3116                                 blk_rq_sectors(cmd->request), bgstat, bghm);
3117         }
3118
3119         if (lpfc_bgs_get_apptag_err(bgstat)) {
3120                 ret = 1;
3121
3122                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3123                                 0x10, 0x2);
3124                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
3125                               SAM_STAT_CHECK_CONDITION;
3126
3127                 phba->bg_apptag_err_cnt++;
3128                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3129                                 "9061 BLKGRD: App Tag error in cmd"
3130                                 " 0x%x lba 0x%llx blk cnt 0x%x "
3131                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3132                                 (unsigned long long)scsi_get_lba(cmd),
3133                                 blk_rq_sectors(cmd->request), bgstat, bghm);
3134         }
3135
3136         if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3137                 /*
3138                  * setup sense data descriptor 0 per SPC-4 as an information
3139                  * field, and put the failing LBA in it.
3140                  * This code assumes there was also a guard/app/ref tag error
3141                  * indication.
3142                  */
3143                 cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
3144                 cmd->sense_buffer[8] = 0;     /* Information descriptor type */
3145                 cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
3146                 cmd->sense_buffer[10] = 0x80; /* Validity bit */
3147
3148                 /* bghm is a "on the wire" FC frame based count */
3149                 switch (scsi_get_prot_op(cmd)) {
3150                 case SCSI_PROT_READ_INSERT:
3151                 case SCSI_PROT_WRITE_STRIP:
3152                         bghm /= cmd->device->sector_size;
3153                         break;
3154                 case SCSI_PROT_READ_STRIP:
3155                 case SCSI_PROT_WRITE_INSERT:
3156                 case SCSI_PROT_READ_PASS:
3157                 case SCSI_PROT_WRITE_PASS:
3158                         bghm /= (cmd->device->sector_size +
3159                                 sizeof(struct scsi_dif_tuple));
3160                         break;
3161                 }
3162
3163                 failing_sector = scsi_get_lba(cmd);
3164                 failing_sector += bghm;
3165
3166                 /* Descriptor Information */
3167                 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3168         }
3169
3170         if (!ret) {
3171                 /* No error was reported - problem in FW? */
3172                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3173                                 "9057 BLKGRD: Unknown error in cmd"
3174                                 " 0x%x lba 0x%llx blk cnt 0x%x "
3175                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3176                                 (unsigned long long)scsi_get_lba(cmd),
3177                                 blk_rq_sectors(cmd->request), bgstat, bghm);
3178
3179                 /* Calcuate what type of error it was */
3180                 lpfc_calc_bg_err(phba, lpfc_cmd);
3181         }
3182 out:
3183         return ret;
3184 }
3185
3186 /**
3187  * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3188  * @phba: The Hba for which this call is being executed.
3189  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3190  *
3191  * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
3192  * field of @lpfc_cmd for device with SLI-4 interface spec.
3193  *
3194  * Return codes:
3195  *      2 - Error - Do not retry
3196  *      1 - Error - Retry
3197  *      0 - Success
3198  **/
3199 static int
3200 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3201 {
3202         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3203         struct scatterlist *sgel = NULL;
3204         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3205         struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
3206         struct sli4_sge *first_data_sgl;
3207         struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3208         struct lpfc_vport *vport = phba->pport;
3209         union lpfc_wqe128 *wqe = &pwqeq->wqe;
3210         dma_addr_t physaddr;
3211         uint32_t num_bde = 0;
3212         uint32_t dma_len;
3213         uint32_t dma_offset = 0;
3214         int nseg, i, j;
3215         struct ulp_bde64 *bde;
3216         bool lsp_just_set = false;
3217         struct sli4_hybrid_sgl *sgl_xtra = NULL;
3218
3219         /*
3220          * There are three possibilities here - use scatter-gather segment, use
3221          * the single mapping, or neither.  Start the lpfc command prep by
3222          * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3223          * data bde entry.
3224          */
3225         if (scsi_sg_count(scsi_cmnd)) {
3226                 /*
3227                  * The driver stores the segment count returned from pci_map_sg
3228                  * because this a count of dma-mappings used to map the use_sg
3229                  * pages.  They are not guaranteed to be the same for those
3230                  * architectures that implement an IOMMU.
3231                  */
3232
3233                 nseg = scsi_dma_map(scsi_cmnd);
3234                 if (unlikely(nseg <= 0))
3235                         return 1;
3236                 sgl += 1;
3237                 /* clear the last flag in the fcp_rsp map entry */
3238                 sgl->word2 = le32_to_cpu(sgl->word2);
3239                 bf_set(lpfc_sli4_sge_last, sgl, 0);
3240                 sgl->word2 = cpu_to_le32(sgl->word2);
3241                 sgl += 1;
3242                 first_data_sgl = sgl;
3243                 lpfc_cmd->seg_cnt = nseg;
3244                 if (!phba->cfg_xpsgl &&
3245                     lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3246                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3247                                         "9074 BLKGRD:"
3248                                         " %s: Too many sg segments from "
3249                                         "dma_map_sg.  Config %d, seg_cnt %d\n",
3250                                         __func__, phba->cfg_sg_seg_cnt,
3251                                         lpfc_cmd->seg_cnt);
3252                         WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3253                         lpfc_cmd->seg_cnt = 0;
3254                         scsi_dma_unmap(scsi_cmnd);
3255                         return 2;
3256                 }
3257
3258                 /*
3259                  * The driver established a maximum scatter-gather segment count
3260                  * during probe that limits the number of sg elements in any
3261                  * single scsi command.  Just run through the seg_cnt and format
3262                  * the sge's.
3263                  * When using SLI-3 the driver will try to fit all the BDEs into
3264                  * the IOCB. If it can't then the BDEs get added to a BPL as it
3265                  * does for SLI-2 mode.
3266                  */
3267
3268                 /* for tracking segment boundaries */
3269                 sgel = scsi_sglist(scsi_cmnd);
3270                 j = 2;
3271                 for (i = 0; i < nseg; i++) {
3272                         sgl->word2 = 0;
3273                         if ((num_bde + 1) == nseg) {
3274                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
3275                                 bf_set(lpfc_sli4_sge_type, sgl,
3276                                        LPFC_SGE_TYPE_DATA);
3277                         } else {
3278                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
3279
3280                                 /* do we need to expand the segment */
3281                                 if (!lsp_just_set &&
3282                                     !((j + 1) % phba->border_sge_num) &&
3283                                     ((nseg - 1) != i)) {
3284                                         /* set LSP type */
3285                                         bf_set(lpfc_sli4_sge_type, sgl,
3286                                                LPFC_SGE_TYPE_LSP);
3287
3288                                         sgl_xtra = lpfc_get_sgl_per_hdwq(
3289                                                         phba, lpfc_cmd);
3290
3291                                         if (unlikely(!sgl_xtra)) {
3292                                                 lpfc_cmd->seg_cnt = 0;
3293                                                 scsi_dma_unmap(scsi_cmnd);
3294                                                 return 1;
3295                                         }
3296                                         sgl->addr_lo = cpu_to_le32(putPaddrLow(
3297                                                        sgl_xtra->dma_phys_sgl));
3298                                         sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3299                                                        sgl_xtra->dma_phys_sgl));
3300
3301                                 } else {
3302                                         bf_set(lpfc_sli4_sge_type, sgl,
3303                                                LPFC_SGE_TYPE_DATA);
3304                                 }
3305                         }
3306
3307                         if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3308                                      LPFC_SGE_TYPE_LSP)) {
3309                                 if ((nseg - 1) == i)
3310                                         bf_set(lpfc_sli4_sge_last, sgl, 1);
3311
3312                                 physaddr = sg_dma_address(sgel);
3313                                 dma_len = sg_dma_len(sgel);
3314                                 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3315                                                            physaddr));
3316                                 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3317                                                            physaddr));
3318
3319                                 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3320                                 sgl->word2 = cpu_to_le32(sgl->word2);
3321                                 sgl->sge_len = cpu_to_le32(dma_len);
3322
3323                                 dma_offset += dma_len;
3324                                 sgel = sg_next(sgel);
3325
3326                                 sgl++;
3327                                 lsp_just_set = false;
3328
3329                         } else {
3330                                 sgl->word2 = cpu_to_le32(sgl->word2);
3331                                 sgl->sge_len = cpu_to_le32(
3332                                                      phba->cfg_sg_dma_buf_size);
3333
3334                                 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3335                                 i = i - 1;
3336
3337                                 lsp_just_set = true;
3338                         }
3339
3340                         j++;
3341                 }
3342                 /*
3343                  * Setup the first Payload BDE. For FCoE we just key off
3344                  * Performance Hints, for FC we use lpfc_enable_pbde.
3345                  * We populate words 13-15 of IOCB/WQE.
3346                  */
3347                 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3348                     phba->cfg_enable_pbde) {
3349                         bde = (struct ulp_bde64 *)
3350                                 &wqe->words[13];
3351                         bde->addrLow = first_data_sgl->addr_lo;
3352                         bde->addrHigh = first_data_sgl->addr_hi;
3353                         bde->tus.f.bdeSize =
3354                                         le32_to_cpu(first_data_sgl->sge_len);
3355                         bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3356                         bde->tus.w = cpu_to_le32(bde->tus.w);
3357
3358                 } else {
3359                         memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
3360                 }
3361         } else {
3362                 sgl += 1;
3363                 /* clear the last flag in the fcp_rsp map entry */
3364                 sgl->word2 = le32_to_cpu(sgl->word2);
3365                 bf_set(lpfc_sli4_sge_last, sgl, 1);
3366                 sgl->word2 = cpu_to_le32(sgl->word2);
3367
3368                 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3369                     phba->cfg_enable_pbde) {
3370                         bde = (struct ulp_bde64 *)
3371                                 &wqe->words[13];
3372                         memset(bde, 0, (sizeof(uint32_t) * 3));
3373                 }
3374         }
3375
3376         /* Word 11 */
3377         if (phba->cfg_enable_pbde)
3378                 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
3379
3380         /*
3381          * Finish initializing those IOCB fields that are dependent on the
3382          * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
3383          * explicitly reinitialized.
3384          * all iocb memory resources are reused.
3385          */
3386         fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3387         /* Set first-burst provided it was successfully negotiated */
3388         if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3389             vport->cfg_first_burst_size &&
3390             scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3391                 u32 init_len, total_len;
3392
3393                 total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3394                 init_len = min(total_len, vport->cfg_first_burst_size);
3395
3396                 /* Word 4 & 5 */
3397                 wqe->fcp_iwrite.initial_xfer_len = init_len;
3398                 wqe->fcp_iwrite.total_xfer_len = total_len;
3399         } else {
3400                 /* Word 4 */
3401                 wqe->fcp_iwrite.total_xfer_len =
3402                         be32_to_cpu(fcp_cmnd->fcpDl);
3403         }
3404
3405         /*
3406          * If the OAS driver feature is enabled and the lun is enabled for
3407          * OAS, set the oas iocb related flags.
3408          */
3409         if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3410                 scsi_cmnd->device->hostdata)->oas_enabled) {
3411                 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3412                 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3413                         scsi_cmnd->device->hostdata)->priority;
3414
3415                 /* Word 10 */
3416                 bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3417                 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3418
3419                 if (lpfc_cmd->cur_iocbq.priority)
3420                         bf_set(wqe_ccp, &wqe->generic.wqe_com,
3421                                (lpfc_cmd->cur_iocbq.priority << 1));
3422                 else
3423                         bf_set(wqe_ccp, &wqe->generic.wqe_com,
3424                                (phba->cfg_XLanePriority << 1));
3425         }
3426
3427         return 0;
3428 }
3429
3430 /**
3431  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3432  * @phba: The Hba for which this call is being executed.
3433  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3434  *
3435  * This is the protection/DIF aware version of
3436  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3437  * two functions eventually, but for now, it's here
3438  * Return codes:
3439  *      2 - Error - Do not retry
3440  *      1 - Error - Retry
3441  *      0 - Success
3442  **/
3443 static int
3444 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3445                 struct lpfc_io_buf *lpfc_cmd)
3446 {
3447         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3448         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3449         struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3450         struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3451         union lpfc_wqe128 *wqe = &pwqeq->wqe;
3452         uint32_t num_sge = 0;
3453         int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3454         int prot_group_type = 0;
3455         int fcpdl;
3456         int ret = 1;
3457         struct lpfc_vport *vport = phba->pport;
3458
3459         /*
3460          * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3461          *  fcp_rsp regions to the first data sge entry
3462          */
3463         if (scsi_sg_count(scsi_cmnd)) {
3464                 /*
3465                  * The driver stores the segment count returned from pci_map_sg
3466                  * because this a count of dma-mappings used to map the use_sg
3467                  * pages.  They are not guaranteed to be the same for those
3468                  * architectures that implement an IOMMU.
3469                  */
3470                 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3471                                         scsi_sglist(scsi_cmnd),
3472                                         scsi_sg_count(scsi_cmnd), datadir);
3473                 if (unlikely(!datasegcnt))
3474                         return 1;
3475
3476                 sgl += 1;
3477                 /* clear the last flag in the fcp_rsp map entry */
3478                 sgl->word2 = le32_to_cpu(sgl->word2);
3479                 bf_set(lpfc_sli4_sge_last, sgl, 0);
3480                 sgl->word2 = cpu_to_le32(sgl->word2);
3481
3482                 sgl += 1;
3483                 lpfc_cmd->seg_cnt = datasegcnt;
3484
3485                 /* First check if data segment count from SCSI Layer is good */
3486                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3487                     !phba->cfg_xpsgl) {
3488                         WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3489                         ret = 2;
3490                         goto err;
3491                 }
3492
3493                 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3494
3495                 switch (prot_group_type) {
3496                 case LPFC_PG_TYPE_NO_DIF:
3497                         /* Here we need to add a DISEED to the count */
3498                         if (((lpfc_cmd->seg_cnt + 1) >
3499                                         phba->cfg_total_seg_cnt) &&
3500                             !phba->cfg_xpsgl) {
3501                                 ret = 2;
3502                                 goto err;
3503                         }
3504
3505                         num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3506                                         datasegcnt, lpfc_cmd);
3507
3508                         /* we should have 2 or more entries in buffer list */
3509                         if (num_sge < 2) {
3510                                 ret = 2;
3511                                 goto err;
3512                         }
3513                         break;
3514
3515                 case LPFC_PG_TYPE_DIF_BUF:
3516                         /*
3517                          * This type indicates that protection buffers are
3518                          * passed to the driver, so that needs to be prepared
3519                          * for DMA
3520                          */
3521                         protsegcnt = dma_map_sg(&phba->pcidev->dev,
3522                                         scsi_prot_sglist(scsi_cmnd),
3523                                         scsi_prot_sg_count(scsi_cmnd), datadir);
3524                         if (unlikely(!protsegcnt)) {
3525                                 scsi_dma_unmap(scsi_cmnd);
3526                                 return 1;
3527                         }
3528
3529                         lpfc_cmd->prot_seg_cnt = protsegcnt;
3530                         /*
3531                          * There is a minimun of 3 SGEs used for every
3532                          * protection data segment.
3533                          */
3534                         if (((lpfc_cmd->prot_seg_cnt * 3) >
3535                                         (phba->cfg_total_seg_cnt - 2)) &&
3536                             !phba->cfg_xpsgl) {
3537                                 ret = 2;
3538                                 goto err;
3539                         }
3540
3541                         num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3542                                         datasegcnt, protsegcnt, lpfc_cmd);
3543
3544                         /* we should have 3 or more entries in buffer list */
3545                         if (num_sge < 3 ||
3546                             (num_sge > phba->cfg_total_seg_cnt &&
3547                              !phba->cfg_xpsgl)) {
3548                                 ret = 2;
3549                                 goto err;
3550                         }
3551                         break;
3552
3553                 case LPFC_PG_TYPE_INVALID:
3554                 default:
3555                         scsi_dma_unmap(scsi_cmnd);
3556                         lpfc_cmd->seg_cnt = 0;
3557
3558                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3559                                         "9083 Unexpected protection group %i\n",
3560                                         prot_group_type);
3561                         return 2;
3562                 }
3563         }
3564
3565         switch (scsi_get_prot_op(scsi_cmnd)) {
3566         case SCSI_PROT_WRITE_STRIP:
3567         case SCSI_PROT_READ_STRIP:
3568                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3569                 break;
3570         case SCSI_PROT_WRITE_INSERT:
3571         case SCSI_PROT_READ_INSERT:
3572                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3573                 break;
3574         case SCSI_PROT_WRITE_PASS:
3575         case SCSI_PROT_READ_PASS:
3576                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3577                 break;
3578         }
3579
3580         fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3581         fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3582
3583         /* Set first-burst provided it was successfully negotiated */
3584         if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3585             vport->cfg_first_burst_size &&
3586             scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3587                 u32 init_len, total_len;
3588
3589                 total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3590                 init_len = min(total_len, vport->cfg_first_burst_size);
3591
3592                 /* Word 4 & 5 */
3593                 wqe->fcp_iwrite.initial_xfer_len = init_len;
3594                 wqe->fcp_iwrite.total_xfer_len = total_len;
3595         } else {
3596                 /* Word 4 */
3597                 wqe->fcp_iwrite.total_xfer_len =
3598                         be32_to_cpu(fcp_cmnd->fcpDl);
3599         }
3600
3601         /*
3602          * If the OAS driver feature is enabled and the lun is enabled for
3603          * OAS, set the oas iocb related flags.
3604          */
3605         if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3606                 scsi_cmnd->device->hostdata)->oas_enabled) {
3607                 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3608
3609                 /* Word 10 */
3610                 bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3611                 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3612                 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3613                        (phba->cfg_XLanePriority << 1));
3614         }
3615
3616         /* Word 7. DIF Flags */
3617         if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS)
3618                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
3619         else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP)
3620                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
3621         else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT)
3622                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
3623
3624         lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS |
3625                                  LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
3626
3627         return 0;
3628 err:
3629         if (lpfc_cmd->seg_cnt)
3630                 scsi_dma_unmap(scsi_cmnd);
3631         if (lpfc_cmd->prot_seg_cnt)
3632                 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3633                              scsi_prot_sg_count(scsi_cmnd),
3634                              scsi_cmnd->sc_data_direction);
3635
3636         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3637                         "9084 Cannot setup S/G List for HBA"
3638                         "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3639                         lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3640                         phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3641                         prot_group_type, num_sge);
3642
3643         lpfc_cmd->seg_cnt = 0;
3644         lpfc_cmd->prot_seg_cnt = 0;
3645         return ret;
3646 }
3647
3648 /**
3649  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3650  * @phba: The Hba for which this call is being executed.
3651  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3652  *
3653  * This routine wraps the actual DMA mapping function pointer from the
3654  * lpfc_hba struct.
3655  *
3656  * Return codes:
3657  *      1 - Error
3658  *      0 - Success
3659  **/
3660 static inline int
3661 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3662 {
3663         return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3664 }
3665
3666 /**
3667  * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3668  * using BlockGuard.
3669  * @phba: The Hba for which this call is being executed.
3670  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3671  *
3672  * This routine wraps the actual DMA mapping function pointer from the
3673  * lpfc_hba struct.
3674  *
3675  * Return codes:
3676  *      1 - Error
3677  *      0 - Success
3678  **/
3679 static inline int
3680 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3681 {
3682         return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3683 }
3684
3685 /**
3686  * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
3687  * buffer
3688  * @phba: The Hba for which this call is being executed.
3689  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3690  * @tmo: Timeout value for IO
3691  *
3692  * This routine initializes IOCB/WQE data structure from scsi command
3693  *
3694  * Return codes:
3695  *      1 - Error
3696  *      0 - Success
3697  **/
3698 static inline int
3699 lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3700                         uint8_t tmo)
3701 {
3702         return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
3703 }
3704
3705 /**
3706  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3707  * @phba: Pointer to hba context object.
3708  * @vport: Pointer to vport object.
3709  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3710  * @rsp_iocb: Pointer to response iocb object which reported error.
3711  *
3712  * This function posts an event when there is a SCSI command reporting
3713  * error from the scsi device.
3714  **/
3715 static void
3716 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3717                 struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
3718         struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3719         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3720         uint32_t resp_info = fcprsp->rspStatus2;
3721         uint32_t scsi_status = fcprsp->rspStatus3;
3722         struct lpfc_fast_path_event *fast_path_evt = NULL;
3723         struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3724         unsigned long flags;
3725
3726         if (!pnode)
3727                 return;
3728
3729         /* If there is queuefull or busy condition send a scsi event */
3730         if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3731                 (cmnd->result == SAM_STAT_BUSY)) {
3732                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3733                 if (!fast_path_evt)
3734                         return;
3735                 fast_path_evt->un.scsi_evt.event_type =
3736                         FC_REG_SCSI_EVENT;
3737                 fast_path_evt->un.scsi_evt.subcategory =
3738                 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3739                 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3740                 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3741                 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3742                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3743                 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3744                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3745         } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3746                 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3747                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3748                 if (!fast_path_evt)
3749                         return;
3750                 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3751                         FC_REG_SCSI_EVENT;
3752                 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3753                         LPFC_EVENT_CHECK_COND;
3754                 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3755                         cmnd->device->lun;
3756                 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3757                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3758                 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3759                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3760                 fast_path_evt->un.check_cond_evt.sense_key =
3761                         cmnd->sense_buffer[2] & 0xf;
3762                 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3763                 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3764         } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3765                      fcpi_parm &&
3766                      ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3767                         ((scsi_status == SAM_STAT_GOOD) &&
3768                         !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3769                 /*
3770                  * If status is good or resid does not match with fcp_param and
3771                  * there is valid fcpi_parm, then there is a read_check error
3772                  */
3773                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3774                 if (!fast_path_evt)
3775                         return;
3776                 fast_path_evt->un.read_check_error.header.event_type =
3777                         FC_REG_FABRIC_EVENT;
3778                 fast_path_evt->un.read_check_error.header.subcategory =
3779                         LPFC_EVENT_FCPRDCHKERR;
3780                 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3781                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3782                 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3783                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3784                 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3785                 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3786                 fast_path_evt->un.read_check_error.fcpiparam =
3787                         fcpi_parm;
3788         } else
3789                 return;
3790
3791         fast_path_evt->vport = vport;
3792         spin_lock_irqsave(&phba->hbalock, flags);
3793         list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3794         spin_unlock_irqrestore(&phba->hbalock, flags);
3795         lpfc_worker_wake_up(phba);
3796         return;
3797 }
3798
3799 /**
3800  * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3801  * @phba: The HBA for which this call is being executed.
3802  * @psb: The scsi buffer which is going to be un-mapped.
3803  *
3804  * This routine does DMA un-mapping of scatter gather list of scsi command
3805  * field of @lpfc_cmd for device with SLI-3 interface spec.
3806  **/
3807 static void
3808 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3809 {
3810         /*
3811          * There are only two special cases to consider.  (1) the scsi command
3812          * requested scatter-gather usage or (2) the scsi command allocated
3813          * a request buffer, but did not request use_sg.  There is a third
3814          * case, but it does not require resource deallocation.
3815          */
3816         if (psb->seg_cnt > 0)
3817                 scsi_dma_unmap(psb->pCmd);
3818         if (psb->prot_seg_cnt > 0)
3819                 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3820                                 scsi_prot_sg_count(psb->pCmd),
3821                                 psb->pCmd->sc_data_direction);
3822 }
3823
3824 /**
3825  * lpfc_handler_fcp_err - FCP response handler
3826  * @vport: The virtual port for which this call is being executed.
3827  * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
3828  * @rsp_iocb: The response IOCB which contains FCP error.
3829  *
3830  * This routine is called to process response IOCB with status field
3831  * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3832  * based upon SCSI and FCP error.
3833  **/
3834 static void
3835 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3836                     uint32_t fcpi_parm)
3837 {
3838         struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3839         struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3840         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3841         uint32_t resp_info = fcprsp->rspStatus2;
3842         uint32_t scsi_status = fcprsp->rspStatus3;
3843         uint32_t *lp;
3844         uint32_t host_status = DID_OK;
3845         uint32_t rsplen = 0;
3846         uint32_t fcpDl;
3847         uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3848
3849
3850         /*
3851          *  If this is a task management command, there is no
3852          *  scsi packet associated with this lpfc_cmd.  The driver
3853          *  consumes it.
3854          */
3855         if (fcpcmd->fcpCntl2) {
3856                 scsi_status = 0;
3857                 goto out;
3858         }
3859
3860         if (resp_info & RSP_LEN_VALID) {
3861                 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3862                 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3863                         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3864                                          "2719 Invalid response length: "
3865                                          "tgt x%x lun x%llx cmnd x%x rsplen "
3866                                          "x%x\n", cmnd->device->id,
3867                                          cmnd->device->lun, cmnd->cmnd[0],
3868                                          rsplen);
3869                         host_status = DID_ERROR;
3870                         goto out;
3871                 }
3872                 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3873                         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3874                                  "2757 Protocol failure detected during "
3875                                  "processing of FCP I/O op: "
3876                                  "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3877                                  cmnd->device->id,
3878                                  cmnd->device->lun, cmnd->cmnd[0],
3879                                  fcprsp->rspInfo3);
3880                         host_status = DID_ERROR;
3881                         goto out;
3882                 }
3883         }
3884
3885         if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3886                 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3887                 if (snslen > SCSI_SENSE_BUFFERSIZE)
3888                         snslen = SCSI_SENSE_BUFFERSIZE;
3889
3890                 if (resp_info & RSP_LEN_VALID)
3891                   rsplen = be32_to_cpu(fcprsp->rspRspLen);
3892                 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3893         }
3894         lp = (uint32_t *)cmnd->sense_buffer;
3895
3896         /* special handling for under run conditions */
3897         if (!scsi_status && (resp_info & RESID_UNDER)) {
3898                 /* don't log under runs if fcp set... */
3899                 if (vport->cfg_log_verbose & LOG_FCP)
3900                         logit = LOG_FCP_ERROR;
3901                 /* unless operator says so */
3902                 if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3903                         logit = LOG_FCP_UNDER;
3904         }
3905
3906         lpfc_printf_vlog(vport, KERN_WARNING, logit,
3907                          "9024 FCP command x%x failed: x%x SNS x%x x%x "
3908                          "Data: x%x x%x x%x x%x x%x\n",
3909                          cmnd->cmnd[0], scsi_status,
3910                          be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3911                          be32_to_cpu(fcprsp->rspResId),
3912                          be32_to_cpu(fcprsp->rspSnsLen),
3913                          be32_to_cpu(fcprsp->rspRspLen),
3914                          fcprsp->rspInfo3);
3915
3916         scsi_set_resid(cmnd, 0);
3917         fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3918         if (resp_info & RESID_UNDER) {
3919                 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3920
3921                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3922                                  "9025 FCP Underrun, expected %d, "
3923                                  "residual %d Data: x%x x%x x%x\n",
3924                                  fcpDl,
3925                                  scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3926                                  cmnd->underflow);
3927
3928                 /*
3929                  * If there is an under run, check if under run reported by
3930                  * storage array is same as the under run reported by HBA.
3931                  * If this is not same, there is a dropped frame.
3932                  */
3933                 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3934                         lpfc_printf_vlog(vport, KERN_WARNING,
3935                                          LOG_FCP | LOG_FCP_ERROR,
3936                                          "9026 FCP Read Check Error "
3937                                          "and Underrun Data: x%x x%x x%x x%x\n",
3938                                          fcpDl,
3939                                          scsi_get_resid(cmnd), fcpi_parm,
3940                                          cmnd->cmnd[0]);
3941                         scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3942                         host_status = DID_ERROR;
3943                 }
3944                 /*
3945                  * The cmnd->underflow is the minimum number of bytes that must
3946                  * be transferred for this command.  Provided a sense condition
3947                  * is not present, make sure the actual amount transferred is at
3948                  * least the underflow value or fail.
3949                  */
3950                 if (!(resp_info & SNS_LEN_VALID) &&
3951                     (scsi_status == SAM_STAT_GOOD) &&
3952                     (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3953                      < cmnd->underflow)) {
3954                         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3955                                          "9027 FCP command x%x residual "
3956                                          "underrun converted to error "
3957                                          "Data: x%x x%x x%x\n",
3958                                          cmnd->cmnd[0], scsi_bufflen(cmnd),
3959                                          scsi_get_resid(cmnd), cmnd->underflow);
3960                         host_status = DID_ERROR;
3961                 }
3962         } else if (resp_info & RESID_OVER) {
3963                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3964                                  "9028 FCP command x%x residual overrun error. "
3965                                  "Data: x%x x%x\n", cmnd->cmnd[0],
3966                                  scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3967                 host_status = DID_ERROR;
3968
3969         /*
3970          * Check SLI validation that all the transfer was actually done
3971          * (fcpi_parm should be zero). Apply check only to reads.
3972          */
3973         } else if (fcpi_parm) {
3974                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3975                                  "9029 FCP %s Check Error Data: "
3976                                  "x%x x%x x%x x%x x%x\n",
3977                                  ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
3978                                  "Read" : "Write"),
3979                                  fcpDl, be32_to_cpu(fcprsp->rspResId),
3980                                  fcpi_parm, cmnd->cmnd[0], scsi_status);
3981
3982                 /* There is some issue with the LPe12000 that causes it
3983                  * to miscalculate the fcpi_parm and falsely trip this
3984                  * recovery logic.  Detect this case and don't error when true.
3985                  */
3986                 if (fcpi_parm > fcpDl)
3987                         goto out;
3988
3989                 switch (scsi_status) {
3990                 case SAM_STAT_GOOD:
3991                 case SAM_STAT_CHECK_CONDITION:
3992                         /* Fabric dropped a data frame. Fail any successful
3993                          * command in which we detected dropped frames.
3994                          * A status of good or some check conditions could
3995                          * be considered a successful command.
3996                          */
3997                         host_status = DID_ERROR;
3998                         break;
3999                 }
4000                 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
4001         }
4002
4003  out:
4004         cmnd->result = host_status << 16 | scsi_status;
4005         lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
4006 }
4007
4008 /**
4009  * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
4010  * @phba: The hba for which this call is being executed.
4011  * @pwqeIn: The command WQE for the scsi cmnd.
4012  * @pwqeOut: The response WQE for the scsi cmnd.
4013  *
4014  * This routine assigns scsi command result by looking into response WQE
4015  * status field appropriately. This routine handles QUEUE FULL condition as
4016  * well by ramping down device queue depth.
4017  **/
4018 static void
4019 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
4020                          struct lpfc_wcqe_complete *wcqe)
4021 {
4022         struct lpfc_io_buf *lpfc_cmd =
4023                 (struct lpfc_io_buf *)pwqeIn->context1;
4024         struct lpfc_vport *vport = pwqeIn->vport;
4025         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4026         struct lpfc_nodelist *ndlp = rdata->pnode;
4027         struct scsi_cmnd *cmd;
4028         unsigned long flags;
4029         struct lpfc_fast_path_event *fast_path_evt;
4030         struct Scsi_Host *shost;
4031         u32 logit = LOG_FCP;
4032         u32 status, idx;
4033         unsigned long iflags = 0;
4034
4035         /* Sanity check on return of outstanding command */
4036         if (!lpfc_cmd) {
4037                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4038                                  "9032 Null lpfc_cmd pointer. No "
4039                                  "release, skip completion\n");
4040                 return;
4041         }
4042
4043         if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
4044                 /* TOREMOVE - currently this flag is checked during
4045                  * the release of lpfc_iocbq. Remove once we move
4046                  * to lpfc_wqe_job construct.
4047                  *
4048                  * This needs to be done outside buf_lock
4049                  */
4050                 spin_lock_irqsave(&phba->hbalock, iflags);
4051                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY;
4052                 spin_unlock_irqrestore(&phba->hbalock, iflags);
4053         }
4054
4055         /* Guard against abort handler being called at same time */
4056         spin_lock(&lpfc_cmd->buf_lock);
4057
4058         /* Sanity check on return of outstanding command */
4059         cmd = lpfc_cmd->pCmd;
4060         if (!cmd || !phba) {
4061                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4062                                  "9042 I/O completion: Not an active IO\n");
4063                 spin_unlock(&lpfc_cmd->buf_lock);
4064                 lpfc_release_scsi_buf(phba, lpfc_cmd);
4065                 return;
4066         }
4067         idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4068         if (phba->sli4_hba.hdwq)
4069                 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4070
4071 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4072         if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4073                 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4074 #endif
4075         shost = cmd->device->host;
4076
4077         status = bf_get(lpfc_wcqe_c_status, wcqe);
4078         lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
4079         lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
4080
4081         lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4082         if (bf_get(lpfc_wcqe_c_xb, wcqe))
4083                 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4084
4085 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4086         if (lpfc_cmd->prot_data_type) {
4087                 struct scsi_dif_tuple *src = NULL;
4088
4089                 src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4090                 /*
4091                  * Used to restore any changes to protection
4092                  * data for error injection.
4093                  */
4094                 switch (lpfc_cmd->prot_data_type) {
4095                 case LPFC_INJERR_REFTAG:
4096                         src->ref_tag =
4097                                 lpfc_cmd->prot_data;
4098                         break;
4099                 case LPFC_INJERR_APPTAG:
4100                         src->app_tag =
4101                                 (uint16_t)lpfc_cmd->prot_data;
4102                         break;
4103                 case LPFC_INJERR_GUARD:
4104                         src->guard_tag =
4105                                 (uint16_t)lpfc_cmd->prot_data;
4106                         break;
4107                 default:
4108                         break;
4109                 }
4110
4111                 lpfc_cmd->prot_data = 0;
4112                 lpfc_cmd->prot_data_type = 0;
4113                 lpfc_cmd->prot_data_segment = NULL;
4114         }
4115 #endif
4116         if (unlikely(lpfc_cmd->status)) {
4117                 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4118                     (lpfc_cmd->result & IOERR_DRVR_MASK))
4119                         lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4120                 else if (lpfc_cmd->status >= IOSTAT_CNT)
4121                         lpfc_cmd->status = IOSTAT_DEFAULT;
4122                 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4123                     !lpfc_cmd->fcp_rsp->rspStatus3 &&
4124                     (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4125                     !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4126                         logit = 0;
4127                 else
4128                         logit = LOG_FCP | LOG_FCP_UNDER;
4129                 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4130                                  "9034 FCP cmd x%x failed <%d/%lld> "
4131                                  "status: x%x result: x%x "
4132                                  "sid: x%x did: x%x oxid: x%x "
4133                                  "Data: x%x x%x x%x\n",
4134                                  cmd->cmnd[0],
4135                                  cmd->device ? cmd->device->id : 0xffff,
4136                                  cmd->device ? cmd->device->lun : 0xffff,
4137                                  lpfc_cmd->status, lpfc_cmd->result,
4138                                  vport->fc_myDID,
4139                                  (ndlp) ? ndlp->nlp_DID : 0,
4140                                  lpfc_cmd->cur_iocbq.sli4_xritag,
4141                                  wcqe->parameter, wcqe->total_data_placed,
4142                                  lpfc_cmd->cur_iocbq.iotag);
4143         }
4144
4145         switch (lpfc_cmd->status) {
4146         case IOSTAT_SUCCESS:
4147                 cmd->result = DID_OK << 16;
4148                 break;
4149         case IOSTAT_FCP_RSP_ERROR:
4150                 lpfc_handle_fcp_err(vport, lpfc_cmd,
4151                                     pwqeIn->wqe.fcp_iread.total_xfer_len -
4152                                     wcqe->total_data_placed);
4153                 break;
4154         case IOSTAT_NPORT_BSY:
4155         case IOSTAT_FABRIC_BSY:
4156                 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4157                 fast_path_evt = lpfc_alloc_fast_evt(phba);
4158                 if (!fast_path_evt)
4159                         break;
4160                 fast_path_evt->un.fabric_evt.event_type =
4161                         FC_REG_FABRIC_EVENT;
4162                 fast_path_evt->un.fabric_evt.subcategory =
4163                         (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4164                         LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4165                 if (ndlp) {
4166                         memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4167                                &ndlp->nlp_portname,
4168                                 sizeof(struct lpfc_name));
4169                         memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4170                                &ndlp->nlp_nodename,
4171                                 sizeof(struct lpfc_name));
4172                 }
4173                 fast_path_evt->vport = vport;
4174                 fast_path_evt->work_evt.evt =
4175                         LPFC_EVT_FASTPATH_MGMT_EVT;
4176                 spin_lock_irqsave(&phba->hbalock, flags);
4177                 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4178                               &phba->work_list);
4179                 spin_unlock_irqrestore(&phba->hbalock, flags);
4180                 lpfc_worker_wake_up(phba);
4181                 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4182                                  "9035 Fabric/Node busy FCP cmd x%x failed"
4183                                  " <%d/%lld> "
4184                                  "status: x%x result: x%x "
4185                                  "sid: x%x did: x%x oxid: x%x "
4186                                  "Data: x%x x%x x%x\n",
4187                                  cmd->cmnd[0],
4188                                  cmd->device ? cmd->device->id : 0xffff,
4189                                  cmd->device ? cmd->device->lun : 0xffff,
4190                                  lpfc_cmd->status, lpfc_cmd->result,
4191                                  vport->fc_myDID,
4192                                  (ndlp) ? ndlp->nlp_DID : 0,
4193                                  lpfc_cmd->cur_iocbq.sli4_xritag,
4194                                  wcqe->parameter,
4195                                  wcqe->total_data_placed,
4196                                  lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4197                 break;
4198         case IOSTAT_REMOTE_STOP:
4199                 if (ndlp) {
4200                         /* This I/O was aborted by the target, we don't
4201                          * know the rxid and because we did not send the
4202                          * ABTS we cannot generate and RRQ.
4203                          */
4204                         lpfc_set_rrq_active(phba, ndlp,
4205                                             lpfc_cmd->cur_iocbq.sli4_lxritag,
4206                                             0, 0);
4207                 }
4208                 fallthrough;
4209         case IOSTAT_LOCAL_REJECT:
4210                 if (lpfc_cmd->result & IOERR_DRVR_MASK)
4211                         lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4212                 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4213                     lpfc_cmd->result ==
4214                     IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4215                     lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4216                     lpfc_cmd->result ==
4217                     IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4218                         cmd->result = DID_NO_CONNECT << 16;
4219                         break;
4220                 }
4221                 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4222                     lpfc_cmd->result == IOERR_NO_RESOURCES ||
4223                     lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4224                     lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4225                         cmd->result = DID_REQUEUE << 16;
4226                         break;
4227                 }
4228                 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4229                      lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4230                      status == CQE_STATUS_DI_ERROR) {
4231                         if (scsi_get_prot_op(cmd) !=
4232                             SCSI_PROT_NORMAL) {
4233                                 /*
4234                                  * This is a response for a BG enabled
4235                                  * cmd. Parse BG error
4236                                  */
4237                                 lpfc_sli4_parse_bg_err(phba, lpfc_cmd,
4238                                                        wcqe);
4239                                 break;
4240                         }
4241                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4242                                  "9040 non-zero BGSTAT on unprotected cmd\n");
4243                 }
4244                 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4245                                  "9036 Local Reject FCP cmd x%x failed"
4246                                  " <%d/%lld> "
4247                                  "status: x%x result: x%x "
4248                                  "sid: x%x did: x%x oxid: x%x "
4249                                  "Data: x%x x%x x%x\n",
4250                                  cmd->cmnd[0],
4251                                  cmd->device ? cmd->device->id : 0xffff,
4252                                  cmd->device ? cmd->device->lun : 0xffff,
4253                                  lpfc_cmd->status, lpfc_cmd->result,
4254                                  vport->fc_myDID,
4255                                  (ndlp) ? ndlp->nlp_DID : 0,
4256                                  lpfc_cmd->cur_iocbq.sli4_xritag,
4257                                  wcqe->parameter,
4258                                  wcqe->total_data_placed,
4259                                  lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4260                 fallthrough;
4261         default:
4262                 if (lpfc_cmd->status >= IOSTAT_CNT)
4263                         lpfc_cmd->status = IOSTAT_DEFAULT;
4264                 cmd->result = DID_ERROR << 16;
4265                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
4266                                  "9037 FCP Completion Error: xri %x "
4267                                  "status x%x result x%x [x%x] "
4268                                  "placed x%x\n",
4269                                  lpfc_cmd->cur_iocbq.sli4_xritag,
4270                                  lpfc_cmd->status, lpfc_cmd->result,
4271                                  wcqe->parameter,
4272                                  wcqe->total_data_placed);
4273         }
4274         if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4275                 u32 *lp = (u32 *)cmd->sense_buffer;
4276
4277                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4278                                  "9039 Iodone <%d/%llu> cmd x%p, error "
4279                                  "x%x SNS x%x x%x Data: x%x x%x\n",
4280                                  cmd->device->id, cmd->device->lun, cmd,
4281                                  cmd->result, *lp, *(lp + 3), cmd->retries,
4282                                  scsi_get_resid(cmd));
4283         }
4284
4285         lpfc_update_stats(vport, lpfc_cmd);
4286
4287         if (vport->cfg_max_scsicmpl_time &&
4288             time_after(jiffies, lpfc_cmd->start_time +
4289             msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4290                 spin_lock_irqsave(shost->host_lock, flags);
4291                 if (ndlp) {
4292                         if (ndlp->cmd_qdepth >
4293                                 atomic_read(&ndlp->cmd_pending) &&
4294                                 (atomic_read(&ndlp->cmd_pending) >
4295                                 LPFC_MIN_TGT_QDEPTH) &&
4296                                 (cmd->cmnd[0] == READ_10 ||
4297                                 cmd->cmnd[0] == WRITE_10))
4298                                 ndlp->cmd_qdepth =
4299                                         atomic_read(&ndlp->cmd_pending);
4300
4301                         ndlp->last_change_time = jiffies;
4302                 }
4303                 spin_unlock_irqrestore(shost->host_lock, flags);
4304         }
4305         lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4306
4307 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4308         if (lpfc_cmd->ts_cmd_start) {
4309                 lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
4310                 lpfc_cmd->ts_data_io = ktime_get_ns();
4311                 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4312                 lpfc_io_ktime(phba, lpfc_cmd);
4313         }
4314 #endif
4315         lpfc_cmd->pCmd = NULL;
4316         spin_unlock(&lpfc_cmd->buf_lock);
4317
4318         /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4319         cmd->scsi_done(cmd);
4320
4321         /*
4322          * If there is an abort thread waiting for command completion
4323          * wake up the thread.
4324          */
4325         spin_lock(&lpfc_cmd->buf_lock);
4326         lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
4327         if (lpfc_cmd->waitq)
4328                 wake_up(lpfc_cmd->waitq);
4329         spin_unlock(&lpfc_cmd->buf_lock);
4330
4331         lpfc_release_scsi_buf(phba, lpfc_cmd);
4332 }
4333
4334 /**
4335  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
4336  * @phba: The Hba for which this call is being executed.
4337  * @pIocbIn: The command IOCBQ for the scsi cmnd.
4338  * @pIocbOut: The response IOCBQ for the scsi cmnd.
4339  *
4340  * This routine assigns scsi command result by looking into response IOCB
4341  * status field appropriately. This routine handles QUEUE FULL condition as
4342  * well by ramping down device queue depth.
4343  **/
4344 static void
4345 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4346                         struct lpfc_iocbq *pIocbOut)
4347 {
4348         struct lpfc_io_buf *lpfc_cmd =
4349                 (struct lpfc_io_buf *) pIocbIn->context1;
4350         struct lpfc_vport      *vport = pIocbIn->vport;
4351         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4352         struct lpfc_nodelist *pnode = rdata->pnode;
4353         struct scsi_cmnd *cmd;
4354         unsigned long flags;
4355         struct lpfc_fast_path_event *fast_path_evt;
4356         struct Scsi_Host *shost;
4357         int idx;
4358         uint32_t logit = LOG_FCP;
4359
4360         /* Guard against abort handler being called at same time */
4361         spin_lock(&lpfc_cmd->buf_lock);
4362
4363         /* Sanity check on return of outstanding command */
4364         cmd = lpfc_cmd->pCmd;
4365         if (!cmd || !phba) {
4366                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4367                                  "2621 IO completion: Not an active IO\n");
4368                 spin_unlock(&lpfc_cmd->buf_lock);
4369                 return;
4370         }
4371
4372         idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4373         if (phba->sli4_hba.hdwq)
4374                 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4375
4376 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4377         if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4378                 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4379 #endif
4380         shost = cmd->device->host;
4381
4382         lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
4383         lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
4384         /* pick up SLI4 exhange busy status from HBA */
4385         if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
4386                 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4387         else
4388                 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4389
4390 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4391         if (lpfc_cmd->prot_data_type) {
4392                 struct scsi_dif_tuple *src = NULL;
4393
4394                 src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4395                 /*
4396                  * Used to restore any changes to protection
4397                  * data for error injection.
4398                  */
4399                 switch (lpfc_cmd->prot_data_type) {
4400                 case LPFC_INJERR_REFTAG:
4401                         src->ref_tag =
4402                                 lpfc_cmd->prot_data;
4403                         break;
4404                 case LPFC_INJERR_APPTAG:
4405                         src->app_tag =
4406                                 (uint16_t)lpfc_cmd->prot_data;
4407                         break;
4408                 case LPFC_INJERR_GUARD:
4409                         src->guard_tag =
4410                                 (uint16_t)lpfc_cmd->prot_data;
4411                         break;
4412                 default:
4413                         break;
4414                 }
4415
4416                 lpfc_cmd->prot_data = 0;
4417                 lpfc_cmd->prot_data_type = 0;
4418                 lpfc_cmd->prot_data_segment = NULL;
4419         }
4420 #endif
4421
4422         if (unlikely(lpfc_cmd->status)) {
4423                 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4424                     (lpfc_cmd->result & IOERR_DRVR_MASK))
4425                         lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4426                 else if (lpfc_cmd->status >= IOSTAT_CNT)
4427                         lpfc_cmd->status = IOSTAT_DEFAULT;
4428                 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4429                     !lpfc_cmd->fcp_rsp->rspStatus3 &&
4430                     (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4431                     !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4432                         logit = 0;
4433                 else
4434                         logit = LOG_FCP | LOG_FCP_UNDER;
4435                 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4436                          "9030 FCP cmd x%x failed <%d/%lld> "
4437                          "status: x%x result: x%x "
4438                          "sid: x%x did: x%x oxid: x%x "
4439                          "Data: x%x x%x\n",
4440                          cmd->cmnd[0],
4441                          cmd->device ? cmd->device->id : 0xffff,
4442                          cmd->device ? cmd->device->lun : 0xffff,
4443                          lpfc_cmd->status, lpfc_cmd->result,
4444                          vport->fc_myDID,
4445                          (pnode) ? pnode->nlp_DID : 0,
4446                          phba->sli_rev == LPFC_SLI_REV4 ?
4447                              lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4448                          pIocbOut->iocb.ulpContext,
4449                          lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4450
4451                 switch (lpfc_cmd->status) {
4452                 case IOSTAT_FCP_RSP_ERROR:
4453                         /* Call FCP RSP handler to determine result */
4454                         lpfc_handle_fcp_err(vport, lpfc_cmd,
4455                                             pIocbOut->iocb.un.fcpi.fcpi_parm);
4456                         break;
4457                 case IOSTAT_NPORT_BSY:
4458                 case IOSTAT_FABRIC_BSY:
4459                         cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4460                         fast_path_evt = lpfc_alloc_fast_evt(phba);
4461                         if (!fast_path_evt)
4462                                 break;
4463                         fast_path_evt->un.fabric_evt.event_type =
4464                                 FC_REG_FABRIC_EVENT;
4465                         fast_path_evt->un.fabric_evt.subcategory =
4466                                 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4467                                 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4468                         if (pnode) {
4469                                 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4470                                         &pnode->nlp_portname,
4471                                         sizeof(struct lpfc_name));
4472                                 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4473                                         &pnode->nlp_nodename,
4474                                         sizeof(struct lpfc_name));
4475                         }
4476                         fast_path_evt->vport = vport;
4477                         fast_path_evt->work_evt.evt =
4478                                 LPFC_EVT_FASTPATH_MGMT_EVT;
4479                         spin_lock_irqsave(&phba->hbalock, flags);
4480                         list_add_tail(&fast_path_evt->work_evt.evt_listp,
4481                                 &phba->work_list);
4482                         spin_unlock_irqrestore(&phba->hbalock, flags);
4483                         lpfc_worker_wake_up(phba);
4484                         break;
4485                 case IOSTAT_LOCAL_REJECT:
4486                 case IOSTAT_REMOTE_STOP:
4487                         if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4488                             lpfc_cmd->result ==
4489                                         IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4490                             lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4491                             lpfc_cmd->result ==
4492                                         IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4493                                 cmd->result = DID_NO_CONNECT << 16;
4494                                 break;
4495                         }
4496                         if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4497                             lpfc_cmd->result == IOERR_NO_RESOURCES ||
4498                             lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4499                             lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4500                                 cmd->result = DID_REQUEUE << 16;
4501                                 break;
4502                         }
4503                         if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4504                              lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4505                              pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4506                                 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4507                                         /*
4508                                          * This is a response for a BG enabled
4509                                          * cmd. Parse BG error
4510                                          */
4511                                         lpfc_parse_bg_err(phba, lpfc_cmd,
4512                                                         pIocbOut);
4513                                         break;
4514                                 } else {
4515                                         lpfc_printf_vlog(vport, KERN_WARNING,
4516                                                         LOG_BG,
4517                                                         "9031 non-zero BGSTAT "
4518                                                         "on unprotected cmd\n");
4519                                 }
4520                         }
4521                         if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4522                                 && (phba->sli_rev == LPFC_SLI_REV4)
4523                                 && pnode) {
4524                                 /* This IO was aborted by the target, we don't
4525                                  * know the rxid and because we did not send the
4526                                  * ABTS we cannot generate and RRQ.
4527                                  */
4528                                 lpfc_set_rrq_active(phba, pnode,
4529                                         lpfc_cmd->cur_iocbq.sli4_lxritag,
4530                                         0, 0);
4531                         }
4532                         fallthrough;
4533                 default:
4534                         cmd->result = DID_ERROR << 16;
4535                         break;
4536                 }
4537
4538                 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4539                         cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
4540                                       SAM_STAT_BUSY;
4541         } else
4542                 cmd->result = DID_OK << 16;
4543
4544         if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4545                 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4546
4547                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4548                                  "0710 Iodone <%d/%llu> cmd x%px, error "
4549                                  "x%x SNS x%x x%x Data: x%x x%x\n",
4550                                  cmd->device->id, cmd->device->lun, cmd,
4551                                  cmd->result, *lp, *(lp + 3), cmd->retries,
4552                                  scsi_get_resid(cmd));
4553         }
4554
4555         lpfc_update_stats(vport, lpfc_cmd);
4556         if (vport->cfg_max_scsicmpl_time &&
4557            time_after(jiffies, lpfc_cmd->start_time +
4558                 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4559                 spin_lock_irqsave(shost->host_lock, flags);
4560                 if (pnode) {
4561                         if (pnode->cmd_qdepth >
4562                                 atomic_read(&pnode->cmd_pending) &&
4563                                 (atomic_read(&pnode->cmd_pending) >
4564                                 LPFC_MIN_TGT_QDEPTH) &&
4565                                 ((cmd->cmnd[0] == READ_10) ||
4566                                 (cmd->cmnd[0] == WRITE_10)))
4567                                 pnode->cmd_qdepth =
4568                                         atomic_read(&pnode->cmd_pending);
4569
4570                         pnode->last_change_time = jiffies;
4571                 }
4572                 spin_unlock_irqrestore(shost->host_lock, flags);
4573         }
4574         lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4575
4576         lpfc_cmd->pCmd = NULL;
4577         spin_unlock(&lpfc_cmd->buf_lock);
4578
4579 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4580         if (lpfc_cmd->ts_cmd_start) {
4581                 lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
4582                 lpfc_cmd->ts_data_io = ktime_get_ns();
4583                 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4584                 lpfc_io_ktime(phba, lpfc_cmd);
4585         }
4586 #endif
4587         /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4588         cmd->scsi_done(cmd);
4589
4590         /*
4591          * If there is an abort thread waiting for command completion
4592          * wake up the thread.
4593          */
4594         spin_lock(&lpfc_cmd->buf_lock);
4595         lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
4596         if (lpfc_cmd->waitq)
4597                 wake_up(lpfc_cmd->waitq);
4598         spin_unlock(&lpfc_cmd->buf_lock);
4599
4600         lpfc_release_scsi_buf(phba, lpfc_cmd);
4601 }
4602
4603 /**
4604  * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
4605  * @phba: Pointer to vport object for which I/O is executed
4606  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4607  * @tmo: timeout value for the IO
4608  *
4609  * Based on the data-direction of the command, initialize IOCB
4610  * in the I/O buffer. Fill in the IOCB fields which are independent
4611  * of the scsi buffer
4612  *
4613  * RETURNS 0 - SUCCESS,
4614  **/
4615 static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
4616                                       struct lpfc_io_buf *lpfc_cmd,
4617                                       uint8_t tmo)
4618 {
4619         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4620         struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
4621         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4622         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4623         struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4624         int datadir = scsi_cmnd->sc_data_direction;
4625         u32 fcpdl;
4626
4627         piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4628
4629         /*
4630          * There are three possibilities here - use scatter-gather segment, use
4631          * the single mapping, or neither.  Start the lpfc command prep by
4632          * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4633          * data bde entry.
4634          */
4635         if (scsi_sg_count(scsi_cmnd)) {
4636                 if (datadir == DMA_TO_DEVICE) {
4637                         iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4638                         iocb_cmd->ulpPU = PARM_READ_CHECK;
4639                         if (vport->cfg_first_burst_size &&
4640                             (pnode->nlp_flag & NLP_FIRSTBURST)) {
4641                                 u32 xrdy_len;
4642
4643                                 fcpdl = scsi_bufflen(scsi_cmnd);
4644                                 xrdy_len = min(fcpdl,
4645                                                vport->cfg_first_burst_size);
4646                                 piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
4647                         }
4648                         fcp_cmnd->fcpCntl3 = WRITE_DATA;
4649                 } else {
4650                         iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4651                         iocb_cmd->ulpPU = PARM_READ_CHECK;
4652                         fcp_cmnd->fcpCntl3 = READ_DATA;
4653                 }
4654         } else {
4655                 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4656                 iocb_cmd->un.fcpi.fcpi_parm = 0;
4657                 iocb_cmd->ulpPU = 0;
4658                 fcp_cmnd->fcpCntl3 = 0;
4659         }
4660
4661         /*
4662          * Finish initializing those IOCB fields that are independent
4663          * of the scsi_cmnd request_buffer
4664          */
4665         piocbq->iocb.ulpContext = pnode->nlp_rpi;
4666         if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4667                 piocbq->iocb.ulpFCP2Rcvy = 1;
4668         else
4669                 piocbq->iocb.ulpFCP2Rcvy = 0;
4670
4671         piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4672         piocbq->context1  = lpfc_cmd;
4673         if (!piocbq->iocb_cmpl)
4674                 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4675         piocbq->iocb.ulpTimeout = tmo;
4676         piocbq->vport = vport;
4677         return 0;
4678 }
4679
4680 /**
4681  * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
4682  * @phba: Pointer to vport object for which I/O is executed
4683  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4684  * @tmo: timeout value for the IO
4685  *
4686  * Based on the data-direction of the command copy WQE template
4687  * to I/O buffer WQE. Fill in the WQE fields which are independent
4688  * of the scsi buffer
4689  *
4690  * RETURNS 0 - SUCCESS,
4691  **/
4692 static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
4693                                       struct lpfc_io_buf *lpfc_cmd,
4694                                       uint8_t tmo)
4695 {
4696         struct lpfc_hba *phba = vport->phba;
4697         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4698         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4699         struct lpfc_sli4_hdw_queue *hdwq = NULL;
4700         struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4701         struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4702         union lpfc_wqe128 *wqe = &pwqeq->wqe;
4703         u16 idx = lpfc_cmd->hdwq_no;
4704         int datadir = scsi_cmnd->sc_data_direction;
4705
4706         hdwq = &phba->sli4_hba.hdwq[idx];
4707
4708         /* Initialize 64 bytes only */
4709         memset(wqe, 0, sizeof(union lpfc_wqe128));
4710
4711         /*
4712          * There are three possibilities here - use scatter-gather segment, use
4713          * the single mapping, or neither.
4714          */
4715         if (scsi_sg_count(scsi_cmnd)) {
4716                 if (datadir == DMA_TO_DEVICE) {
4717                         /* From the iwrite template, initialize words 7 -  11 */
4718                         memcpy(&wqe->words[7],
4719                                &lpfc_iwrite_cmd_template.words[7],
4720                                sizeof(uint32_t) * 5);
4721
4722                         fcp_cmnd->fcpCntl3 = WRITE_DATA;
4723                         if (hdwq)
4724                                 hdwq->scsi_cstat.output_requests++;
4725                 } else {
4726                         /* From the iread template, initialize words 7 - 11 */
4727                         memcpy(&wqe->words[7],
4728                                &lpfc_iread_cmd_template.words[7],
4729                                sizeof(uint32_t) * 5);
4730
4731                         /* Word 7 */
4732                         bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);
4733
4734                         fcp_cmnd->fcpCntl3 = READ_DATA;
4735                         if (hdwq)
4736                                 hdwq->scsi_cstat.input_requests++;
4737                 }
4738         } else {
4739                 /* From the icmnd template, initialize words 4 - 11 */
4740                 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4741                        sizeof(uint32_t) * 8);
4742
4743                 /* Word 7 */
4744                 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);
4745
4746                 fcp_cmnd->fcpCntl3 = 0;
4747                 if (hdwq)
4748                         hdwq->scsi_cstat.control_requests++;
4749         }
4750
4751         /*
4752          * Finish initializing those WQE fields that are independent
4753          * of the request_buffer
4754          */
4755
4756          /* Word 3 */
4757         bf_set(payload_offset_len, &wqe->fcp_icmd,
4758                sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
4759
4760         /* Word 6 */
4761         bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
4762                phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
4763         bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
4764
4765         /* Word 7*/
4766         if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4767                 bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
4768
4769         bf_set(wqe_class, &wqe->generic.wqe_com,
4770                (pnode->nlp_fcp_info & 0x0f));
4771
4772          /* Word 8 */
4773         wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
4774
4775         /* Word 9 */
4776         bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
4777
4778         pwqeq->vport = vport;
4779         pwqeq->vport = vport;
4780         pwqeq->context1 = lpfc_cmd;
4781         pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
4782         pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
4783
4784         return 0;
4785 }
4786
4787 /**
4788  * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
4789  * @vport: The virtual port for which this call is being executed.
4790  * @lpfc_cmd: The scsi command which needs to send.
4791  * @pnode: Pointer to lpfc_nodelist.
4792  *
4793  * This routine initializes fcp_cmnd and iocb data structure from scsi command
4794  * to transfer for device with SLI3 interface spec.
4795  **/
4796 static int
4797 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4798                     struct lpfc_nodelist *pnode)
4799 {
4800         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4801         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4802         u8 *ptr;
4803
4804         if (!pnode)
4805                 return 0;
4806
4807         lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4808         /* clear task management bits */
4809         lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4810
4811         int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4812                        &lpfc_cmd->fcp_cmnd->fcp_lun);
4813
4814         ptr = &fcp_cmnd->fcpCdb[0];
4815         memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4816         if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4817                 ptr += scsi_cmnd->cmd_len;
4818                 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4819         }
4820
4821         fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4822
4823         lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
4824
4825         return 0;
4826 }
4827
4828 /**
4829  * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
4830  * @vport: The virtual port for which this call is being executed.
4831  * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4832  * @lun: Logical unit number.
4833  * @task_mgmt_cmd: SCSI task management command.
4834  *
4835  * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4836  * for device with SLI-3 interface spec.
4837  *
4838  * Return codes:
4839  *   0 - Error
4840  *   1 - Success
4841  **/
4842 static int
4843 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4844                              struct lpfc_io_buf *lpfc_cmd,
4845                              uint64_t lun,
4846                              uint8_t task_mgmt_cmd)
4847 {
4848         struct lpfc_iocbq *piocbq;
4849         IOCB_t *piocb;
4850         struct fcp_cmnd *fcp_cmnd;
4851         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4852         struct lpfc_nodelist *ndlp = rdata->pnode;
4853
4854         if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4855                 return 0;
4856
4857         piocbq = &(lpfc_cmd->cur_iocbq);
4858         piocbq->vport = vport;
4859
4860         piocb = &piocbq->iocb;
4861
4862         fcp_cmnd = lpfc_cmd->fcp_cmnd;
4863         /* Clear out any old data in the FCP command area */
4864         memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4865         int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4866         fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4867         if (vport->phba->sli_rev == 3 &&
4868             !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4869                 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4870         piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4871         piocb->ulpContext = ndlp->nlp_rpi;
4872         if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4873                 piocb->ulpContext =
4874                   vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4875         }
4876         piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4877         piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4878         piocb->ulpPU = 0;
4879         piocb->un.fcpi.fcpi_parm = 0;
4880
4881         /* ulpTimeout is only one byte */
4882         if (lpfc_cmd->timeout > 0xff) {
4883                 /*
4884                  * Do not timeout the command at the firmware level.
4885                  * The driver will provide the timeout mechanism.
4886                  */
4887                 piocb->ulpTimeout = 0;
4888         } else
4889                 piocb->ulpTimeout = lpfc_cmd->timeout;
4890
4891         if (vport->phba->sli_rev == LPFC_SLI_REV4)
4892                 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4893
4894         return 1;
4895 }
4896
4897 /**
4898  * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4899  * @phba: The hba struct for which this call is being executed.
4900  * @dev_grp: The HBA PCI-Device group number.
4901  *
4902  * This routine sets up the SCSI interface API function jump table in @phba
4903  * struct.
4904  * Returns: 0 - success, -ENODEV - failure.
4905  **/
4906 int
4907 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4908 {
4909
4910         phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4911
4912         switch (dev_grp) {
4913         case LPFC_PCI_DEV_LP:
4914                 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4915                 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4916                 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4917                 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4918                 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
4919                 break;
4920         case LPFC_PCI_DEV_OC:
4921                 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4922                 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4923                 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4924                 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4925                 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
4926                 break;
4927         default:
4928                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4929                                 "1418 Invalid HBA PCI-device group: 0x%x\n",
4930                                 dev_grp);
4931                 return -ENODEV;
4932         }
4933         phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4934         phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4935         return 0;
4936 }
4937
4938 /**
4939  * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
4940  * @phba: The Hba for which this call is being executed.
4941  * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4942  * @rspiocbq: Pointer to lpfc_iocbq data structure.
4943  *
4944  * This routine is IOCB completion routine for device reset and target reset
4945  * routine. This routine release scsi buffer associated with lpfc_cmd.
4946  **/
4947 static void
4948 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4949                         struct lpfc_iocbq *cmdiocbq,
4950                         struct lpfc_iocbq *rspiocbq)
4951 {
4952         struct lpfc_io_buf *lpfc_cmd =
4953                 (struct lpfc_io_buf *) cmdiocbq->context1;
4954         if (lpfc_cmd)
4955                 lpfc_release_scsi_buf(phba, lpfc_cmd);
4956         return;
4957 }
4958
4959 /**
4960  * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
4961  *                             if issuing a pci_bus_reset is possibly unsafe
4962  * @phba: lpfc_hba pointer.
4963  *
4964  * Description:
4965  * Walks the bus_list to ensure only PCI devices with Emulex
4966  * vendor id, device ids that support hot reset, and only one occurrence
4967  * of function 0.
4968  *
4969  * Returns:
4970  * -EBADSLT,  detected invalid device
4971  *      0,    successful
4972  */
4973 int
4974 lpfc_check_pci_resettable(struct lpfc_hba *phba)
4975 {
4976         const struct pci_dev *pdev = phba->pcidev;
4977         struct pci_dev *ptr = NULL;
4978         u8 counter = 0;
4979
4980         /* Walk the list of devices on the pci_dev's bus */
4981         list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
4982                 /* Check for Emulex Vendor ID */
4983                 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
4984                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4985                                         "8346 Non-Emulex vendor found: "
4986                                         "0x%04x\n", ptr->vendor);
4987                         return -EBADSLT;
4988                 }
4989
4990                 /* Check for valid Emulex Device ID */
4991                 switch (ptr->device) {
4992                 case PCI_DEVICE_ID_LANCER_FC:
4993                 case PCI_DEVICE_ID_LANCER_G6_FC:
4994                 case PCI_DEVICE_ID_LANCER_G7_FC:
4995                         break;
4996                 default:
4997                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4998                                         "8347 Invalid device found: "
4999                                         "0x%04x\n", ptr->device);
5000                         return -EBADSLT;
5001                 }
5002
5003                 /* Check for only one function 0 ID to ensure only one HBA on
5004                  * secondary bus
5005                  */
5006                 if (ptr->devfn == 0) {
5007                         if (++counter > 1) {
5008                                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5009                                                 "8348 More than one device on "
5010                                                 "secondary bus found\n");
5011                                 return -EBADSLT;
5012                         }
5013                 }
5014         }
5015
5016         return 0;
5017 }
5018
5019 /**
5020  * lpfc_info - Info entry point of scsi_host_template data structure
5021  * @host: The scsi host for which this call is being executed.
5022  *
5023  * This routine provides module information about hba.
5024  *
5025  * Reutrn code:
5026  *   Pointer to char - Success.
5027  **/
5028 const char *
5029 lpfc_info(struct Scsi_Host *host)
5030 {
5031         struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
5032         struct lpfc_hba   *phba = vport->phba;
5033         int link_speed = 0;
5034         static char lpfcinfobuf[384];
5035         char tmp[384] = {0};
5036
5037         memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
5038         if (phba && phba->pcidev){
5039                 /* Model Description */
5040                 scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
5041                 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5042                     sizeof(lpfcinfobuf))
5043                         goto buffer_done;
5044
5045                 /* PCI Info */
5046                 scnprintf(tmp, sizeof(tmp),
5047                           " on PCI bus %02x device %02x irq %d",
5048                           phba->pcidev->bus->number, phba->pcidev->devfn,
5049                           phba->pcidev->irq);
5050                 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5051                     sizeof(lpfcinfobuf))
5052                         goto buffer_done;
5053
5054                 /* Port Number */
5055                 if (phba->Port[0]) {
5056                         scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
5057                         if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5058                             sizeof(lpfcinfobuf))
5059                                 goto buffer_done;
5060                 }
5061
5062                 /* Link Speed */
5063                 link_speed = lpfc_sli_port_speed_get(phba);
5064                 if (link_speed != 0) {
5065                         scnprintf(tmp, sizeof(tmp),
5066                                   " Logical Link Speed: %d Mbps", link_speed);
5067                         if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5068                             sizeof(lpfcinfobuf))
5069                                 goto buffer_done;
5070                 }
5071
5072                 /* PCI resettable */
5073                 if (!lpfc_check_pci_resettable(phba)) {
5074                         scnprintf(tmp, sizeof(tmp), " PCI resettable");
5075                         strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
5076                 }
5077         }
5078
5079 buffer_done:
5080         return lpfcinfobuf;
5081 }
5082
5083 /**
5084  * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba
5085  * @phba: The Hba for which this call is being executed.
5086  *
5087  * This routine modifies fcp_poll_timer  field of @phba by cfg_poll_tmo.
5088  * The default value of cfg_poll_tmo is 10 milliseconds.
5089  **/
5090 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
5091 {
5092         unsigned long  poll_tmo_expires =
5093                 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
5094
5095         if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
5096                 mod_timer(&phba->fcp_poll_timer,
5097                           poll_tmo_expires);
5098 }
5099
5100 /**
5101  * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
5102  * @phba: The Hba for which this call is being executed.
5103  *
5104  * This routine starts the fcp_poll_timer of @phba.
5105  **/
5106 void lpfc_poll_start_timer(struct lpfc_hba * phba)
5107 {
5108         lpfc_poll_rearm_timer(phba);
5109 }
5110
5111 /**
5112  * lpfc_poll_timeout - Restart polling timer
5113  * @t: Timer construct where lpfc_hba data structure pointer is obtained.
5114  *
5115  * This routine restarts fcp_poll timer, when FCP ring  polling is enable
5116  * and FCP Ring interrupt is disable.
5117  **/
5118 void lpfc_poll_timeout(struct timer_list *t)
5119 {
5120         struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
5121
5122         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5123                 lpfc_sli_handle_fast_ring_event(phba,
5124                         &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5125
5126                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5127                         lpfc_poll_rearm_timer(phba);
5128         }
5129 }
5130
5131 /**
5132  * lpfc_queuecommand - scsi_host_template queuecommand entry point
5133  * @shost: kernel scsi host pointer.
5134  * @cmnd: Pointer to scsi_cmnd data structure.
5135  *
5136  * Driver registers this routine to scsi midlayer to submit a @cmd to process.
5137  * This routine prepares an IOCB from scsi command and provides to firmware.
5138  * The @done callback is invoked after driver finished processing the command.
5139  *
5140  * Return value :
5141  *   0 - Success
5142  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
5143  **/
5144 static int
5145 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
5146 {
5147         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5148         struct lpfc_hba   *phba = vport->phba;
5149         struct lpfc_rport_data *rdata;
5150         struct lpfc_nodelist *ndlp;
5151         struct lpfc_io_buf *lpfc_cmd;
5152         struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5153         int err, idx;
5154 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5155         uint64_t start = 0L;
5156
5157         if (phba->ktime_on)
5158                 start = ktime_get_ns();
5159 #endif
5160
5161         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5162
5163         /* sanity check on references */
5164         if (unlikely(!rdata) || unlikely(!rport))
5165                 goto out_fail_command;
5166
5167         err = fc_remote_port_chkready(rport);
5168         if (err) {
5169                 cmnd->result = err;
5170                 goto out_fail_command;
5171         }
5172         ndlp = rdata->pnode;
5173
5174         if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
5175                 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
5176
5177                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5178                                 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
5179                                 " op:%02x str=%s without registering for"
5180                                 " BlockGuard - Rejecting command\n",
5181                                 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
5182                                 dif_op_str[scsi_get_prot_op(cmnd)]);
5183                 goto out_fail_command;
5184         }
5185
5186         /*
5187          * Catch race where our node has transitioned, but the
5188          * transport is still transitioning.
5189          */
5190         if (!ndlp)
5191                 goto out_tgt_busy;
5192         if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
5193                 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
5194                         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5195                                          "3377 Target Queue Full, scsi Id:%d "
5196                                          "Qdepth:%d Pending command:%d"
5197                                          " WWNN:%02x:%02x:%02x:%02x:"
5198                                          "%02x:%02x:%02x:%02x, "
5199                                          " WWPN:%02x:%02x:%02x:%02x:"
5200                                          "%02x:%02x:%02x:%02x",
5201                                          ndlp->nlp_sid, ndlp->cmd_qdepth,
5202                                          atomic_read(&ndlp->cmd_pending),
5203                                          ndlp->nlp_nodename.u.wwn[0],
5204                                          ndlp->nlp_nodename.u.wwn[1],
5205                                          ndlp->nlp_nodename.u.wwn[2],
5206                                          ndlp->nlp_nodename.u.wwn[3],
5207                                          ndlp->nlp_nodename.u.wwn[4],
5208                                          ndlp->nlp_nodename.u.wwn[5],
5209                                          ndlp->nlp_nodename.u.wwn[6],
5210                                          ndlp->nlp_nodename.u.wwn[7],
5211                                          ndlp->nlp_portname.u.wwn[0],
5212                                          ndlp->nlp_portname.u.wwn[1],
5213                                          ndlp->nlp_portname.u.wwn[2],
5214                                          ndlp->nlp_portname.u.wwn[3],
5215                                          ndlp->nlp_portname.u.wwn[4],
5216                                          ndlp->nlp_portname.u.wwn[5],
5217                                          ndlp->nlp_portname.u.wwn[6],
5218                                          ndlp->nlp_portname.u.wwn[7]);
5219                         goto out_tgt_busy;
5220                 }
5221         }
5222
5223         lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
5224         if (lpfc_cmd == NULL) {
5225                 lpfc_rampdown_queue_depth(phba);
5226
5227                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5228                                  "0707 driver's buffer pool is empty, "
5229                                  "IO busied\n");
5230                 goto out_host_busy;
5231         }
5232
5233         /*
5234          * Store the midlayer's command structure for the completion phase
5235          * and complete the command initialization.
5236          */
5237         lpfc_cmd->pCmd  = cmnd;
5238         lpfc_cmd->rdata = rdata;
5239         lpfc_cmd->ndlp = ndlp;
5240         lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
5241         cmnd->host_scribble = (unsigned char *)lpfc_cmd;
5242
5243         err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
5244         if (err)
5245                 goto out_host_busy_release_buf;
5246
5247         if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
5248                 if (vport->phba->cfg_enable_bg) {
5249                         lpfc_printf_vlog(vport,
5250                                          KERN_INFO, LOG_SCSI_CMD,
5251                                          "9033 BLKGRD: rcvd %s cmd:x%x "
5252                                          "sector x%llx cnt %u pt %x\n",
5253                                          dif_op_str[scsi_get_prot_op(cmnd)],
5254                                          cmnd->cmnd[0],
5255                                          (unsigned long long)scsi_get_lba(cmnd),
5256                                          blk_rq_sectors(cmnd->request),
5257                                          (cmnd->cmnd[1]>>5));
5258                 }
5259                 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
5260         } else {
5261                 if (vport->phba->cfg_enable_bg) {
5262                         lpfc_printf_vlog(vport,
5263                                          KERN_INFO, LOG_SCSI_CMD,
5264                                          "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
5265                                          "x%x sector x%llx cnt %u pt %x\n",
5266                                          cmnd->cmnd[0],
5267                                          (unsigned long long)scsi_get_lba(cmnd),
5268                                          blk_rq_sectors(cmnd->request),
5269                                          (cmnd->cmnd[1]>>5));
5270                 }
5271                 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
5272         }
5273
5274         if (unlikely(err)) {
5275                 if (err == 2) {
5276                         cmnd->result = DID_ERROR << 16;
5277                         goto out_fail_command_release_buf;
5278                 }
5279                 goto out_host_busy_free_buf;
5280         }
5281
5282
5283 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5284         if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
5285                 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
5286 #endif
5287         /* Issue I/O to adapter */
5288         err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING,
5289                                     &lpfc_cmd->cur_iocbq,
5290                                     SLI_IOCB_RET_IOCB);
5291 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5292         if (start) {
5293                 lpfc_cmd->ts_cmd_start = start;
5294                 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
5295                 lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
5296         } else {
5297                 lpfc_cmd->ts_cmd_start = 0;
5298         }
5299 #endif
5300         if (err) {
5301                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5302                                    "3376 FCP could not issue IOCB err %x "
5303                                    "FCP cmd x%x <%d/%llu> "
5304                                    "sid: x%x did: x%x oxid: x%x "
5305                                    "Data: x%x x%x x%x x%x\n",
5306                                    err, cmnd->cmnd[0],
5307                                    cmnd->device ? cmnd->device->id : 0xffff,
5308                                    cmnd->device ? cmnd->device->lun : (u64)-1,
5309                                    vport->fc_myDID, ndlp->nlp_DID,
5310                                    phba->sli_rev == LPFC_SLI_REV4 ?
5311                                    lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
5312                                    phba->sli_rev == LPFC_SLI_REV4 ?
5313                                    phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
5314                                    lpfc_cmd->cur_iocbq.iocb.ulpContext,
5315                                    lpfc_cmd->cur_iocbq.iotag,
5316                                    phba->sli_rev == LPFC_SLI_REV4 ?
5317                                    bf_get(wqe_tmo,
5318                                    &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
5319                                    lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
5320                                    (uint32_t)
5321                                    (cmnd->request->timeout / 1000));
5322
5323                 goto out_host_busy_free_buf;
5324         }
5325
5326         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5327                 lpfc_sli_handle_fast_ring_event(phba,
5328                         &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5329
5330                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5331                         lpfc_poll_rearm_timer(phba);
5332         }
5333
5334         if (phba->cfg_xri_rebalancing)
5335                 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
5336
5337         return 0;
5338
5339  out_host_busy_free_buf:
5340         idx = lpfc_cmd->hdwq_no;
5341         lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
5342         if (phba->sli4_hba.hdwq) {
5343                 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
5344                 case WRITE_DATA:
5345                         phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
5346                         break;
5347                 case READ_DATA:
5348                         phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
5349                         break;
5350                 default:
5351                         phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
5352                 }
5353         }
5354  out_host_busy_release_buf:
5355         lpfc_release_scsi_buf(phba, lpfc_cmd);
5356  out_host_busy:
5357         return SCSI_MLQUEUE_HOST_BUSY;
5358
5359  out_tgt_busy:
5360         return SCSI_MLQUEUE_TARGET_BUSY;
5361
5362  out_fail_command_release_buf:
5363         lpfc_release_scsi_buf(phba, lpfc_cmd);
5364
5365  out_fail_command:
5366         cmnd->scsi_done(cmnd);
5367         return 0;
5368 }
5369
5370
5371 /**
5372  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
5373  * @cmnd: Pointer to scsi_cmnd data structure.
5374  *
5375  * This routine aborts @cmnd pending in base driver.
5376  *
5377  * Return code :
5378  *   0x2003 - Error
5379  *   0x2002 - Success
5380  **/
5381 static int
5382 lpfc_abort_handler(struct scsi_cmnd *cmnd)
5383 {
5384         struct Scsi_Host  *shost = cmnd->device->host;
5385         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5386         struct lpfc_hba   *phba = vport->phba;
5387         struct lpfc_iocbq *iocb;
5388         struct lpfc_io_buf *lpfc_cmd;
5389         int ret = SUCCESS, status = 0;
5390         struct lpfc_sli_ring *pring_s4 = NULL;
5391         struct lpfc_sli_ring *pring = NULL;
5392         int ret_val;
5393         unsigned long flags;
5394         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
5395
5396         status = fc_block_scsi_eh(cmnd);
5397         if (status != 0 && status != SUCCESS)
5398                 return status;
5399
5400         lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
5401         if (!lpfc_cmd)
5402                 return ret;
5403
5404         spin_lock_irqsave(&phba->hbalock, flags);
5405         /* driver queued commands are in process of being flushed */
5406         if (phba->hba_flag & HBA_IOQ_FLUSH) {
5407                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5408                         "3168 SCSI Layer abort requested I/O has been "
5409                         "flushed by LLD.\n");
5410                 ret = FAILED;
5411                 goto out_unlock;
5412         }
5413
5414         /* Guard against IO completion being called at same time */
5415         spin_lock(&lpfc_cmd->buf_lock);
5416
5417         if (!lpfc_cmd->pCmd) {
5418                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5419                          "2873 SCSI Layer I/O Abort Request IO CMPL Status "
5420                          "x%x ID %d LUN %llu\n",
5421                          SUCCESS, cmnd->device->id, cmnd->device->lun);
5422                 goto out_unlock_buf;
5423         }
5424
5425         iocb = &lpfc_cmd->cur_iocbq;
5426         if (phba->sli_rev == LPFC_SLI_REV4) {
5427                 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
5428                 if (!pring_s4) {
5429                         ret = FAILED;
5430                         goto out_unlock_buf;
5431                 }
5432                 spin_lock(&pring_s4->ring_lock);
5433         }
5434         /* the command is in process of being cancelled */
5435         if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
5436                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5437                         "3169 SCSI Layer abort requested I/O has been "
5438                         "cancelled by LLD.\n");
5439                 ret = FAILED;
5440                 goto out_unlock_ring;
5441         }
5442         /*
5443          * If pCmd field of the corresponding lpfc_io_buf structure
5444          * points to a different SCSI command, then the driver has
5445          * already completed this command, but the midlayer did not
5446          * see the completion before the eh fired. Just return SUCCESS.
5447          */
5448         if (lpfc_cmd->pCmd != cmnd) {
5449                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5450                         "3170 SCSI Layer abort requested I/O has been "
5451                         "completed by LLD.\n");
5452                 goto out_unlock_ring;
5453         }
5454
5455         BUG_ON(iocb->context1 != lpfc_cmd);
5456
5457         /* abort issued in recovery is still in progress */
5458         if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
5459                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5460                          "3389 SCSI Layer I/O Abort Request is pending\n");
5461                 if (phba->sli_rev == LPFC_SLI_REV4)
5462                         spin_unlock(&pring_s4->ring_lock);
5463                 spin_unlock(&lpfc_cmd->buf_lock);
5464                 spin_unlock_irqrestore(&phba->hbalock, flags);
5465                 goto wait_for_cmpl;
5466         }
5467
5468         lpfc_cmd->waitq = &waitq;
5469         if (phba->sli_rev == LPFC_SLI_REV4) {
5470                 spin_unlock(&pring_s4->ring_lock);
5471                 ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
5472                                                       lpfc_sli4_abort_fcp_cmpl);
5473         } else {
5474                 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
5475                 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
5476                                                      lpfc_sli_abort_fcp_cmpl);
5477         }
5478
5479         if (ret_val != IOCB_SUCCESS) {
5480                 /* Indicate the IO is not being aborted by the driver. */
5481                 lpfc_cmd->waitq = NULL;
5482                 spin_unlock(&lpfc_cmd->buf_lock);
5483                 spin_unlock_irqrestore(&phba->hbalock, flags);
5484                 ret = FAILED;
5485                 goto out;
5486         }
5487
5488         /* no longer need the lock after this point */
5489         spin_unlock(&lpfc_cmd->buf_lock);
5490         spin_unlock_irqrestore(&phba->hbalock, flags);
5491
5492         if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5493                 lpfc_sli_handle_fast_ring_event(phba,
5494                         &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5495
5496 wait_for_cmpl:
5497         /*
5498          * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait
5499          * for abort to complete.
5500          */
5501         wait_event_timeout(waitq,
5502                           (lpfc_cmd->pCmd != cmnd),
5503                            msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
5504
5505         spin_lock(&lpfc_cmd->buf_lock);
5506
5507         if (lpfc_cmd->pCmd == cmnd) {
5508                 ret = FAILED;
5509                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5510                                  "0748 abort handler timed out waiting "
5511                                  "for aborting I/O (xri:x%x) to complete: "
5512                                  "ret %#x, ID %d, LUN %llu\n",
5513                                  iocb->sli4_xritag, ret,
5514                                  cmnd->device->id, cmnd->device->lun);
5515         }
5516
5517         lpfc_cmd->waitq = NULL;
5518
5519         spin_unlock(&lpfc_cmd->buf_lock);
5520         goto out;
5521
5522 out_unlock_ring:
5523         if (phba->sli_rev == LPFC_SLI_REV4)
5524                 spin_unlock(&pring_s4->ring_lock);
5525 out_unlock_buf:
5526         spin_unlock(&lpfc_cmd->buf_lock);
5527 out_unlock:
5528         spin_unlock_irqrestore(&phba->hbalock, flags);
5529 out:
5530         lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5531                          "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
5532                          "LUN %llu\n", ret, cmnd->device->id,
5533                          cmnd->device->lun);
5534         return ret;
5535 }
5536
5537 static char *
5538 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
5539 {
5540         switch (task_mgmt_cmd) {
5541         case FCP_ABORT_TASK_SET:
5542                 return "ABORT_TASK_SET";
5543         case FCP_CLEAR_TASK_SET:
5544                 return "FCP_CLEAR_TASK_SET";
5545         case FCP_BUS_RESET:
5546                 return "FCP_BUS_RESET";
5547         case FCP_LUN_RESET:
5548                 return "FCP_LUN_RESET";
5549         case FCP_TARGET_RESET:
5550                 return "FCP_TARGET_RESET";
5551         case FCP_CLEAR_ACA:
5552                 return "FCP_CLEAR_ACA";
5553         case FCP_TERMINATE_TASK:
5554                 return "FCP_TERMINATE_TASK";
5555         default:
5556                 return "unknown";
5557         }
5558 }
5559
5560
5561 /**
5562  * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
5563  * @vport: The virtual port for which this call is being executed.
5564  * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
5565  *
5566  * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded
5567  *
5568  * Return code :
5569  *   0x2003 - Error
5570  *   0x2002 - Success
5571  **/
5572 static int
5573 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
5574 {
5575         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
5576         uint32_t rsp_info;
5577         uint32_t rsp_len;
5578         uint8_t  rsp_info_code;
5579         int ret = FAILED;
5580
5581
5582         if (fcprsp == NULL)
5583                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5584                                  "0703 fcp_rsp is missing\n");
5585         else {
5586                 rsp_info = fcprsp->rspStatus2;
5587                 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
5588                 rsp_info_code = fcprsp->rspInfo3;
5589
5590
5591                 lpfc_printf_vlog(vport, KERN_INFO,
5592                                  LOG_FCP,
5593                                  "0706 fcp_rsp valid 0x%x,"
5594                                  " rsp len=%d code 0x%x\n",
5595                                  rsp_info,
5596                                  rsp_len, rsp_info_code);
5597
5598                 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
5599                  * field specifies the number of valid bytes of FCP_RSP_INFO.
5600                  * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
5601                  */
5602                 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
5603                     ((rsp_len == 8) || (rsp_len == 4))) {
5604                         switch (rsp_info_code) {
5605                         case RSP_NO_FAILURE:
5606                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5607                                                  "0715 Task Mgmt No Failure\n");
5608                                 ret = SUCCESS;
5609                                 break;
5610                         case RSP_TM_NOT_SUPPORTED: /* TM rejected */
5611                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5612                                                  "0716 Task Mgmt Target "
5613                                                 "reject\n");
5614                                 break;
5615                         case RSP_TM_NOT_COMPLETED: /* TM failed */
5616                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5617                                                  "0717 Task Mgmt Target "
5618                                                 "failed TM\n");
5619                                 break;
5620                         case RSP_TM_INVALID_LU: /* TM to invalid LU! */
5621                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5622                                                  "0718 Task Mgmt to invalid "
5623                                                 "LUN\n");
5624                                 break;
5625                         }
5626                 }
5627         }
5628         return ret;
5629 }
5630
5631
5632 /**
5633  * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
5634  * @vport: The virtual port for which this call is being executed.
5635  * @cmnd: Pointer to scsi_cmnd data structure.
5636  * @tgt_id: Target ID of remote device.
5637  * @lun_id: Lun number for the TMF
5638  * @task_mgmt_cmd: type of TMF to send
5639  *
5640  * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
5641  * a remote port.
5642  *
5643  * Return Code:
5644  *   0x2003 - Error
5645  *   0x2002 - Success.
5646  **/
5647 static int
5648 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
5649                    unsigned int tgt_id, uint64_t lun_id,
5650                    uint8_t task_mgmt_cmd)
5651 {
5652         struct lpfc_hba   *phba = vport->phba;
5653         struct lpfc_io_buf *lpfc_cmd;
5654         struct lpfc_iocbq *iocbq;
5655         struct lpfc_iocbq *iocbqrsp;
5656         struct lpfc_rport_data *rdata;
5657         struct lpfc_nodelist *pnode;
5658         int ret;
5659         int status;
5660
5661         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5662         if (!rdata || !rdata->pnode)
5663                 return FAILED;
5664         pnode = rdata->pnode;
5665
5666         lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
5667         if (lpfc_cmd == NULL)
5668                 return FAILED;
5669         lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5670         lpfc_cmd->rdata = rdata;
5671         lpfc_cmd->pCmd = cmnd;
5672         lpfc_cmd->ndlp = pnode;
5673
5674         status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5675                                            task_mgmt_cmd);
5676         if (!status) {
5677                 lpfc_release_scsi_buf(phba, lpfc_cmd);
5678                 return FAILED;
5679         }
5680
5681         iocbq = &lpfc_cmd->cur_iocbq;
5682         iocbqrsp = lpfc_sli_get_iocbq(phba);
5683         if (iocbqrsp == NULL) {
5684                 lpfc_release_scsi_buf(phba, lpfc_cmd);
5685                 return FAILED;
5686         }
5687         iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
5688
5689         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5690                          "0702 Issue %s to TGT %d LUN %llu "
5691                          "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5692                          lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5693                          pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5694                          iocbq->iocb_flag);
5695
5696         status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5697                                           iocbq, iocbqrsp, lpfc_cmd->timeout);
5698         if ((status != IOCB_SUCCESS) ||
5699             (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
5700                 if (status != IOCB_SUCCESS ||
5701                     iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
5702                         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5703                                          "0727 TMF %s to TGT %d LUN %llu "
5704                                          "failed (%d, %d) iocb_flag x%x\n",
5705                                          lpfc_taskmgmt_name(task_mgmt_cmd),
5706                                          tgt_id, lun_id,
5707                                          iocbqrsp->iocb.ulpStatus,
5708                                          iocbqrsp->iocb.un.ulpWord[4],
5709                                          iocbq->iocb_flag);
5710                 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
5711                 if (status == IOCB_SUCCESS) {
5712                         if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
5713                                 /* Something in the FCP_RSP was invalid.
5714                                  * Check conditions */
5715                                 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
5716                         else
5717                                 ret = FAILED;
5718                 } else if (status == IOCB_TIMEDOUT) {
5719                         ret = TIMEOUT_ERROR;
5720                 } else {
5721                         ret = FAILED;
5722                 }
5723         } else
5724                 ret = SUCCESS;
5725
5726         lpfc_sli_release_iocbq(phba, iocbqrsp);
5727
5728         if (ret != TIMEOUT_ERROR)
5729                 lpfc_release_scsi_buf(phba, lpfc_cmd);
5730
5731         return ret;
5732 }
5733
5734 /**
5735  * lpfc_chk_tgt_mapped -
5736  * @vport: The virtual port to check on
5737  * @cmnd: Pointer to scsi_cmnd data structure.
5738  *
5739  * This routine delays until the scsi target (aka rport) for the
5740  * command exists (is present and logged in) or we declare it non-existent.
5741  *
5742  * Return code :
5743  *  0x2003 - Error
5744  *  0x2002 - Success
5745  **/
5746 static int
5747 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5748 {
5749         struct lpfc_rport_data *rdata;
5750         struct lpfc_nodelist *pnode;
5751         unsigned long later;
5752
5753         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5754         if (!rdata) {
5755                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5756                         "0797 Tgt Map rport failure: rdata x%px\n", rdata);
5757                 return FAILED;
5758         }
5759         pnode = rdata->pnode;
5760         /*
5761          * If target is not in a MAPPED state, delay until
5762          * target is rediscovered or devloss timeout expires.
5763          */
5764         later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5765         while (time_after(later, jiffies)) {
5766                 if (!pnode)
5767                         return FAILED;
5768                 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5769                         return SUCCESS;
5770                 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5771                 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5772                 if (!rdata)
5773                         return FAILED;
5774                 pnode = rdata->pnode;
5775         }
5776         if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5777                 return FAILED;
5778         return SUCCESS;
5779 }
5780
5781 /**
5782  * lpfc_reset_flush_io_context -
5783  * @vport: The virtual port (scsi_host) for the flush context
5784  * @tgt_id: If aborting by Target contect - specifies the target id
5785  * @lun_id: If aborting by Lun context - specifies the lun id
5786  * @context: specifies the context level to flush at.
5787  *
5788  * After a reset condition via TMF, we need to flush orphaned i/o
5789  * contexts from the adapter. This routine aborts any contexts
5790  * outstanding, then waits for their completions. The wait is
5791  * bounded by devloss_tmo though.
5792  *
5793  * Return code :
5794  *  0x2003 - Error
5795  *  0x2002 - Success
5796  **/
5797 static int
5798 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5799                         uint64_t lun_id, lpfc_ctx_cmd context)
5800 {
5801         struct lpfc_hba   *phba = vport->phba;
5802         unsigned long later;
5803         int cnt;
5804
5805         cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5806         if (cnt)
5807                 lpfc_sli_abort_taskmgmt(vport,
5808                                         &phba->sli.sli3_ring[LPFC_FCP_RING],
5809                                         tgt_id, lun_id, context);
5810         later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5811         while (time_after(later, jiffies) && cnt) {
5812                 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5813                 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5814         }
5815         if (cnt) {
5816                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5817                         "0724 I/O flush failure for context %s : cnt x%x\n",
5818                         ((context == LPFC_CTX_LUN) ? "LUN" :
5819                          ((context == LPFC_CTX_TGT) ? "TGT" :
5820                           ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5821                         cnt);
5822                 return FAILED;
5823         }
5824         return SUCCESS;
5825 }
5826
5827 /**
5828  * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5829  * @cmnd: Pointer to scsi_cmnd data structure.
5830  *
5831  * This routine does a device reset by sending a LUN_RESET task management
5832  * command.
5833  *
5834  * Return code :
5835  *  0x2003 - Error
5836  *  0x2002 - Success
5837  **/
5838 static int
5839 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5840 {
5841         struct Scsi_Host  *shost = cmnd->device->host;
5842         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5843         struct lpfc_rport_data *rdata;
5844         struct lpfc_nodelist *pnode;
5845         unsigned tgt_id = cmnd->device->id;
5846         uint64_t lun_id = cmnd->device->lun;
5847         struct lpfc_scsi_event_header scsi_event;
5848         int status;
5849
5850         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5851         if (!rdata || !rdata->pnode) {
5852                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5853                                  "0798 Device Reset rdata failure: rdata x%px\n",
5854                                  rdata);
5855                 return FAILED;
5856         }
5857         pnode = rdata->pnode;
5858         status = fc_block_scsi_eh(cmnd);
5859         if (status != 0 && status != SUCCESS)
5860                 return status;
5861
5862         status = lpfc_chk_tgt_mapped(vport, cmnd);
5863         if (status == FAILED) {
5864                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5865                         "0721 Device Reset rport failure: rdata x%px\n", rdata);
5866                 return FAILED;
5867         }
5868
5869         scsi_event.event_type = FC_REG_SCSI_EVENT;
5870         scsi_event.subcategory = LPFC_EVENT_LUNRESET;
5871         scsi_event.lun = lun_id;
5872         memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5873         memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5874
5875         fc_host_post_vendor_event(shost, fc_get_event_number(),
5876                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5877
5878         status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5879                                                 FCP_LUN_RESET);
5880
5881         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5882                          "0713 SCSI layer issued Device Reset (%d, %llu) "
5883                          "return x%x\n", tgt_id, lun_id, status);
5884
5885         /*
5886          * We have to clean up i/o as : they may be orphaned by the TMF;
5887          * or if the TMF failed, they may be in an indeterminate state.
5888          * So, continue on.
5889          * We will report success if all the i/o aborts successfully.
5890          */
5891         if (status == SUCCESS)
5892                 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5893                                                 LPFC_CTX_LUN);
5894
5895         return status;
5896 }
5897
5898 /**
5899  * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
5900  * @cmnd: Pointer to scsi_cmnd data structure.
5901  *
5902  * This routine does a target reset by sending a TARGET_RESET task management
5903  * command.
5904  *
5905  * Return code :
5906  *  0x2003 - Error
5907  *  0x2002 - Success
5908  **/
5909 static int
5910 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5911 {
5912         struct Scsi_Host  *shost = cmnd->device->host;
5913         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5914         struct lpfc_rport_data *rdata;
5915         struct lpfc_nodelist *pnode;
5916         unsigned tgt_id = cmnd->device->id;
5917         uint64_t lun_id = cmnd->device->lun;
5918         struct lpfc_scsi_event_header scsi_event;
5919         int status;
5920
5921         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5922         if (!rdata || !rdata->pnode) {
5923                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5924                                  "0799 Target Reset rdata failure: rdata x%px\n",
5925                                  rdata);
5926                 return FAILED;
5927         }
5928         pnode = rdata->pnode;
5929         status = fc_block_scsi_eh(cmnd);
5930         if (status != 0 && status != SUCCESS)
5931                 return status;
5932
5933         status = lpfc_chk_tgt_mapped(vport, cmnd);
5934         if (status == FAILED) {
5935                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5936                         "0722 Target Reset rport failure: rdata x%px\n", rdata);
5937                 if (pnode) {
5938                         spin_lock_irq(&pnode->lock);
5939                         pnode->nlp_flag &= ~NLP_NPR_ADISC;
5940                         pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
5941                         spin_unlock_irq(&pnode->lock);
5942                 }
5943                 lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5944                                           LPFC_CTX_TGT);
5945                 return FAST_IO_FAIL;
5946         }
5947
5948         scsi_event.event_type = FC_REG_SCSI_EVENT;
5949         scsi_event.subcategory = LPFC_EVENT_TGTRESET;
5950         scsi_event.lun = 0;
5951         memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5952         memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5953
5954         fc_host_post_vendor_event(shost, fc_get_event_number(),
5955                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5956
5957         status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5958                                         FCP_TARGET_RESET);
5959
5960         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5961                          "0723 SCSI layer issued Target Reset (%d, %llu) "
5962                          "return x%x\n", tgt_id, lun_id, status);
5963
5964         /*
5965          * We have to clean up i/o as : they may be orphaned by the TMF;
5966          * or if the TMF failed, they may be in an indeterminate state.
5967          * So, continue on.
5968          * We will report success if all the i/o aborts successfully.
5969          */
5970         if (status == SUCCESS)
5971                 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5972                                           LPFC_CTX_TGT);
5973         return status;
5974 }
5975
5976 /**
5977  * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
5978  * @cmnd: Pointer to scsi_cmnd data structure.
5979  *
5980  * This routine does target reset to all targets on @cmnd->device->host.
5981  * This emulates Parallel SCSI Bus Reset Semantics.
5982  *
5983  * Return code :
5984  *  0x2003 - Error
5985  *  0x2002 - Success
5986  **/
5987 static int
5988 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
5989 {
5990         struct Scsi_Host  *shost = cmnd->device->host;
5991         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5992         struct lpfc_nodelist *ndlp = NULL;
5993         struct lpfc_scsi_event_header scsi_event;
5994         int match;
5995         int ret = SUCCESS, status, i;
5996
5997         scsi_event.event_type = FC_REG_SCSI_EVENT;
5998         scsi_event.subcategory = LPFC_EVENT_BUSRESET;
5999         scsi_event.lun = 0;
6000         memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
6001         memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
6002
6003         fc_host_post_vendor_event(shost, fc_get_event_number(),
6004                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6005
6006         status = fc_block_scsi_eh(cmnd);
6007         if (status != 0 && status != SUCCESS)
6008                 return status;
6009
6010         /*
6011          * Since the driver manages a single bus device, reset all
6012          * targets known to the driver.  Should any target reset
6013          * fail, this routine returns failure to the midlayer.
6014          */
6015         for (i = 0; i < LPFC_MAX_TARGET; i++) {
6016                 /* Search for mapped node by target ID */
6017                 match = 0;
6018                 spin_lock_irq(shost->host_lock);
6019                 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6020
6021                         if (vport->phba->cfg_fcp2_no_tgt_reset &&
6022                             (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
6023                                 continue;
6024                         if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
6025                             ndlp->nlp_sid == i &&
6026                             ndlp->rport &&
6027                             ndlp->nlp_type & NLP_FCP_TARGET) {
6028                                 match = 1;
6029                                 break;
6030                         }
6031                 }
6032                 spin_unlock_irq(shost->host_lock);
6033                 if (!match)
6034                         continue;
6035
6036                 status = lpfc_send_taskmgmt(vport, cmnd,
6037                                         i, 0, FCP_TARGET_RESET);
6038
6039                 if (status != SUCCESS) {
6040                         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6041                                          "0700 Bus Reset on target %d failed\n",
6042                                          i);
6043                         ret = FAILED;
6044                 }
6045         }
6046         /*
6047          * We have to clean up i/o as : they may be orphaned by the TMFs
6048          * above; or if any of the TMFs failed, they may be in an
6049          * indeterminate state.
6050          * We will report success if all the i/o aborts successfully.
6051          */
6052
6053         status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
6054         if (status != SUCCESS)
6055                 ret = FAILED;
6056
6057         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6058                          "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
6059         return ret;
6060 }
6061
6062 /**
6063  * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
6064  * @cmnd: Pointer to scsi_cmnd data structure.
6065  *
6066  * This routine does host reset to the adaptor port. It brings the HBA
6067  * offline, performs a board restart, and then brings the board back online.
6068  * The lpfc_offline calls lpfc_sli_hba_down which will abort and local
6069  * reject all outstanding SCSI commands to the host and error returned
6070  * back to SCSI mid-level. As this will be SCSI mid-level's last resort
6071  * of error handling, it will only return error if resetting of the adapter
6072  * is not successful; in all other cases, will return success.
6073  *
6074  * Return code :
6075  *  0x2003 - Error
6076  *  0x2002 - Success
6077  **/
6078 static int
6079 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
6080 {
6081         struct Scsi_Host *shost = cmnd->device->host;
6082         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6083         struct lpfc_hba *phba = vport->phba;
6084         int rc, ret = SUCCESS;
6085
6086         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6087                          "3172 SCSI layer issued Host Reset Data:\n");
6088
6089         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6090         lpfc_offline(phba);
6091         rc = lpfc_sli_brdrestart(phba);
6092         if (rc)
6093                 goto error;
6094
6095         rc = lpfc_online(phba);
6096         if (rc)
6097                 goto error;
6098
6099         lpfc_unblock_mgmt_io(phba);
6100
6101         return ret;
6102 error:
6103         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6104                          "3323 Failed host reset\n");
6105         lpfc_unblock_mgmt_io(phba);
6106         return FAILED;
6107 }
6108
6109 /**
6110  * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
6111  * @sdev: Pointer to scsi_device.
6112  *
6113  * This routine populates the cmds_per_lun count + 2 scsi_bufs into  this host's
6114  * globally available list of scsi buffers. This routine also makes sure scsi
6115  * buffer is not allocated more than HBA limit conveyed to midlayer. This list
6116  * of scsi buffer exists for the lifetime of the driver.
6117  *
6118  * Return codes:
6119  *   non-0 - Error
6120  *   0 - Success
6121  **/
6122 static int
6123 lpfc_slave_alloc(struct scsi_device *sdev)
6124 {
6125         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6126         struct lpfc_hba   *phba = vport->phba;
6127         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
6128         uint32_t total = 0;
6129         uint32_t num_to_alloc = 0;
6130         int num_allocated = 0;
6131         uint32_t sdev_cnt;
6132         struct lpfc_device_data *device_data;
6133         unsigned long flags;
6134         struct lpfc_name target_wwpn;
6135
6136         if (!rport || fc_remote_port_chkready(rport))
6137                 return -ENXIO;
6138
6139         if (phba->cfg_fof) {
6140
6141                 /*
6142                  * Check to see if the device data structure for the lun
6143                  * exists.  If not, create one.
6144                  */
6145
6146                 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
6147                 spin_lock_irqsave(&phba->devicelock, flags);
6148                 device_data = __lpfc_get_device_data(phba,
6149                                                      &phba->luns,
6150                                                      &vport->fc_portname,
6151                                                      &target_wwpn,
6152                                                      sdev->lun);
6153                 if (!device_data) {
6154                         spin_unlock_irqrestore(&phba->devicelock, flags);
6155                         device_data = lpfc_create_device_data(phba,
6156                                                         &vport->fc_portname,
6157                                                         &target_wwpn,
6158                                                         sdev->lun,
6159                                                         phba->cfg_XLanePriority,
6160                                                         true);
6161                         if (!device_data)
6162                                 return -ENOMEM;
6163                         spin_lock_irqsave(&phba->devicelock, flags);
6164                         list_add_tail(&device_data->listentry, &phba->luns);
6165                 }
6166                 device_data->rport_data = rport->dd_data;
6167                 device_data->available = true;
6168                 spin_unlock_irqrestore(&phba->devicelock, flags);
6169                 sdev->hostdata = device_data;
6170         } else {
6171                 sdev->hostdata = rport->dd_data;
6172         }
6173         sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
6174
6175         /* For SLI4, all IO buffers are pre-allocated */
6176         if (phba->sli_rev == LPFC_SLI_REV4)
6177                 return 0;
6178
6179         /* This code path is now ONLY for SLI3 adapters */
6180
6181         /*
6182          * Populate the cmds_per_lun count scsi_bufs into this host's globally
6183          * available list of scsi buffers.  Don't allocate more than the
6184          * HBA limit conveyed to the midlayer via the host structure.  The
6185          * formula accounts for the lun_queue_depth + error handlers + 1
6186          * extra.  This list of scsi bufs exists for the lifetime of the driver.
6187          */
6188         total = phba->total_scsi_bufs;
6189         num_to_alloc = vport->cfg_lun_queue_depth + 2;
6190
6191         /* If allocated buffers are enough do nothing */
6192         if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
6193                 return 0;
6194
6195         /* Allow some exchanges to be available always to complete discovery */
6196         if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6197                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6198                                  "0704 At limitation of %d preallocated "
6199                                  "command buffers\n", total);
6200                 return 0;
6201         /* Allow some exchanges to be available always to complete discovery */
6202         } else if (total + num_to_alloc >
6203                 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6204                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6205                                  "0705 Allocation request of %d "
6206                                  "command buffers will exceed max of %d.  "
6207                                  "Reducing allocation request to %d.\n",
6208                                  num_to_alloc, phba->cfg_hba_queue_depth,
6209                                  (phba->cfg_hba_queue_depth - total));
6210                 num_to_alloc = phba->cfg_hba_queue_depth - total;
6211         }
6212         num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
6213         if (num_to_alloc != num_allocated) {
6214                         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6215                                          "0708 Allocation request of %d "
6216                                          "command buffers did not succeed.  "
6217                                          "Allocated %d buffers.\n",
6218                                          num_to_alloc, num_allocated);
6219         }
6220         if (num_allocated > 0)
6221                 phba->total_scsi_bufs += num_allocated;
6222         return 0;
6223 }
6224
6225 /**
6226  * lpfc_slave_configure - scsi_host_template slave_configure entry point
6227  * @sdev: Pointer to scsi_device.
6228  *
6229  * This routine configures following items
6230  *   - Tag command queuing support for @sdev if supported.
6231  *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
6232  *
6233  * Return codes:
6234  *   0 - Success
6235  **/
6236 static int
6237 lpfc_slave_configure(struct scsi_device *sdev)
6238 {
6239         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6240         struct lpfc_hba   *phba = vport->phba;
6241
6242         scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
6243
6244         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
6245                 lpfc_sli_handle_fast_ring_event(phba,
6246                         &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
6247                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
6248                         lpfc_poll_rearm_timer(phba);
6249         }
6250
6251         return 0;
6252 }
6253
6254 /**
6255  * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
6256  * @sdev: Pointer to scsi_device.
6257  *
6258  * This routine sets @sdev hostatdata filed to null.
6259  **/
6260 static void
6261 lpfc_slave_destroy(struct scsi_device *sdev)
6262 {
6263         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6264         struct lpfc_hba   *phba = vport->phba;
6265         unsigned long flags;
6266         struct lpfc_device_data *device_data = sdev->hostdata;
6267
6268         atomic_dec(&phba->sdev_cnt);
6269         if ((phba->cfg_fof) && (device_data)) {
6270                 spin_lock_irqsave(&phba->devicelock, flags);
6271                 device_data->available = false;
6272                 if (!device_data->oas_enabled)
6273                         lpfc_delete_device_data(phba, device_data);
6274                 spin_unlock_irqrestore(&phba->devicelock, flags);
6275         }
6276         sdev->hostdata = NULL;
6277         return;
6278 }
6279
6280 /**
6281  * lpfc_create_device_data - creates and initializes device data structure for OAS
6282  * @phba: Pointer to host bus adapter structure.
6283  * @vport_wwpn: Pointer to vport's wwpn information
6284  * @target_wwpn: Pointer to target's wwpn information
6285  * @lun: Lun on target
6286  * @pri: Priority
6287  * @atomic_create: Flag to indicate if memory should be allocated using the
6288  *                GFP_ATOMIC flag or not.
6289  *
6290  * This routine creates a device data structure which will contain identifying
6291  * information for the device (host wwpn, target wwpn, lun), state of OAS,
6292  * whether or not the corresponding lun is available by the system,
6293  * and pointer to the rport data.
6294  *
6295  * Return codes:
6296  *   NULL - Error
6297  *   Pointer to lpfc_device_data - Success
6298  **/
6299 struct lpfc_device_data*
6300 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6301                         struct lpfc_name *target_wwpn, uint64_t lun,
6302                         uint32_t pri, bool atomic_create)
6303 {
6304
6305         struct lpfc_device_data *lun_info;
6306         int memory_flags;
6307
6308         if (unlikely(!phba) || !vport_wwpn || !target_wwpn  ||
6309             !(phba->cfg_fof))
6310                 return NULL;
6311
6312         /* Attempt to create the device data to contain lun info */
6313
6314         if (atomic_create)
6315                 memory_flags = GFP_ATOMIC;
6316         else
6317                 memory_flags = GFP_KERNEL;
6318         lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
6319         if (!lun_info)
6320                 return NULL;
6321         INIT_LIST_HEAD(&lun_info->listentry);
6322         lun_info->rport_data  = NULL;
6323         memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
6324                sizeof(struct lpfc_name));
6325         memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
6326                sizeof(struct lpfc_name));
6327         lun_info->device_id.lun = lun;
6328         lun_info->oas_enabled = false;
6329         lun_info->priority = pri;
6330         lun_info->available = false;
6331         return lun_info;
6332 }
6333
6334 /**
6335  * lpfc_delete_device_data - frees a device data structure for OAS
6336  * @phba: Pointer to host bus adapter structure.
6337  * @lun_info: Pointer to device data structure to free.
6338  *
6339  * This routine frees the previously allocated device data structure passed.
6340  *
6341  **/
6342 void
6343 lpfc_delete_device_data(struct lpfc_hba *phba,
6344                         struct lpfc_device_data *lun_info)
6345 {
6346
6347         if (unlikely(!phba) || !lun_info  ||
6348             !(phba->cfg_fof))
6349                 return;
6350
6351         if (!list_empty(&lun_info->listentry))
6352                 list_del(&lun_info->listentry);
6353         mempool_free(lun_info, phba->device_data_mem_pool);
6354         return;
6355 }
6356
6357 /**
6358  * __lpfc_get_device_data - returns the device data for the specified lun
6359  * @phba: Pointer to host bus adapter structure.
6360  * @list: Point to list to search.
6361  * @vport_wwpn: Pointer to vport's wwpn information
6362  * @target_wwpn: Pointer to target's wwpn information
6363  * @lun: Lun on target
6364  *
6365  * This routine searches the list passed for the specified lun's device data.
6366  * This function does not hold locks, it is the responsibility of the caller
6367  * to ensure the proper lock is held before calling the function.
6368  *
6369  * Return codes:
6370  *   NULL - Error
6371  *   Pointer to lpfc_device_data - Success
6372  **/
6373 struct lpfc_device_data*
6374 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
6375                        struct lpfc_name *vport_wwpn,
6376                        struct lpfc_name *target_wwpn, uint64_t lun)
6377 {
6378
6379         struct lpfc_device_data *lun_info;
6380
6381         if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
6382             !phba->cfg_fof)
6383                 return NULL;
6384
6385         /* Check to see if the lun is already enabled for OAS. */
6386
6387         list_for_each_entry(lun_info, list, listentry) {
6388                 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6389                             sizeof(struct lpfc_name)) == 0) &&
6390                     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6391                             sizeof(struct lpfc_name)) == 0) &&
6392                     (lun_info->device_id.lun == lun))
6393                         return lun_info;
6394         }
6395
6396         return NULL;
6397 }
6398
6399 /**
6400  * lpfc_find_next_oas_lun - searches for the next oas lun
6401  * @phba: Pointer to host bus adapter structure.
6402  * @vport_wwpn: Pointer to vport's wwpn information
6403  * @target_wwpn: Pointer to target's wwpn information
6404  * @starting_lun: Pointer to the lun to start searching for
6405  * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
6406  * @found_target_wwpn: Pointer to the found lun's target wwpn information
6407  * @found_lun: Pointer to the found lun.
6408  * @found_lun_status: Pointer to status of the found lun.
6409  * @found_lun_pri: Pointer to priority of the found lun.
6410  *
6411  * This routine searches the luns list for the specified lun
6412  * or the first lun for the vport/target.  If the vport wwpn contains
6413  * a zero value then a specific vport is not specified. In this case
6414  * any vport which contains the lun will be considered a match.  If the
6415  * target wwpn contains a zero value then a specific target is not specified.
6416  * In this case any target which contains the lun will be considered a
6417  * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
6418  * are returned.  The function will also return the next lun if available.
6419  * If the next lun is not found, starting_lun parameter will be set to
6420  * NO_MORE_OAS_LUN.
6421  *
6422  * Return codes:
6423  *   non-0 - Error
6424  *   0 - Success
6425  **/
6426 bool
6427 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6428                        struct lpfc_name *target_wwpn, uint64_t *starting_lun,
6429                        struct lpfc_name *found_vport_wwpn,
6430                        struct lpfc_name *found_target_wwpn,
6431                        uint64_t *found_lun,
6432                        uint32_t *found_lun_status,
6433                        uint32_t *found_lun_pri)
6434 {
6435
6436         unsigned long flags;
6437         struct lpfc_device_data *lun_info;
6438         struct lpfc_device_id *device_id;
6439         uint64_t lun;
6440         bool found = false;
6441
6442         if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6443             !starting_lun || !found_vport_wwpn ||
6444             !found_target_wwpn || !found_lun || !found_lun_status ||
6445             (*starting_lun == NO_MORE_OAS_LUN) ||
6446             !phba->cfg_fof)
6447                 return false;
6448
6449         lun = *starting_lun;
6450         *found_lun = NO_MORE_OAS_LUN;
6451         *starting_lun = NO_MORE_OAS_LUN;
6452
6453         /* Search for lun or the lun closet in value */
6454
6455         spin_lock_irqsave(&phba->devicelock, flags);
6456         list_for_each_entry(lun_info, &phba->luns, listentry) {
6457                 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
6458                      (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6459                             sizeof(struct lpfc_name)) == 0)) &&
6460                     ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
6461                      (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6462                             sizeof(struct lpfc_name)) == 0)) &&
6463                     (lun_info->oas_enabled)) {
6464                         device_id = &lun_info->device_id;
6465                         if ((!found) &&
6466                             ((lun == FIND_FIRST_OAS_LUN) ||
6467                              (device_id->lun == lun))) {
6468                                 *found_lun = device_id->lun;
6469                                 memcpy(found_vport_wwpn,
6470                                        &device_id->vport_wwpn,
6471                                        sizeof(struct lpfc_name));
6472                                 memcpy(found_target_wwpn,
6473                                        &device_id->target_wwpn,
6474                                        sizeof(struct lpfc_name));
6475                                 if (lun_info->available)
6476                                         *found_lun_status =
6477                                                 OAS_LUN_STATUS_EXISTS;
6478                                 else
6479                                         *found_lun_status = 0;
6480                                 *found_lun_pri = lun_info->priority;
6481                                 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
6482                                         memset(vport_wwpn, 0x0,
6483                                                sizeof(struct lpfc_name));
6484                                 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
6485                                         memset(target_wwpn, 0x0,
6486                                                sizeof(struct lpfc_name));
6487                                 found = true;
6488                         } else if (found) {
6489                                 *starting_lun = device_id->lun;
6490                                 memcpy(vport_wwpn, &device_id->vport_wwpn,
6491                                        sizeof(struct lpfc_name));
6492                                 memcpy(target_wwpn, &device_id->target_wwpn,
6493                                        sizeof(struct lpfc_name));
6494                                 break;
6495                         }
6496                 }
6497         }
6498         spin_unlock_irqrestore(&phba->devicelock, flags);
6499         return found;
6500 }
6501
6502 /**
6503  * lpfc_enable_oas_lun - enables a lun for OAS operations
6504  * @phba: Pointer to host bus adapter structure.
6505  * @vport_wwpn: Pointer to vport's wwpn information
6506  * @target_wwpn: Pointer to target's wwpn information
6507  * @lun: Lun
6508  * @pri: Priority
6509  *
6510  * This routine enables a lun for oas operations.  The routines does so by
6511  * doing the following :
6512  *
6513  *   1) Checks to see if the device data for the lun has been created.
6514  *   2) If found, sets the OAS enabled flag if not set and returns.
6515  *   3) Otherwise, creates a device data structure.
6516  *   4) If successfully created, indicates the device data is for an OAS lun,
6517  *   indicates the lun is not available and add to the list of luns.
6518  *
6519  * Return codes:
6520  *   false - Error
6521  *   true - Success
6522  **/
6523 bool
6524 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6525                     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6526 {
6527
6528         struct lpfc_device_data *lun_info;
6529         unsigned long flags;
6530
6531         if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6532             !phba->cfg_fof)
6533                 return false;
6534
6535         spin_lock_irqsave(&phba->devicelock, flags);
6536
6537         /* Check to see if the device data for the lun has been created */
6538         lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
6539                                           target_wwpn, lun);
6540         if (lun_info) {
6541                 if (!lun_info->oas_enabled)
6542                         lun_info->oas_enabled = true;
6543                 lun_info->priority = pri;
6544                 spin_unlock_irqrestore(&phba->devicelock, flags);
6545                 return true;
6546         }
6547
6548         /* Create an lun info structure and add to list of luns */
6549         lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
6550                                            pri, true);
6551         if (lun_info) {
6552                 lun_info->oas_enabled = true;
6553                 lun_info->priority = pri;
6554                 lun_info->available = false;
6555                 list_add_tail(&lun_info->listentry, &phba->luns);
6556                 spin_unlock_irqrestore(&phba->devicelock, flags);
6557                 return true;
6558         }
6559         spin_unlock_irqrestore(&phba->devicelock, flags);
6560         return false;
6561 }
6562
6563 /**
6564  * lpfc_disable_oas_lun - disables a lun for OAS operations
6565  * @phba: Pointer to host bus adapter structure.
6566  * @vport_wwpn: Pointer to vport's wwpn information
6567  * @target_wwpn: Pointer to target's wwpn information
6568  * @lun: Lun
6569  * @pri: Priority
6570  *
6571  * This routine disables a lun for oas operations.  The routines does so by
6572  * doing the following :
6573  *
6574  *   1) Checks to see if the device data for the lun is created.
6575  *   2) If present, clears the flag indicating this lun is for OAS.
6576  *   3) If the lun is not available by the system, the device data is
6577  *   freed.
6578  *
6579  * Return codes:
6580  *   false - Error
6581  *   true - Success
6582  **/
6583 bool
6584 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6585                      struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6586 {
6587
6588         struct lpfc_device_data *lun_info;
6589         unsigned long flags;
6590
6591         if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6592             !phba->cfg_fof)
6593                 return false;
6594
6595         spin_lock_irqsave(&phba->devicelock, flags);
6596
6597         /* Check to see if the lun is available. */
6598         lun_info = __lpfc_get_device_data(phba,
6599                                           &phba->luns, vport_wwpn,
6600                                           target_wwpn, lun);
6601         if (lun_info) {
6602                 lun_info->oas_enabled = false;
6603                 lun_info->priority = pri;
6604                 if (!lun_info->available)
6605                         lpfc_delete_device_data(phba, lun_info);
6606                 spin_unlock_irqrestore(&phba->devicelock, flags);
6607                 return true;
6608         }
6609
6610         spin_unlock_irqrestore(&phba->devicelock, flags);
6611         return false;
6612 }
6613
6614 static int
6615 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
6616 {
6617         return SCSI_MLQUEUE_HOST_BUSY;
6618 }
6619
6620 static int
6621 lpfc_no_handler(struct scsi_cmnd *cmnd)
6622 {
6623         return FAILED;
6624 }
6625
6626 static int
6627 lpfc_no_slave(struct scsi_device *sdev)
6628 {
6629         return -ENODEV;
6630 }
6631
6632 struct scsi_host_template lpfc_template_nvme = {
6633         .module                 = THIS_MODULE,
6634         .name                   = LPFC_DRIVER_NAME,
6635         .proc_name              = LPFC_DRIVER_NAME,
6636         .info                   = lpfc_info,
6637         .queuecommand           = lpfc_no_command,
6638         .eh_abort_handler       = lpfc_no_handler,
6639         .eh_device_reset_handler = lpfc_no_handler,
6640         .eh_target_reset_handler = lpfc_no_handler,
6641         .eh_bus_reset_handler   = lpfc_no_handler,
6642         .eh_host_reset_handler  = lpfc_no_handler,
6643         .slave_alloc            = lpfc_no_slave,
6644         .slave_configure        = lpfc_no_slave,
6645         .scan_finished          = lpfc_scan_finished,
6646         .this_id                = -1,
6647         .sg_tablesize           = 1,
6648         .cmd_per_lun            = 1,
6649         .shost_attrs            = lpfc_hba_attrs,
6650         .max_sectors            = 0xFFFFFFFF,
6651         .vendor_id              = LPFC_NL_VENDOR_ID,
6652         .track_queue_depth      = 0,
6653 };
6654
6655 struct scsi_host_template lpfc_template = {
6656         .module                 = THIS_MODULE,
6657         .name                   = LPFC_DRIVER_NAME,
6658         .proc_name              = LPFC_DRIVER_NAME,
6659         .info                   = lpfc_info,
6660         .queuecommand           = lpfc_queuecommand,
6661         .eh_timed_out           = fc_eh_timed_out,
6662         .eh_abort_handler       = lpfc_abort_handler,
6663         .eh_device_reset_handler = lpfc_device_reset_handler,
6664         .eh_target_reset_handler = lpfc_target_reset_handler,
6665         .eh_bus_reset_handler   = lpfc_bus_reset_handler,
6666         .eh_host_reset_handler  = lpfc_host_reset_handler,
6667         .slave_alloc            = lpfc_slave_alloc,
6668         .slave_configure        = lpfc_slave_configure,
6669         .slave_destroy          = lpfc_slave_destroy,
6670         .scan_finished          = lpfc_scan_finished,
6671         .this_id                = -1,
6672         .sg_tablesize           = LPFC_DEFAULT_SG_SEG_CNT,
6673         .cmd_per_lun            = LPFC_CMD_PER_LUN,
6674         .shost_attrs            = lpfc_hba_attrs,
6675         .max_sectors            = 0xFFFFFFFF,
6676         .vendor_id              = LPFC_NL_VENDOR_ID,
6677         .change_queue_depth     = scsi_change_queue_depth,
6678         .track_queue_depth      = 1,
6679 };