1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/export.h>
27 #include <linux/delay.h>
28 #include <asm/unaligned.h>
29 #include <linux/t10-pi.h>
30 #include <linux/crc-t10dif.h>
31 #include <linux/blk-cgroup.h>
32 #include <net/checksum.h>
34 #include <scsi/scsi.h>
35 #include <scsi/scsi_device.h>
36 #include <scsi/scsi_eh.h>
37 #include <scsi/scsi_host.h>
38 #include <scsi/scsi_tcq.h>
39 #include <scsi/scsi_transport_fc.h>
41 #include "lpfc_version.h"
45 #include "lpfc_sli4.h"
47 #include "lpfc_disc.h"
49 #include "lpfc_scsi.h"
50 #include "lpfc_logmsg.h"
51 #include "lpfc_crtn.h"
52 #include "lpfc_vport.h"
54 #define LPFC_RESET_WAIT 2
55 #define LPFC_ABORT_WAIT 2
57 static char *dif_op_str[] = {
67 struct scsi_dif_tuple {
68 __be16 guard_tag; /* Checksum */
69 __be16 app_tag; /* Opaque storage */
70 __be32 ref_tag; /* Target LBA or indirect LBA */
73 static struct lpfc_rport_data *
74 lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
76 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
78 if (vport->phba->cfg_fof)
79 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
81 return (struct lpfc_rport_data *)sdev->hostdata;
85 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
87 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
89 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
91 lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
92 struct lpfc_vmid *vmp);
93 static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
94 *cmd, struct lpfc_vmid *vmp,
95 union lpfc_vmid_io_tag *tag);
96 static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
97 struct lpfc_vmid *vmid);
100 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
101 * @phba: Pointer to HBA object.
102 * @lpfc_cmd: lpfc scsi command object pointer.
104 * This function is called from the lpfc_prep_task_mgmt_cmd function to
105 * set the last bit in the response sge entry.
108 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
109 struct lpfc_io_buf *lpfc_cmd)
111 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
114 sgl->word2 = le32_to_cpu(sgl->word2);
115 bf_set(lpfc_sli4_sge_last, sgl, 1);
116 sgl->word2 = cpu_to_le32(sgl->word2);
120 #define LPFC_INVALID_REFTAG ((u32)-1)
123 * lpfc_update_stats - Update statistical data for the command completion
124 * @vport: The virtual port on which this call is executing.
125 * @lpfc_cmd: lpfc scsi command object pointer.
127 * This function is called on command completion and updates the
128 * statistical data for the completed command.
131 lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
133 struct lpfc_hba *phba = vport->phba;
134 struct lpfc_rport_data *rdata;
135 struct lpfc_nodelist *pnode;
136 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
138 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
139 unsigned long latency;
142 if (!vport->stat_data_enabled ||
143 vport->stat_data_blocked ||
147 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
148 rdata = lpfc_cmd->rdata;
149 pnode = rdata->pnode;
151 spin_lock_irqsave(shost->host_lock, flags);
154 (phba->bucket_type == LPFC_NO_BUCKET)) {
155 spin_unlock_irqrestore(shost->host_lock, flags);
159 if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
160 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
162 /* check array subscript bounds */
165 else if (i >= LPFC_MAX_BUCKET_COUNT)
166 i = LPFC_MAX_BUCKET_COUNT - 1;
168 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
169 if (latency <= (phba->bucket_base +
170 ((1<<i)*phba->bucket_step)))
174 pnode->lat_data[i].cmd_count++;
175 spin_unlock_irqrestore(shost->host_lock, flags);
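/*
 * Illustrative sketch, not part of the driver: how the two latency bucket
 * indices computed in lpfc_update_stats() above are derived.  The helper
 * names and the plain integer parameters are hypothetical stand-ins for the
 * phba->bucket_base/bucket_step fields and LPFC_MAX_BUCKET_COUNT.
 */
static inline int example_linear_bucket(unsigned long latency,
					unsigned long base,
					unsigned long step,
					int max_buckets)
{
	int i = (latency + step - 1 - base) / step;

	if (i < 0)
		i = 0;
	else if (i >= max_buckets)
		i = max_buckets - 1;
	return i;
}

static inline int example_log_bucket(unsigned long latency,
				     unsigned long base,
				     unsigned long step,
				     int max_buckets)
{
	int i;

	/* smallest i such that latency <= base + (1 << i) * step */
	for (i = 0; i < max_buckets - 1; i++)
		if (latency <= base + ((1UL << i) * step))
			break;
	return i;
}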
179 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
180 * @phba: The Hba for which this call is being executed.
182 * This routine is called when there is a resource error in the driver or firmware.
183 * It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, posting at most one
184 * event per second, and wakes up the worker thread of @phba to process the
185 * WORKER_RAMP_DOWN_QUEUE event.
187 * This routine should be called with no lock held.
190 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
194 unsigned long expires;
196 spin_lock_irqsave(&phba->hbalock, flags);
197 atomic_inc(&phba->num_rsrc_err);
198 phba->last_rsrc_error_time = jiffies;
200 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
201 if (time_after(expires, jiffies)) {
202 spin_unlock_irqrestore(&phba->hbalock, flags);
206 phba->last_ramp_down_time = jiffies;
208 spin_unlock_irqrestore(&phba->hbalock, flags);
210 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
211 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
213 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
214 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
217 lpfc_worker_wake_up(phba);
222 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
223 * @phba: The Hba for which this call is being executed.
225 * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker
226 * thread. This routine reduces the queue depth for all scsi devices on each vport
227 * associated with @phba.
230 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
232 struct lpfc_vport **vports;
233 struct Scsi_Host *shost;
234 struct scsi_device *sdev;
235 unsigned long new_queue_depth;
236 unsigned long num_rsrc_err, num_cmd_success;
239 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
240 num_cmd_success = atomic_read(&phba->num_cmd_success);
243 * The error and success command counters are global per
244 * driver instance. If another handler has already
245 * operated on this error event, just exit.
247 if (num_rsrc_err == 0)
250 vports = lpfc_create_vport_work_array(phba);
252 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
253 shost = lpfc_shost_from_vport(vports[i]);
254 shost_for_each_device(sdev, shost) {
256 sdev->queue_depth * num_rsrc_err /
257 (num_rsrc_err + num_cmd_success);
258 if (!new_queue_depth)
259 new_queue_depth = sdev->queue_depth - 1;
261 new_queue_depth = sdev->queue_depth -
263 scsi_change_queue_depth(sdev, new_queue_depth);
266 lpfc_destroy_vport_work_array(phba, vports);
267 atomic_set(&phba->num_rsrc_err, 0);
268 atomic_set(&phba->num_cmd_success, 0);
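/*
 * Illustrative sketch, not part of the driver: the queue depth reduction
 * applied in the handler above.  The depth is cut by the fraction of recent
 * commands that failed for lack of resources (num_rsrc_err out of
 * num_rsrc_err + num_cmd_success) and always drops by at least one.  The
 * helper name is hypothetical; num_rsrc_err is assumed non-zero, as the
 * handler returns early otherwise.
 */
static inline unsigned long example_ramp_down_depth(unsigned long cur_depth,
						    unsigned long num_rsrc_err,
						    unsigned long num_cmd_success)
{
	unsigned long cut = cur_depth * num_rsrc_err /
			    (num_rsrc_err + num_cmd_success);

	return cut ? cur_depth - cut : cur_depth - 1;
}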
272 * lpfc_scsi_dev_block - set all scsi hosts to block state
273 * @phba: Pointer to HBA context object.
275 * This function walks the vport list and sets each SCSI host to the block state
276 * by invoking the fc_remote_port_delete() routine. This function is invoked
277 * by EEH when the device's PCI slot has been permanently disabled.
280 lpfc_scsi_dev_block(struct lpfc_hba *phba)
282 struct lpfc_vport **vports;
283 struct Scsi_Host *shost;
284 struct scsi_device *sdev;
285 struct fc_rport *rport;
288 vports = lpfc_create_vport_work_array(phba);
290 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
291 shost = lpfc_shost_from_vport(vports[i]);
292 shost_for_each_device(sdev, shost) {
293 rport = starget_to_rport(scsi_target(sdev));
294 fc_remote_port_delete(rport);
297 lpfc_destroy_vport_work_array(phba, vports);
301 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
302 * @vport: The virtual port for which this call being executed.
303 * @num_to_alloc: The requested number of buffers to allocate.
305 * This routine allocates a scsi buffer for a device with the SLI-3 interface spec.
306 * The scsi buffer contains all the necessary information needed to initiate
307 * a SCSI I/O. The non-DMAable buffer region contains information to build
308 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
309 * and the initial BPL. In addition to allocating memory, the FCP CMND and
310 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
313 * int - number of scsi buffers that were allocated.
314 * 0 = failure; a value less than num_to_alloc indicates a partial failure.
317 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
319 struct lpfc_hba *phba = vport->phba;
320 struct lpfc_io_buf *psb;
321 struct ulp_bde64 *bpl;
323 dma_addr_t pdma_phys_fcp_cmd;
324 dma_addr_t pdma_phys_fcp_rsp;
325 dma_addr_t pdma_phys_sgl;
329 bpl_size = phba->cfg_sg_dma_buf_size -
330 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
332 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
333 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
334 num_to_alloc, phba->cfg_sg_dma_buf_size,
335 (int)sizeof(struct fcp_cmnd),
336 (int)sizeof(struct fcp_rsp), bpl_size);
338 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
339 psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
344 * Get memory from the pci pool to map the virt space to pci
345 * bus space for an I/O. The DMA buffer includes space for the
346 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
347 * necessary to support the sg_tablesize.
349 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
350 GFP_KERNEL, &psb->dma_handle);
357 /* Allocate iotag for psb->cur_iocbq. */
358 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
360 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
361 psb->data, psb->dma_handle);
365 psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP;
367 psb->fcp_cmnd = psb->data;
368 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
369 psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
370 sizeof(struct fcp_rsp);
372 /* Initialize local short-hand pointers. */
373 bpl = (struct ulp_bde64 *)psb->dma_sgl;
374 pdma_phys_fcp_cmd = psb->dma_handle;
375 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
376 pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
377 sizeof(struct fcp_rsp);
380 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
381 * are sg list bdes. Initialize the first two and leave the
382 * rest for queuecommand.
384 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
385 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
386 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
387 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
388 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
390 /* Setup the physical region for the FCP RSP */
391 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
392 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
393 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
394 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
395 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
398 * Since the IOCB for the FCP I/O is built into this
399 * lpfc_scsi_buf, initialize it with all known data now.
401 iocb = &psb->cur_iocbq.iocb;
402 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
403 if ((phba->sli_rev == 3) &&
404 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
405 /* fill in immediate fcp command BDE */
406 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
407 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
408 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
410 iocb->un.fcpi64.bdl.addrHigh = 0;
411 iocb->ulpBdeCount = 0;
413 /* fill in response BDE */
414 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
416 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
417 sizeof(struct fcp_rsp);
418 iocb->unsli3.fcp_ext.rbde.addrLow =
419 putPaddrLow(pdma_phys_fcp_rsp);
420 iocb->unsli3.fcp_ext.rbde.addrHigh =
421 putPaddrHigh(pdma_phys_fcp_rsp);
423 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
424 iocb->un.fcpi64.bdl.bdeSize =
425 (2 * sizeof(struct ulp_bde64));
426 iocb->un.fcpi64.bdl.addrLow =
427 putPaddrLow(pdma_phys_sgl);
428 iocb->un.fcpi64.bdl.addrHigh =
429 putPaddrHigh(pdma_phys_sgl);
430 iocb->ulpBdeCount = 1;
433 iocb->ulpClass = CLASS3;
434 psb->status = IOSTAT_SUCCESS;
435 /* Put it back into the SCSI buffer list */
436 psb->cur_iocbq.context1 = psb;
437 spin_lock_init(&psb->buf_lock);
438 lpfc_release_scsi_buf_s3(phba, psb);
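/*
 * Illustrative sketch, not part of the driver: the layout carved out of each
 * lpfc_sg_dma_buf_pool allocation above.  The struct and helper names are
 * hypothetical; the driver derives the same three addresses inline from
 * psb->dma_handle.
 */
struct example_sli3_buf_layout {
	dma_addr_t fcp_cmd;	/* start of buffer: struct fcp_cmnd    */
	dma_addr_t fcp_rsp;	/* followed by: struct fcp_rsp         */
	dma_addr_t bpl;		/* remainder: BPL of ulp_bde64 entries */
};

static inline struct example_sli3_buf_layout
example_sli3_layout_of(dma_addr_t dma_handle)
{
	struct example_sli3_buf_layout l;

	l.fcp_cmd = dma_handle;
	l.fcp_rsp = dma_handle + sizeof(struct fcp_cmnd);
	l.bpl     = dma_handle + sizeof(struct fcp_cmnd) +
		    sizeof(struct fcp_rsp);
	return l;
}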
446 * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
447 * @vport: pointer to lpfc vport data structure.
449 * This routine is invoked by the vport cleanup for deletions and the cleanup
450 * for an ndlp on removal.
453 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
455 struct lpfc_hba *phba = vport->phba;
456 struct lpfc_io_buf *psb, *next_psb;
457 struct lpfc_sli4_hdw_queue *qp;
458 unsigned long iflag = 0;
461 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
464 spin_lock_irqsave(&phba->hbalock, iflag);
465 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
466 qp = &phba->sli4_hba.hdwq[idx];
468 spin_lock(&qp->abts_io_buf_list_lock);
469 list_for_each_entry_safe(psb, next_psb,
470 &qp->lpfc_abts_io_buf_list, list) {
471 if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME)
474 if (psb->rdata && psb->rdata->pnode &&
475 psb->rdata->pnode->vport == vport)
478 spin_unlock(&qp->abts_io_buf_list_lock);
480 spin_unlock_irqrestore(&phba->hbalock, iflag);
484 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
485 * @phba: pointer to lpfc hba data structure.
486 * @axri: pointer to the fcp xri abort wcqe structure.
487 * @idx: index into hdwq
489 * This routine is invoked by the worker thread to process a SLI4 fast-path
490 * FCP or NVME aborted xri.
493 lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
494 struct sli4_wcqe_xri_aborted *axri, int idx)
498 struct lpfc_io_buf *psb, *next_psb;
499 struct lpfc_sli4_hdw_queue *qp;
500 unsigned long iflag = 0;
501 struct lpfc_iocbq *iocbq;
503 struct lpfc_nodelist *ndlp;
505 struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
506 struct scsi_cmnd *cmd;
509 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
511 offline = pci_channel_offline(phba->pcidev);
513 xri = bf_get(lpfc_wcqe_xa_xri, axri);
514 rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
516 qp = &phba->sli4_hba.hdwq[idx];
517 spin_lock_irqsave(&phba->hbalock, iflag);
518 spin_lock(&qp->abts_io_buf_list_lock);
519 list_for_each_entry_safe(psb, next_psb,
520 &qp->lpfc_abts_io_buf_list, list) {
522 xri = psb->cur_iocbq.sli4_xritag;
523 if (psb->cur_iocbq.sli4_xritag == xri) {
524 list_del_init(&psb->list);
525 psb->flags &= ~LPFC_SBUF_XBUSY;
526 psb->status = IOSTAT_SUCCESS;
527 if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) {
528 qp->abts_nvme_io_bufs--;
529 spin_unlock(&qp->abts_io_buf_list_lock);
530 spin_unlock_irqrestore(&phba->hbalock, iflag);
532 lpfc_sli4_nvme_xri_aborted(phba, axri,
536 lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
537 spin_lock_irqsave(&phba->hbalock, iflag);
538 spin_lock(&qp->abts_io_buf_list_lock);
541 qp->abts_scsi_io_bufs--;
542 spin_unlock(&qp->abts_io_buf_list_lock);
544 if (psb->rdata && psb->rdata->pnode)
545 ndlp = psb->rdata->pnode;
549 rrq_empty = list_empty(&phba->active_rrq_list);
550 spin_unlock_irqrestore(&phba->hbalock, iflag);
551 if (ndlp && !offline) {
552 lpfc_set_rrq_active(phba, ndlp,
553 psb->cur_iocbq.sli4_lxritag, rxid, 1);
554 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
557 if (phba->cfg_fcp_wait_abts_rsp || offline) {
558 spin_lock_irqsave(&psb->buf_lock, iflag);
561 spin_unlock_irqrestore(&psb->buf_lock, iflag);
563 /* The sdev is not guaranteed to be valid post
570 * We expect there is an abort thread waiting
571 * for command completion; wake up the thread.
573 spin_lock_irqsave(&psb->buf_lock, iflag);
574 psb->cur_iocbq.cmd_flag &=
575 ~LPFC_DRIVER_ABORTED;
578 spin_unlock_irqrestore(&psb->buf_lock, iflag);
581 lpfc_release_scsi_buf_s4(phba, psb);
583 lpfc_worker_wake_up(phba);
586 spin_lock_irqsave(&phba->hbalock, iflag);
587 spin_lock(&qp->abts_io_buf_list_lock);
591 spin_unlock(&qp->abts_io_buf_list_lock);
593 for (i = 1; i <= phba->sli.last_iotag; i++) {
594 iocbq = phba->sli.iocbq_lookup[i];
596 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
597 (iocbq->cmd_flag & LPFC_IO_LIBDFC))
599 if (iocbq->sli4_xritag != xri)
601 psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
602 psb->flags &= ~LPFC_SBUF_XBUSY;
603 spin_unlock_irqrestore(&phba->hbalock, iflag);
604 if (!list_empty(&pring->txq))
605 lpfc_worker_wake_up(phba);
609 spin_unlock_irqrestore(&phba->hbalock, iflag);
613 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
614 * @phba: The HBA for which this call is being executed.
615 * @ndlp: pointer to a node-list data structure.
616 * @cmnd: Pointer to scsi_cmnd data structure.
618 * This routine removes a scsi buffer from the head of the @phba lpfc_scsi_buf_list
619 * and returns it to the caller.
623 * Pointer to lpfc_scsi_buf - Success
625 static struct lpfc_io_buf *
626 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
627 struct scsi_cmnd *cmnd)
629 struct lpfc_io_buf *lpfc_cmd = NULL;
630 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
631 unsigned long iflag = 0;
633 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
634 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
637 spin_lock(&phba->scsi_buf_list_put_lock);
638 list_splice(&phba->lpfc_scsi_buf_list_put,
639 &phba->lpfc_scsi_buf_list_get);
640 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
641 list_remove_head(scsi_buf_list_get, lpfc_cmd,
642 struct lpfc_io_buf, list);
643 spin_unlock(&phba->scsi_buf_list_put_lock);
645 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
647 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
648 atomic_inc(&ndlp->cmd_pending);
649 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
654 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
655 * @phba: The HBA for which this call is being executed.
656 * @ndlp: pointer to a node-list data structure.
657 * @cmnd: Pointer to scsi_cmnd data structure.
659 * This routine removes a scsi buffer from the head of the @hdwq io_buf_list
660 * and returns it to the caller.
664 * Pointer to lpfc_scsi_buf - Success
666 static struct lpfc_io_buf *
667 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
668 struct scsi_cmnd *cmnd)
670 struct lpfc_io_buf *lpfc_cmd;
671 struct lpfc_sli4_hdw_queue *qp;
672 struct sli4_sge *sgl;
673 dma_addr_t pdma_phys_fcp_rsp;
674 dma_addr_t pdma_phys_fcp_cmd;
677 struct fcp_cmd_rsp_buf *tmp = NULL;
679 cpu = raw_smp_processor_id();
680 if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
681 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
682 idx = blk_mq_unique_tag_to_hwq(tag);
684 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
687 lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
688 !phba->cfg_xri_rebalancing);
690 qp = &phba->sli4_hba.hdwq[idx];
695 /* Setup key fields in buffer that may have been changed
696 * if other protocols used this buffer.
698 lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP;
699 lpfc_cmd->prot_seg_cnt = 0;
700 lpfc_cmd->seg_cnt = 0;
701 lpfc_cmd->timeout = 0;
703 lpfc_cmd->start_time = jiffies;
704 lpfc_cmd->waitq = NULL;
706 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
707 lpfc_cmd->prot_data_type = 0;
709 tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
711 lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
715 lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
716 lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
719 * The first two SGEs are the FCP_CMD and FCP_RSP.
720 * The balance are sg list bdes. Initialize the
721 * first two and leave the rest for queuecommand.
723 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
724 pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
725 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
726 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
727 sgl->word2 = le32_to_cpu(sgl->word2);
728 bf_set(lpfc_sli4_sge_last, sgl, 0);
729 sgl->word2 = cpu_to_le32(sgl->word2);
730 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
733 /* Setup the physical region for the FCP RSP */
734 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
735 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
736 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
737 sgl->word2 = le32_to_cpu(sgl->word2);
738 bf_set(lpfc_sli4_sge_last, sgl, 1);
739 sgl->word2 = cpu_to_le32(sgl->word2);
740 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
742 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
743 atomic_inc(&ndlp->cmd_pending);
744 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
749 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
750 * @phba: The HBA for which this call is being executed.
751 * @ndlp: pointer to a node-list data structure.
752 * @cmnd: Pointer to scsi_cmnd data structure.
754 * This routine removes a scsi buffer from the head of the @phba lpfc_scsi_buf_list
755 * and returns it to the caller.
759 * Pointer to lpfc_scsi_buf - Success
761 static struct lpfc_io_buf*
762 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
763 struct scsi_cmnd *cmnd)
765 return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
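/*
 * Illustrative sketch, not part of the driver: the dispatch pattern used by
 * the wrapper above.  In the real driver the phba-level function pointer is
 * filled in once during SLI-rev specific setup with the _s3 or _s4 variant
 * shown earlier; the wrapper then simply indirects through it.  The struct,
 * helper name, and the selection by LPFC_SLI_REV4 below are hypothetical
 * simplifications of that setup.
 */
struct example_hba_ops {
	struct lpfc_io_buf *(*get_scsi_buf)(struct lpfc_hba *,
					    struct lpfc_nodelist *,
					    struct scsi_cmnd *);
};

static inline void example_setup_ops(struct example_hba_ops *ops, int sli_rev)
{
	ops->get_scsi_buf = (sli_rev == LPFC_SLI_REV4) ?
			    lpfc_get_scsi_buf_s4 : lpfc_get_scsi_buf_s3;
}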
769 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
770 * @phba: The Hba for which this call is being executed.
771 * @psb: The scsi buffer which is being released.
773 * This routine releases the @psb scsi buffer by adding it to the tail of the
774 * @phba lpfc_scsi_buf_list list.
777 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
779 unsigned long iflag = 0;
782 psb->prot_seg_cnt = 0;
784 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
786 psb->cur_iocbq.cmd_flag = LPFC_IO_FCP;
787 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
788 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
792 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
793 * @phba: The Hba for which this call is being executed.
794 * @psb: The scsi buffer which is being released.
796 * This routine releases the @psb scsi buffer by adding it to the tail of the
797 * @hdwq io_buf_list. For SLI4, XRIs are tied to the scsi buffer and cannot
798 * be reused for at least RA_TOV amount of time if the I/O was aborted.
802 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
804 struct lpfc_sli4_hdw_queue *qp;
805 unsigned long iflag = 0;
808 psb->prot_seg_cnt = 0;
811 if (psb->flags & LPFC_SBUF_XBUSY) {
812 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
813 if (!phba->cfg_fcp_wait_abts_rsp)
815 list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
816 qp->abts_scsi_io_bufs++;
817 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
819 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
824 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
825 * @phba: The Hba for which this call is being executed.
826 * @psb: The scsi buffer which is being released.
828 * This routine releases the @psb scsi buffer by adding it to the tail of the
829 * @phba lpfc_scsi_buf_list list.
832 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
834 if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
835 atomic_dec(&psb->ndlp->cmd_pending);
837 psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
838 phba->lpfc_release_scsi_buf(phba, psb);
842 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
843 * @data: A pointer to the immediate command data portion of the IOCB.
844 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
846 * The routine copies the entire FCP command from @fcp_cmnd to @data while
847 * byte swapping the data to big endian format for transmission on the wire.
850 lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
854 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
855 i += sizeof(uint32_t), j++) {
856 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
861 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
862 * @phba: The Hba for which this call is being executed.
863 * @lpfc_cmd: The scsi buffer which is going to be mapped.
865 * This routine does the pci dma mapping for the scatter-gather list of the scsi
866 * cmnd field of @lpfc_cmd for a device with the SLI-3 interface spec. It scans
867 * through the sg elements and formats the bdes. It also initializes all
868 * IOCB fields which are dependent on the scsi command request buffer.
875 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
877 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
878 struct scatterlist *sgel = NULL;
879 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
880 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
881 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
882 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
883 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
885 uint32_t num_bde = 0;
886 int nseg, datadir = scsi_cmnd->sc_data_direction;
889 * There are three possibilities here - use scatter-gather segment, use
890 * the single mapping, or neither. Start the lpfc command prep by
891 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
895 if (scsi_sg_count(scsi_cmnd)) {
897 * The driver stores the segment count returned from dma_map_sg
898 * because this is a count of dma-mappings used to map the use_sg
899 * pages. They are not guaranteed to be the same for those
900 * architectures that implement an IOMMU.
903 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
904 scsi_sg_count(scsi_cmnd), datadir);
908 lpfc_cmd->seg_cnt = nseg;
909 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
910 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
911 "9064 BLKGRD: %s: Too many sg segments"
912 " from dma_map_sg. Config %d, seg_cnt"
913 " %d\n", __func__, phba->cfg_sg_seg_cnt,
915 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
916 lpfc_cmd->seg_cnt = 0;
917 scsi_dma_unmap(scsi_cmnd);
922 * The driver established a maximum scatter-gather segment count
923 * during probe that limits the number of sg elements in any
924 * single scsi command. Just run through the seg_cnt and format
926 * When using SLI-3 the driver will try to fit all the BDEs into
927 * the IOCB. If it can't then the BDEs get added to a BPL as it
928 * does for SLI-2 mode.
930 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
931 physaddr = sg_dma_address(sgel);
932 if (phba->sli_rev == 3 &&
933 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
934 !(iocbq->cmd_flag & DSS_SECURITY_OP) &&
935 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
936 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
937 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
938 data_bde->addrLow = putPaddrLow(physaddr);
939 data_bde->addrHigh = putPaddrHigh(physaddr);
942 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
943 bpl->tus.f.bdeSize = sg_dma_len(sgel);
944 bpl->tus.w = le32_to_cpu(bpl->tus.w);
946 le32_to_cpu(putPaddrLow(physaddr));
948 le32_to_cpu(putPaddrHigh(physaddr));
955 * Finish initializing those IOCB fields that are dependent on the
956 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
957 * explicitly reinitialized and for SLI-3 the extended bde count is
958 * explicitly reinitialized since all iocb memory resources are reused.
960 if (phba->sli_rev == 3 &&
961 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
962 !(iocbq->cmd_flag & DSS_SECURITY_OP)) {
963 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
965 * The extended IOCB format can only fit 3 BDE or a BPL.
966 * This I/O has more than 3 BDE so the 1st data bde will
967 * be a BPL that is filled in here.
969 physaddr = lpfc_cmd->dma_handle;
970 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
971 data_bde->tus.f.bdeSize = (num_bde *
972 sizeof(struct ulp_bde64));
973 physaddr += (sizeof(struct fcp_cmnd) +
974 sizeof(struct fcp_rsp) +
975 (2 * sizeof(struct ulp_bde64)));
976 data_bde->addrHigh = putPaddrHigh(physaddr);
977 data_bde->addrLow = putPaddrLow(physaddr);
978 /* ebde count includes the response bde and data bpl */
979 iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
981 /* ebde count includes the response bde and data bdes */
982 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
985 iocb_cmd->un.fcpi64.bdl.bdeSize =
986 ((num_bde + 2) * sizeof(struct ulp_bde64));
987 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
989 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
992 * Due to the difference in data length between the DIF and non-DIF paths,
993 * we need to set word 4 of the IOCB here.
995 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
996 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
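/*
 * Illustrative sketch, not part of the driver: how the extended-IOCB bde
 * count above is chosen for SLI-3 without BlockGuard.  Up to
 * LPFC_EXT_DATA_BDE_COUNT data BDEs fit directly in the IOCB; beyond that a
 * single BPL pointer is used instead.  The ebde count always includes the
 * response BDE.  The helper name is hypothetical.
 */
static inline uint32_t example_sli3_ebde_count(uint32_t num_data_bde)
{
	if (num_data_bde > LPFC_EXT_DATA_BDE_COUNT)
		return 2;		/* response bde + one data BPL     */
	return num_data_bde + 1;	/* response bde + inline data bdes */
}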
1000 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1002 /* Return BG_ERR_INIT if error injection is detected by Initiator */
1003 #define BG_ERR_INIT 0x1
1004 /* Return BG_ERR_TGT if error injection is detected by Target */
1005 #define BG_ERR_TGT 0x2
1006 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
1007 #define BG_ERR_SWAP 0x10
1009 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
1012 #define BG_ERR_CHECK 0x20
1015 * lpfc_bg_err_inject - Determine if we should inject an error
1016 * @phba: The Hba for which this call is being executed.
1017 * @sc: The SCSI command to examine
1018 * @reftag: (out) BlockGuard reference tag for transmitted data
1019 * @apptag: (out) BlockGuard application tag for transmitted data
1020 * @new_guard: (in) Value to replace CRC with if needed
1022 * Returns BG_ERR_* bit mask or 0 if request ignored
1025 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1026 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1028 struct scatterlist *sgpe; /* s/g prot entry */
1029 struct lpfc_io_buf *lpfc_cmd = NULL;
1030 struct scsi_dif_tuple *src = NULL;
1031 struct lpfc_nodelist *ndlp;
1032 struct lpfc_rport_data *rdata;
1033 uint32_t op = scsi_get_prot_op(sc);
1040 if (op == SCSI_PROT_NORMAL)
1043 sgpe = scsi_prot_sglist(sc);
1044 lba = scsi_prot_ref_tag(sc);
1045 if (lba == LPFC_INVALID_REFTAG)
1048 /* First check if we need to match the LBA */
1049 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1050 blksize = scsi_prot_interval(sc);
1051 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1053 /* Make sure we have the right LBA if one is specified */
1054 if (phba->lpfc_injerr_lba < (u64)lba ||
1055 (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
1058 blockoff = phba->lpfc_injerr_lba - (u64)lba;
1059 numblks = sg_dma_len(sgpe) /
1060 sizeof(struct scsi_dif_tuple);
1061 if (numblks < blockoff)
1066 /* Next check if we need to match the remote NPortID or WWPN */
1067 rdata = lpfc_rport_data_from_scsi_device(sc->device);
1068 if (rdata && rdata->pnode) {
1069 ndlp = rdata->pnode;
1071 /* Make sure we have the right NPortID if one is specified */
1072 if (phba->lpfc_injerr_nportid &&
1073 (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1077 * Make sure we have the right WWPN if one is specified.
1078 * wwn[0] should be a non-zero NAA in a good WWPN.
1080 if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
1081 (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1082 sizeof(struct lpfc_name)) != 0))
1086 /* Setup a ptr to the protection data if the SCSI host provides it */
1088 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1090 lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
1093 /* Should we change the Reference Tag */
1095 if (phba->lpfc_injerr_wref_cnt) {
1097 case SCSI_PROT_WRITE_PASS:
1100 * For WRITE_PASS, force the error
1101 * to be sent on the wire. It should
1102 * be detected by the Target.
1103 * If blockoff != 0, the error will be
1104 * inserted in the middle of the IO.
1107 lpfc_printf_log(phba, KERN_ERR,
1109 "9076 BLKGRD: Injecting reftag error: "
1110 "write lba x%lx + x%x oldrefTag x%x\n",
1111 (unsigned long)lba, blockoff,
1112 be32_to_cpu(src->ref_tag));
1115 * Save the old ref_tag so we can
1116 * restore it on completion.
1119 lpfc_cmd->prot_data_type =
1121 lpfc_cmd->prot_data_segment =
1123 lpfc_cmd->prot_data =
1126 src->ref_tag = cpu_to_be32(0xDEADBEEF);
1127 phba->lpfc_injerr_wref_cnt--;
1128 if (phba->lpfc_injerr_wref_cnt == 0) {
1129 phba->lpfc_injerr_nportid = 0;
1130 phba->lpfc_injerr_lba =
1131 LPFC_INJERR_LBA_OFF;
1132 memset(&phba->lpfc_injerr_wwpn,
1133 0, sizeof(struct lpfc_name));
1135 rc = BG_ERR_TGT | BG_ERR_CHECK;
1140 case SCSI_PROT_WRITE_INSERT:
1142 * For WRITE_INSERT, force the error
1143 * to be sent on the wire. It should be
1144 * detected by the Target.
1146 /* DEADBEEF will be the reftag on the wire */
1147 *reftag = 0xDEADBEEF;
1148 phba->lpfc_injerr_wref_cnt--;
1149 if (phba->lpfc_injerr_wref_cnt == 0) {
1150 phba->lpfc_injerr_nportid = 0;
1151 phba->lpfc_injerr_lba =
1152 LPFC_INJERR_LBA_OFF;
1153 memset(&phba->lpfc_injerr_wwpn,
1154 0, sizeof(struct lpfc_name));
1156 rc = BG_ERR_TGT | BG_ERR_CHECK;
1158 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1159 "9078 BLKGRD: Injecting reftag error: "
1160 "write lba x%lx\n", (unsigned long)lba);
1162 case SCSI_PROT_WRITE_STRIP:
1164 * For WRITE_STRIP and WRITE_PASS,
1165 * force the error on data
1166 * being copied from SLI-Host to SLI-Port.
1168 *reftag = 0xDEADBEEF;
1169 phba->lpfc_injerr_wref_cnt--;
1170 if (phba->lpfc_injerr_wref_cnt == 0) {
1171 phba->lpfc_injerr_nportid = 0;
1172 phba->lpfc_injerr_lba =
1173 LPFC_INJERR_LBA_OFF;
1174 memset(&phba->lpfc_injerr_wwpn,
1175 0, sizeof(struct lpfc_name));
1179 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1180 "9077 BLKGRD: Injecting reftag error: "
1181 "write lba x%lx\n", (unsigned long)lba);
1185 if (phba->lpfc_injerr_rref_cnt) {
1187 case SCSI_PROT_READ_INSERT:
1188 case SCSI_PROT_READ_STRIP:
1189 case SCSI_PROT_READ_PASS:
1191 * For READ_STRIP and READ_PASS, force the
1192 * error on data being read off the wire. It
1193 * should force an IO error to the driver.
1195 *reftag = 0xDEADBEEF;
1196 phba->lpfc_injerr_rref_cnt--;
1197 if (phba->lpfc_injerr_rref_cnt == 0) {
1198 phba->lpfc_injerr_nportid = 0;
1199 phba->lpfc_injerr_lba =
1200 LPFC_INJERR_LBA_OFF;
1201 memset(&phba->lpfc_injerr_wwpn,
1202 0, sizeof(struct lpfc_name));
1206 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1207 "9079 BLKGRD: Injecting reftag error: "
1208 "read lba x%lx\n", (unsigned long)lba);
1214 /* Should we change the Application Tag */
1216 if (phba->lpfc_injerr_wapp_cnt) {
1218 case SCSI_PROT_WRITE_PASS:
1221 * For WRITE_PASS, force the error
1222 * to be sent on the wire. It should
1223 * be detected by the Target.
1224 * If blockoff != 0, the error will be
1225 * inserted in the middle of the IO.
1228 lpfc_printf_log(phba, KERN_ERR,
1230 "9080 BLKGRD: Injecting apptag error: "
1231 "write lba x%lx + x%x oldappTag x%x\n",
1232 (unsigned long)lba, blockoff,
1233 be16_to_cpu(src->app_tag));
1236 * Save the old app_tag so we can
1237 * restore it on completion.
1240 lpfc_cmd->prot_data_type =
1242 lpfc_cmd->prot_data_segment =
1244 lpfc_cmd->prot_data =
1247 src->app_tag = cpu_to_be16(0xDEAD);
1248 phba->lpfc_injerr_wapp_cnt--;
1249 if (phba->lpfc_injerr_wapp_cnt == 0) {
1250 phba->lpfc_injerr_nportid = 0;
1251 phba->lpfc_injerr_lba =
1252 LPFC_INJERR_LBA_OFF;
1253 memset(&phba->lpfc_injerr_wwpn,
1254 0, sizeof(struct lpfc_name));
1256 rc = BG_ERR_TGT | BG_ERR_CHECK;
1260 case SCSI_PROT_WRITE_INSERT:
1262 * For WRITE_INSERT, force the
1263 * error to be sent on the wire. It should be
1264 * detected by the Target.
1266 /* DEAD will be the apptag on the wire */
1268 phba->lpfc_injerr_wapp_cnt--;
1269 if (phba->lpfc_injerr_wapp_cnt == 0) {
1270 phba->lpfc_injerr_nportid = 0;
1271 phba->lpfc_injerr_lba =
1272 LPFC_INJERR_LBA_OFF;
1273 memset(&phba->lpfc_injerr_wwpn,
1274 0, sizeof(struct lpfc_name));
1276 rc = BG_ERR_TGT | BG_ERR_CHECK;
1278 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1279 "0813 BLKGRD: Injecting apptag error: "
1280 "write lba x%lx\n", (unsigned long)lba);
1282 case SCSI_PROT_WRITE_STRIP:
1284 * For WRITE_STRIP and WRITE_PASS,
1285 * force the error on data
1286 * being copied from SLI-Host to SLI-Port.
1289 phba->lpfc_injerr_wapp_cnt--;
1290 if (phba->lpfc_injerr_wapp_cnt == 0) {
1291 phba->lpfc_injerr_nportid = 0;
1292 phba->lpfc_injerr_lba =
1293 LPFC_INJERR_LBA_OFF;
1294 memset(&phba->lpfc_injerr_wwpn,
1295 0, sizeof(struct lpfc_name));
1299 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1300 "0812 BLKGRD: Injecting apptag error: "
1301 "write lba x%lx\n", (unsigned long)lba);
1305 if (phba->lpfc_injerr_rapp_cnt) {
1307 case SCSI_PROT_READ_INSERT:
1308 case SCSI_PROT_READ_STRIP:
1309 case SCSI_PROT_READ_PASS:
1311 * For READ_STRIP and READ_PASS, force the
1312 * error on data being read off the wire. It
1313 * should force an IO error to the driver.
1316 phba->lpfc_injerr_rapp_cnt--;
1317 if (phba->lpfc_injerr_rapp_cnt == 0) {
1318 phba->lpfc_injerr_nportid = 0;
1319 phba->lpfc_injerr_lba =
1320 LPFC_INJERR_LBA_OFF;
1321 memset(&phba->lpfc_injerr_wwpn,
1322 0, sizeof(struct lpfc_name));
1326 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1327 "0814 BLKGRD: Injecting apptag error: "
1328 "read lba x%lx\n", (unsigned long)lba);
1335 /* Should we change the Guard Tag */
1337 if (phba->lpfc_injerr_wgrd_cnt) {
1339 case SCSI_PROT_WRITE_PASS:
1343 case SCSI_PROT_WRITE_INSERT:
1345 * For WRITE_INSERT, force the
1346 * error to be sent on the wire. It should be
1347 * detected by the Target.
1349 phba->lpfc_injerr_wgrd_cnt--;
1350 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1351 phba->lpfc_injerr_nportid = 0;
1352 phba->lpfc_injerr_lba =
1353 LPFC_INJERR_LBA_OFF;
1354 memset(&phba->lpfc_injerr_wwpn,
1355 0, sizeof(struct lpfc_name));
1358 rc |= BG_ERR_TGT | BG_ERR_SWAP;
1359 /* Signals the caller to swap CRC->CSUM */
1361 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1362 "0817 BLKGRD: Injecting guard error: "
1363 "write lba x%lx\n", (unsigned long)lba);
1365 case SCSI_PROT_WRITE_STRIP:
1367 * For WRITE_STRIP and WRITE_PASS,
1368 * force the error on data
1369 * being copied from SLI-Host to SLI-Port.
1371 phba->lpfc_injerr_wgrd_cnt--;
1372 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1373 phba->lpfc_injerr_nportid = 0;
1374 phba->lpfc_injerr_lba =
1375 LPFC_INJERR_LBA_OFF;
1376 memset(&phba->lpfc_injerr_wwpn,
1377 0, sizeof(struct lpfc_name));
1380 rc = BG_ERR_INIT | BG_ERR_SWAP;
1381 /* Signals the caller to swap CRC->CSUM */
1383 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1384 "0816 BLKGRD: Injecting guard error: "
1385 "write lba x%lx\n", (unsigned long)lba);
1389 if (phba->lpfc_injerr_rgrd_cnt) {
1391 case SCSI_PROT_READ_INSERT:
1392 case SCSI_PROT_READ_STRIP:
1393 case SCSI_PROT_READ_PASS:
1395 * For READ_STRIP and READ_PASS, force the
1396 * error on data being read off the wire. It
1397 * should force an IO error to the driver.
1399 phba->lpfc_injerr_rgrd_cnt--;
1400 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1401 phba->lpfc_injerr_nportid = 0;
1402 phba->lpfc_injerr_lba =
1403 LPFC_INJERR_LBA_OFF;
1404 memset(&phba->lpfc_injerr_wwpn,
1405 0, sizeof(struct lpfc_name));
1408 rc = BG_ERR_INIT | BG_ERR_SWAP;
1409 /* Signals the caller to swap CRC->CSUM */
1411 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1412 "0818 BLKGRD: Injecting guard error: "
1413 "read lba x%lx\n", (unsigned long)lba);
1423 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1424 * the specified SCSI command.
1425 * @phba: The Hba for which this call is being executed.
1426 * @sc: The SCSI command to examine
1427 * @txop: (out) BlockGuard operation for transmitted data
1428 * @rxop: (out) BlockGuard operation for received data
1430 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1434 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1435 uint8_t *txop, uint8_t *rxop)
1439 if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
1440 switch (scsi_get_prot_op(sc)) {
1441 case SCSI_PROT_READ_INSERT:
1442 case SCSI_PROT_WRITE_STRIP:
1443 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1444 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1447 case SCSI_PROT_READ_STRIP:
1448 case SCSI_PROT_WRITE_INSERT:
1449 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1450 *txop = BG_OP_IN_NODIF_OUT_CRC;
1453 case SCSI_PROT_READ_PASS:
1454 case SCSI_PROT_WRITE_PASS:
1455 *rxop = BG_OP_IN_CRC_OUT_CSUM;
1456 *txop = BG_OP_IN_CSUM_OUT_CRC;
1459 case SCSI_PROT_NORMAL:
1461 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1462 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1463 scsi_get_prot_op(sc));
1469 switch (scsi_get_prot_op(sc)) {
1470 case SCSI_PROT_READ_STRIP:
1471 case SCSI_PROT_WRITE_INSERT:
1472 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1473 *txop = BG_OP_IN_NODIF_OUT_CRC;
1476 case SCSI_PROT_READ_PASS:
1477 case SCSI_PROT_WRITE_PASS:
1478 *rxop = BG_OP_IN_CRC_OUT_CRC;
1479 *txop = BG_OP_IN_CRC_OUT_CRC;
1482 case SCSI_PROT_READ_INSERT:
1483 case SCSI_PROT_WRITE_STRIP:
1484 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1485 *txop = BG_OP_IN_CRC_OUT_NODIF;
1488 case SCSI_PROT_NORMAL:
1490 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1491 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1492 scsi_get_prot_op(sc));
1501 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1503 * lpfc_bg_err_opcodes - Redetermine the BlockGuard opcodes to be used with
1504 * the specified SCSI command in order to force a guard tag error.
1505 * @phba: The Hba for which this call is being executed.
1506 * @sc: The SCSI command to examine
1507 * @txop: (out) BlockGuard operation for transmitted data
1508 * @rxop: (out) BlockGuard operation for received data
1510 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1514 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1515 uint8_t *txop, uint8_t *rxop)
1518 if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
1519 switch (scsi_get_prot_op(sc)) {
1520 case SCSI_PROT_READ_INSERT:
1521 case SCSI_PROT_WRITE_STRIP:
1522 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1523 *txop = BG_OP_IN_CRC_OUT_NODIF;
1526 case SCSI_PROT_READ_STRIP:
1527 case SCSI_PROT_WRITE_INSERT:
1528 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1529 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1532 case SCSI_PROT_READ_PASS:
1533 case SCSI_PROT_WRITE_PASS:
1534 *rxop = BG_OP_IN_CSUM_OUT_CRC;
1535 *txop = BG_OP_IN_CRC_OUT_CSUM;
1538 case SCSI_PROT_NORMAL:
1544 switch (scsi_get_prot_op(sc)) {
1545 case SCSI_PROT_READ_STRIP:
1546 case SCSI_PROT_WRITE_INSERT:
1547 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1548 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1551 case SCSI_PROT_READ_PASS:
1552 case SCSI_PROT_WRITE_PASS:
1553 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1554 *txop = BG_OP_IN_CSUM_OUT_CSUM;
1557 case SCSI_PROT_READ_INSERT:
1558 case SCSI_PROT_WRITE_STRIP:
1559 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1560 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1563 case SCSI_PROT_NORMAL:
1574 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1575 * @phba: The Hba for which this call is being executed.
1576 * @sc: pointer to scsi command we're working on
1577 * @bpl: pointer to buffer list for protection groups
1578 * @datasegcnt: number of segments of data that have been dma mapped
1580 * This function sets up BPL buffer list for protection groups of
1581 * type LPFC_PG_TYPE_NO_DIF
1583 * This is usually used when the HBA is instructed to generate
1584 * DIFs and insert them into the data stream (or strip DIFs from the
1585 * incoming data stream).
1587 * The buffer list consists of just one protection group described
1589 * +-------------------------+
1590 * start of prot group --> | PDE_5 |
1591 * +-------------------------+
1593 * +-------------------------+
1595 * +-------------------------+
1596 * |more Data BDE's ... (opt)|
1597 * +-------------------------+
1600 * Note: Data s/g buffers have been dma mapped
1602 * Returns the number of BDEs added to the BPL.
1605 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1606 struct ulp_bde64 *bpl, int datasegcnt)
1608 struct scatterlist *sgde = NULL; /* s/g data entry */
1609 struct lpfc_pde5 *pde5 = NULL;
1610 struct lpfc_pde6 *pde6 = NULL;
1611 dma_addr_t physaddr;
1612 int i = 0, num_bde = 0, status;
1613 int datadir = sc->sc_data_direction;
1614 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1617 uint32_t checking = 1;
1621 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1625 /* extract some info from the scsi command for pde*/
1626 reftag = scsi_prot_ref_tag(sc);
1627 if (reftag == LPFC_INVALID_REFTAG)
1630 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1631 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1633 if (rc & BG_ERR_SWAP)
1634 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1635 if (rc & BG_ERR_CHECK)
1640 /* setup PDE5 with what we have */
1641 pde5 = (struct lpfc_pde5 *) bpl;
1642 memset(pde5, 0, sizeof(struct lpfc_pde5));
1643 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1645 /* Endianness conversion if necessary for PDE5 */
1646 pde5->word0 = cpu_to_le32(pde5->word0);
1647 pde5->reftag = cpu_to_le32(reftag);
1649 /* advance bpl and increment bde count */
1652 pde6 = (struct lpfc_pde6 *) bpl;
1654 /* setup PDE6 with the rest of the info */
1655 memset(pde6, 0, sizeof(struct lpfc_pde6));
1656 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1657 bf_set(pde6_optx, pde6, txop);
1658 bf_set(pde6_oprx, pde6, rxop);
1661 * We only need to check the data on READs; for WRITEs,
1662 * protection data is automatically generated, not checked.
1664 if (datadir == DMA_FROM_DEVICE) {
1665 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
1666 bf_set(pde6_ce, pde6, checking);
1668 bf_set(pde6_ce, pde6, 0);
1670 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
1671 bf_set(pde6_re, pde6, checking);
1673 bf_set(pde6_re, pde6, 0);
1675 bf_set(pde6_ai, pde6, 1);
1676 bf_set(pde6_ae, pde6, 0);
1677 bf_set(pde6_apptagval, pde6, 0);
1679 /* Endianness conversion if necessary for PDE6 */
1680 pde6->word0 = cpu_to_le32(pde6->word0);
1681 pde6->word1 = cpu_to_le32(pde6->word1);
1682 pde6->word2 = cpu_to_le32(pde6->word2);
1684 /* advance bpl and increment bde count */
1688 /* assumption: caller has already run dma_map_sg on command data */
1689 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1690 physaddr = sg_dma_address(sgde);
1691 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1692 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1693 bpl->tus.f.bdeSize = sg_dma_len(sgde);
1694 if (datadir == DMA_TO_DEVICE)
1695 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1697 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1698 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1708 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1709 * @phba: The Hba for which this call is being executed.
1710 * @sc: pointer to scsi command we're working on
1711 * @bpl: pointer to buffer list for protection groups
1712 * @datacnt: number of segments of data that have been dma mapped
1713 * @protcnt: number of segment of protection data that have been dma mapped
1715 * This function sets up BPL buffer list for protection groups of
1716 * type LPFC_PG_TYPE_DIF
1718 * This is usually used when DIFs are in their own buffers,
1719 * separate from the data. The HBA can then be instructed
1720 * to place the DIFs in the outgoing stream. For read operations,
1721 * the HBA can extract the DIFs and place them in DIF buffers.
1723 * The buffer list for this type consists of one or more of the
1724 * protection groups described below:
1725 * +-------------------------+
1726 * start of first prot group --> | PDE_5 |
1727 * +-------------------------+
1729 * +-------------------------+
1730 * | PDE_7 (Prot BDE) |
1731 * +-------------------------+
1733 * +-------------------------+
1734 * |more Data BDE's ... (opt)|
1735 * +-------------------------+
1736 * start of new prot group --> | PDE_5 |
1737 * +-------------------------+
1739 * +-------------------------+
1741 * Note: It is assumed that both data and protection s/g buffers have been
1744 * Returns the number of BDEs added to the BPL.
1747 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1748 struct ulp_bde64 *bpl, int datacnt, int protcnt)
1750 struct scatterlist *sgde = NULL; /* s/g data entry */
1751 struct scatterlist *sgpe = NULL; /* s/g prot entry */
1752 struct lpfc_pde5 *pde5 = NULL;
1753 struct lpfc_pde6 *pde6 = NULL;
1754 struct lpfc_pde7 *pde7 = NULL;
1755 dma_addr_t dataphysaddr, protphysaddr;
1756 unsigned short curr_data = 0, curr_prot = 0;
1757 unsigned int split_offset;
1758 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
1759 unsigned int protgrp_blks, protgrp_bytes;
1760 unsigned int remainder, subtotal;
1762 int datadir = sc->sc_data_direction;
1763 unsigned char pgdone = 0, alldone = 0;
1765 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1768 uint32_t checking = 1;
1773 sgpe = scsi_prot_sglist(sc);
1774 sgde = scsi_sglist(sc);
1776 if (!sgpe || !sgde) {
1777 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1778 "9020 Invalid s/g entry: data=x%px prot=x%px\n",
1783 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1787 /* extract some info from the scsi command */
1788 blksize = scsi_prot_interval(sc);
1789 reftag = scsi_prot_ref_tag(sc);
1790 if (reftag == LPFC_INVALID_REFTAG)
1793 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1794 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1796 if (rc & BG_ERR_SWAP)
1797 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1798 if (rc & BG_ERR_CHECK)
1805 /* Check to see if we ran out of space */
1806 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1809 /* setup PDE5 with what we have */
1810 pde5 = (struct lpfc_pde5 *) bpl;
1811 memset(pde5, 0, sizeof(struct lpfc_pde5));
1812 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1814 /* Endianness conversion if necessary for PDE5 */
1815 pde5->word0 = cpu_to_le32(pde5->word0);
1816 pde5->reftag = cpu_to_le32(reftag);
1818 /* advance bpl and increment bde count */
1821 pde6 = (struct lpfc_pde6 *) bpl;
1823 /* setup PDE6 with the rest of the info */
1824 memset(pde6, 0, sizeof(struct lpfc_pde6));
1825 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1826 bf_set(pde6_optx, pde6, txop);
1827 bf_set(pde6_oprx, pde6, rxop);
1829 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
1830 bf_set(pde6_ce, pde6, checking);
1832 bf_set(pde6_ce, pde6, 0);
1834 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
1835 bf_set(pde6_re, pde6, checking);
1837 bf_set(pde6_re, pde6, 0);
1839 bf_set(pde6_ai, pde6, 1);
1840 bf_set(pde6_ae, pde6, 0);
1841 bf_set(pde6_apptagval, pde6, 0);
1843 /* Endianness conversion if necessary for PDE6 */
1844 pde6->word0 = cpu_to_le32(pde6->word0);
1845 pde6->word1 = cpu_to_le32(pde6->word1);
1846 pde6->word2 = cpu_to_le32(pde6->word2);
1848 /* advance bpl and increment bde count */
1852 /* setup the first BDE that points to protection buffer */
1853 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1854 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
1856 /* must be an integer multiple of the DIF block length */
1857 BUG_ON(protgroup_len % 8);
1859 pde7 = (struct lpfc_pde7 *) bpl;
1860 memset(pde7, 0, sizeof(struct lpfc_pde7));
1861 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1863 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1864 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1866 protgrp_blks = protgroup_len / 8;
1867 protgrp_bytes = protgrp_blks * blksize;
1869 /* check if this pde is crossing the 4K boundary; if so split */
1870 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
1871 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1872 protgroup_offset += protgroup_remainder;
1873 protgrp_blks = protgroup_remainder / 8;
1874 protgrp_bytes = protgrp_blks * blksize;
1876 protgroup_offset = 0;
1882 /* setup BDE's for data blocks associated with DIF data */
1884 subtotal = 0; /* total bytes processed for current prot grp */
1886 /* Check to see if we ran out of space */
1887 if (num_bde >= phba->cfg_total_seg_cnt)
1891 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1892 "9065 BLKGRD:%s Invalid data segment\n",
1897 dataphysaddr = sg_dma_address(sgde) + split_offset;
1898 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1899 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1901 remainder = sg_dma_len(sgde) - split_offset;
1903 if ((subtotal + remainder) <= protgrp_bytes) {
1904 /* we can use this whole buffer */
1905 bpl->tus.f.bdeSize = remainder;
1908 if ((subtotal + remainder) == protgrp_bytes)
1911 /* must split this buffer with next prot grp */
1912 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1913 split_offset += bpl->tus.f.bdeSize;
1916 subtotal += bpl->tus.f.bdeSize;
1918 if (datadir == DMA_TO_DEVICE)
1919 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1921 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1922 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1930 /* Move to the next s/g segment if possible */
1931 sgde = sg_next(sgde);
1935 if (protgroup_offset) {
1936 /* update the reference tag */
1937 reftag += protgrp_blks;
1943 if (curr_prot == protcnt) {
1945 } else if (curr_prot < protcnt) {
1946 /* advance to next prot buffer */
1947 sgpe = sg_next(sgpe);
1950 /* update the reference tag */
1951 reftag += protgrp_blks;
1953 /* if we're here, we have a bug */
1954 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1955 "9054 BLKGRD: bug in %s\n", __func__);
1965 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1966 * @phba: The Hba for which this call is being executed.
1967 * @sc: pointer to scsi command we're working on
1968 * @sgl: pointer to buffer list for protection groups
1969 * @datasegcnt: number of segments of data that have been dma mapped
1970 * @lpfc_cmd: lpfc scsi command object pointer.
1972 * This function sets up SGL buffer list for protection groups of
1973 * type LPFC_PG_TYPE_NO_DIF
1975 * This is usually used when the HBA is instructed to generate
1976 * DIFs and insert them into the data stream (or strip DIFs from the
1977 * incoming data stream).
1979 * The buffer list consists of just one protection group described
1981 * +-------------------------+
1982 * start of prot group --> | DI_SEED |
1983 * +-------------------------+
1985 * +-------------------------+
1986 * |more Data SGE's ... (opt)|
1987 * +-------------------------+
1990 * Note: Data s/g buffers have been dma mapped
1992 * Returns the number of SGEs added to the SGL.
1995 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1996 struct sli4_sge *sgl, int datasegcnt,
1997 struct lpfc_io_buf *lpfc_cmd)
1999 struct scatterlist *sgde = NULL; /* s/g data entry */
2000 struct sli4_sge_diseed *diseed = NULL;
2001 dma_addr_t physaddr;
2002 int i = 0, num_sge = 0, status;
2005 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2008 uint32_t checking = 1;
2010 uint32_t dma_offset = 0;
2011 struct sli4_hybrid_sgl *sgl_xtra = NULL;
2013 bool lsp_just_set = false;
2015 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2019 /* extract some info from the scsi command for pde*/
2020 reftag = scsi_prot_ref_tag(sc);
2021 if (reftag == LPFC_INVALID_REFTAG)
2024 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2025 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2027 if (rc & BG_ERR_SWAP)
2028 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2029 if (rc & BG_ERR_CHECK)
2034 /* setup DISEED with what we have */
2035 diseed = (struct sli4_sge_diseed *) sgl;
2036 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2037 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2039 /* Endianness conversion if necessary */
2040 diseed->ref_tag = cpu_to_le32(reftag);
2041 diseed->ref_tag_tran = diseed->ref_tag;
2044 * We only need to check the data on READs; for WRITEs,
2045 * protection data is automatically generated, not checked.
2047 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2048 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
2049 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2051 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2053 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
2054 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2056 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2059 /* setup DISEED with the rest of the info */
2060 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2061 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2063 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2064 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2066 /* Endianness conversion if necessary for DISEED */
2067 diseed->word2 = cpu_to_le32(diseed->word2);
2068 diseed->word3 = cpu_to_le32(diseed->word3);
2070 /* advance bpl and increment sge count */
2074 /* assumption: caller has already run dma_map_sg on command data */
2075 sgde = scsi_sglist(sc);
2077 for (i = 0; i < datasegcnt; i++) {
2081 /* do we need to expand the segment */
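/*
 * Chaining note: when the next SGE would land on the
 * phba->border_sge_num boundary and more data segments remain, this
 * slot is written as an LSP-type SGE pointing at an extra SGL page
 * obtained from lpfc_get_sgl_per_hdwq(), and the remaining data
 * entries are built in that page.
 */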
2082 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2083 ((datasegcnt - 1) != i)) {
2085 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2087 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2089 if (unlikely(!sgl_xtra)) {
2090 lpfc_cmd->seg_cnt = 0;
2093 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2094 sgl_xtra->dma_phys_sgl));
2095 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2096 sgl_xtra->dma_phys_sgl));
2099 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2102 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
2103 if ((datasegcnt - 1) == i)
2104 bf_set(lpfc_sli4_sge_last, sgl, 1);
2105 physaddr = sg_dma_address(sgde);
2106 dma_len = sg_dma_len(sgde);
2107 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2108 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2110 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2111 sgl->word2 = cpu_to_le32(sgl->word2);
2112 sgl->sge_len = cpu_to_le32(dma_len);
2114 dma_offset += dma_len;
2115 sgde = sg_next(sgde);
2119 lsp_just_set = false;
2122 sgl->word2 = cpu_to_le32(sgl->word2);
2123 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2125 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2128 lsp_just_set = true;
2140 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2141 * @phba: The Hba for which this call is being executed.
2142 * @sc: pointer to scsi command we're working on
2143 * @sgl: pointer to buffer list for protection groups
2144 * @datacnt: number of segments of data that have been dma mapped
2145 * @protcnt: number of segment of protection data that have been dma mapped
2146 * @lpfc_cmd: lpfc scsi command object pointer.
2148 * This function sets up SGL buffer list for protection groups of
2149 * type LPFC_PG_TYPE_DIF
2151 * This is usually used when DIFs are in their own buffers,
2152 * separate from the data. The HBA can then be instructed
2153 * to place the DIFs in the outgoing stream. For read operations,
2154 * the HBA could extract the DIFs and place them in DIF buffers.
2156 * The buffer list for this type consists of one or more of the
2157 * protection groups described below:
2158 * +-------------------------+
2159 * start of first prot group --> | DISEED |
2160 * +-------------------------+
2161 * | DIF (Prot SGE) |
2162 * +-------------------------+
2164 * +-------------------------+
2165 * |more Data SGE's ... (opt)|
2166 * +-------------------------+
2167 * start of new prot group --> | DISEED |
2168 * +-------------------------+
2170 * +-------------------------+
2172 * Note: It is assumed that both data and protection s/g buffers have been
2175 * Returns the number of SGEs added to the SGL.
2178 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2179 struct sli4_sge *sgl, int datacnt, int protcnt,
2180 struct lpfc_io_buf *lpfc_cmd)
2182 struct scatterlist *sgde = NULL; /* s/g data entry */
2183 struct scatterlist *sgpe = NULL; /* s/g prot entry */
2184 struct sli4_sge_diseed *diseed = NULL;
2185 dma_addr_t dataphysaddr, protphysaddr;
2186 unsigned short curr_data = 0, curr_prot = 0;
2187 unsigned int split_offset;
2188 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2189 unsigned int protgrp_blks, protgrp_bytes;
2190 unsigned int remainder, subtotal;
2192 unsigned char pgdone = 0, alldone = 0;
2197 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2200 uint32_t checking = 1;
2201 uint32_t dma_offset = 0;
2202 int num_sge = 0, j = 2;
2203 struct sli4_hybrid_sgl *sgl_xtra = NULL;
2205 sgpe = scsi_prot_sglist(sc);
2206 sgde = scsi_sglist(sc);
2208 if (!sgpe || !sgde) {
2209 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2210 "9082 Invalid s/g entry: data=x%px prot=x%px\n",
2215 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2219 /* extract some info from the scsi command */
2220 blksize = scsi_prot_interval(sc);
2221 reftag = scsi_prot_ref_tag(sc);
2222 if (reftag == LPFC_INVALID_REFTAG)
2225 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2226 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2228 if (rc & BG_ERR_SWAP)
2229 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2230 if (rc & BG_ERR_CHECK)
2237 /* Check to see if we ran out of space */
2238 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2242 /* DISEED and DIF have to be together */
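/*
 * A protection group places a DISEED SGE, a DIF SGE and at least one
 * data SGE back to back, so if any of the next three slots would land
 * on the phba->border_sge_num boundary, chain to a fresh SGL page
 * first rather than splitting the group across pages.
 */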
2243 if (!((j + 1) % phba->border_sge_num) ||
2244 !((j + 2) % phba->border_sge_num) ||
2245 !((j + 3) % phba->border_sge_num)) {
2249 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2251 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2253 if (unlikely(!sgl_xtra)) {
2256 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2257 sgl_xtra->dma_phys_sgl));
2258 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2259 sgl_xtra->dma_phys_sgl));
2262 sgl->word2 = cpu_to_le32(sgl->word2);
2263 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2265 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2269 /* setup DISEED with what we have */
2270 diseed = (struct sli4_sge_diseed *) sgl;
2271 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2272 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2274 /* Endianness conversion if necessary */
2275 diseed->ref_tag = cpu_to_le32(reftag);
2276 diseed->ref_tag_tran = diseed->ref_tag;
2278 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) {
2279 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2281 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2283 * When in this mode, the hardware will replace
2284 * the guard tag from the host with a
2285 * newly generated good CRC for the wire.
2286 * Switch to raw mode here to avoid this
2287 * behavior. What the host sends gets put on the wire.
2289 if (txop == BG_OP_IN_CRC_OUT_CRC) {
2290 txop = BG_OP_RAW_MODE;
2291 rxop = BG_OP_RAW_MODE;
2296 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
2297 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2299 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2301 /* setup DISEED with the rest of the info */
2302 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2303 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2305 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2306 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2308 /* Endianness conversion if necessary for DISEED */
2309 diseed->word2 = cpu_to_le32(diseed->word2);
2310 diseed->word3 = cpu_to_le32(diseed->word3);
2312 /* advance sgl and increment bde count */
2318 /* setup the first BDE that points to protection buffer */
2319 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2320 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2322 /* must be integer multiple of the DIF block length */
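/* 8 == sizeof(struct scsi_dif_tuple): 2-byte guard, 2-byte app, 4-byte ref */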
2323 BUG_ON(protgroup_len % 8);
2325 /* Now setup DIF SGE */
2327 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2328 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2329 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2330 sgl->word2 = cpu_to_le32(sgl->word2);
2333 protgrp_blks = protgroup_len / 8;
2334 protgrp_bytes = protgrp_blks * blksize;
2336 /* check if DIF SGE is crossing the 4K boundary; if so split */
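/*
 * If the split below is taken, only the tuples up to the 4K boundary
 * are described by this DIF SGE; protgroup_offset carries the
 * remainder into the next protection group and reftag is later
 * advanced by protgrp_blks.
 */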
2337 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2338 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2339 protgroup_offset += protgroup_remainder;
2340 protgrp_blks = protgroup_remainder / 8;
2341 protgrp_bytes = protgrp_blks * blksize;
2343 protgroup_offset = 0;
2349 /* setup SGE's for data blocks associated with DIF data */
2351 subtotal = 0; /* total bytes processed for current prot grp */
2357 /* Check to see if we ran out of space */
2358 if ((num_sge >= phba->cfg_total_seg_cnt) &&
2363 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2364 "9086 BLKGRD:%s Invalid data segment\n",
2369 if (!((j + 1) % phba->border_sge_num)) {
2373 bf_set(lpfc_sli4_sge_type, sgl,
2376 sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2379 if (unlikely(!sgl_xtra)) {
2382 sgl->addr_lo = cpu_to_le32(
2383 putPaddrLow(sgl_xtra->dma_phys_sgl));
2384 sgl->addr_hi = cpu_to_le32(
2385 putPaddrHigh(sgl_xtra->dma_phys_sgl));
2388 sgl->word2 = cpu_to_le32(sgl->word2);
2389 sgl->sge_len = cpu_to_le32(
2390 phba->cfg_sg_dma_buf_size);
2392 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2394 dataphysaddr = sg_dma_address(sgde) +
2397 remainder = sg_dma_len(sgde) - split_offset;
2399 if ((subtotal + remainder) <= protgrp_bytes) {
2400 /* we can use this whole buffer */
2401 dma_len = remainder;
2404 if ((subtotal + remainder) ==
2408 /* must split this buffer with next
2411 dma_len = protgrp_bytes - subtotal;
2412 split_offset += dma_len;
2415 subtotal += dma_len;
2418 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2420 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2422 bf_set(lpfc_sli4_sge_last, sgl, 0);
2423 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2424 bf_set(lpfc_sli4_sge_type, sgl,
2425 LPFC_SGE_TYPE_DATA);
2427 sgl->sge_len = cpu_to_le32(dma_len);
2428 dma_offset += dma_len;
2439 /* Move to the next s/g segment if possible */
2440 sgde = sg_next(sgde);
2448 if (protgroup_offset) {
2449 /* update the reference tag */
2450 reftag += protgrp_blks;
2455 if (curr_prot == protcnt) {
2456 /* mark the last SGL */
2458 bf_set(lpfc_sli4_sge_last, sgl, 1);
2460 } else if (curr_prot < protcnt) {
2461 /* advance to next prot buffer */
2462 sgpe = sg_next(sgpe);
2464 /* update the reference tag */
2465 reftag += protgrp_blks;
2467 /* if we're here, we have a bug */
2468 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2469 "9085 BLKGRD: bug in %s\n", __func__);
2480 * lpfc_prot_group_type - Get protection group type of SCSI command
2481 * @phba: The Hba for which this call is being executed.
2482 * @sc: pointer to scsi command we're working on
2484 * Given a SCSI command that supports DIF, determine composition of protection
2485 * groups involved in setting up buffer lists
2487 * Returns: Protection group type (with or without DIF)
2491 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2493 int ret = LPFC_PG_TYPE_INVALID;
2494 unsigned char op = scsi_get_prot_op(sc);
2497 case SCSI_PROT_READ_STRIP:
2498 case SCSI_PROT_WRITE_INSERT:
2499 ret = LPFC_PG_TYPE_NO_DIF;
2501 case SCSI_PROT_READ_INSERT:
2502 case SCSI_PROT_WRITE_STRIP:
2503 case SCSI_PROT_READ_PASS:
2504 case SCSI_PROT_WRITE_PASS:
2505 ret = LPFC_PG_TYPE_DIF_BUF;
2509 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2510 "9021 Unsupported protection op:%d\n",
2518 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2519 * @phba: The Hba for which this call is being executed.
2520 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2522 * Adjust the data length to account for how much data
2523 * is actually on the wire.
2525 * returns the adjusted data length
2528 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2529 struct lpfc_io_buf *lpfc_cmd)
2531 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2534 fcpdl = scsi_bufflen(sc);
2536 /* Check if there is protection data on the wire */
2537 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2538 /* Read check for protection data */
2539 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2543 /* Write check for protection data */
2544 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2549 * If we are in DIF Type 1 mode every data block has an 8 byte
2550 * DIF (trailer) attached to it. Must adjust the FCP data length
2551 * to account for the protection data.
2553 fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;
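/*
 * Worked example: a 4096-byte transfer with a 512-byte protection
 * interval carries 8 DIF tuples, so fcpdl grows by 8 * 8 = 64 bytes.
 */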
2559 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2560 * @phba: The Hba for which this call is being executed.
2561 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2563 * This is the protection/DIF aware version of
2564 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2565 * two functions eventually, but for now, it's here.
2566 * RETURNS 0 - SUCCESS,
2567 * 1 - Failed DMA map, retry.
2568 * 2 - Invalid scsi cmd or prot-type. Do not retry.
2571 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2572 struct lpfc_io_buf *lpfc_cmd)
2574 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2575 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2576 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2577 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2578 uint32_t num_bde = 0;
2579 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2580 int prot_group_type = 0;
2583 struct lpfc_vport *vport = phba->pport;
2586 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2587 * fcp_rsp regions to the first data bde entry
2590 if (scsi_sg_count(scsi_cmnd)) {
2592 * The driver stores the segment count returned from dma_map_sg
2593 * because this is a count of dma-mappings used to map the use_sg
2594 * pages. They are not guaranteed to be the same for those
2595 * architectures that implement an IOMMU.
2597 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2598 scsi_sglist(scsi_cmnd),
2599 scsi_sg_count(scsi_cmnd), datadir);
2600 if (unlikely(!datasegcnt))
2603 lpfc_cmd->seg_cnt = datasegcnt;
2605 /* First check if data segment count from SCSI Layer is good */
2606 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2607 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2612 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2614 switch (prot_group_type) {
2615 case LPFC_PG_TYPE_NO_DIF:
2617 /* Here we need to add a PDE5 and PDE6 to the count */
2618 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2623 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2625 /* we should have 2 or more entries in buffer list */
2632 case LPFC_PG_TYPE_DIF_BUF:
2634 * This type indicates that protection buffers are
2635 * passed to the driver, so that needs to be prepared
2638 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2639 scsi_prot_sglist(scsi_cmnd),
2640 scsi_prot_sg_count(scsi_cmnd), datadir);
2641 if (unlikely(!protsegcnt)) {
2642 scsi_dma_unmap(scsi_cmnd);
2646 lpfc_cmd->prot_seg_cnt = protsegcnt;
2649 * There is a minimum of 4 BPLs used for every
2650 * protection data segment.
2652 if ((lpfc_cmd->prot_seg_cnt * 4) >
2653 (phba->cfg_total_seg_cnt - 2)) {
2658 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2659 datasegcnt, protsegcnt);
2660 /* we should have 3 or more entries in buffer list */
2661 if ((num_bde < 3) ||
2662 (num_bde > phba->cfg_total_seg_cnt)) {
2668 case LPFC_PG_TYPE_INVALID:
2670 scsi_dma_unmap(scsi_cmnd);
2671 lpfc_cmd->seg_cnt = 0;
2673 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2674 "9022 Unexpected protection group %i\n",
2681 * Finish initializing those IOCB fields that are dependent on the
2682 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
2683 * reinitialized since all iocb memory resources are used many times
2684 * for transmit, receive, and continuation bpl's.
2686 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2687 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2688 iocb_cmd->ulpBdeCount = 1;
2689 iocb_cmd->ulpLe = 1;
2691 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2692 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2695 * Due to difference in data length between DIF/non-DIF paths,
2696 * we need to set word 4 of IOCB here
2698 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2701 * For First burst, we may need to adjust the initial transfer
2704 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2705 (fcpdl < vport->cfg_first_burst_size))
2706 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2710 if (lpfc_cmd->seg_cnt)
2711 scsi_dma_unmap(scsi_cmnd);
2712 if (lpfc_cmd->prot_seg_cnt)
2713 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2714 scsi_prot_sg_count(scsi_cmnd),
2715 scsi_cmnd->sc_data_direction);
2717 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2718 "9023 Cannot setup S/G List for HBA "
2719 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2720 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2721 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2722 prot_group_type, num_bde);
2724 lpfc_cmd->seg_cnt = 0;
2725 lpfc_cmd->prot_seg_cnt = 0;
2730 * This function calculates the T10 DIF guard tag
2731 * on the specified data using a CRC algorithm
2735 lpfc_bg_crc(uint8_t *data, int count)
2740 crc = crc_t10dif(data, count);
2741 x = cpu_to_be16(crc);
2746 * This function calculates the T10 DIF guard tag
2747 * on the specified data using a checksum algorithm
2748 * (ip_compute_csum).
2751 lpfc_bg_csum(uint8_t *data, int count)
2755 ret = ip_compute_csum(data, count);
2760 * This function examines the protection data to try to determine
2761 * what type of T10-DIF error occurred.
2764 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2766 struct scatterlist *sgpe; /* s/g prot entry */
2767 struct scatterlist *sgde; /* s/g data entry */
2768 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2769 struct scsi_dif_tuple *src = NULL;
2770 uint8_t *data_src = NULL;
2772 uint16_t start_app_tag, app_tag;
2773 uint32_t start_ref_tag, ref_tag;
2774 int prot, protsegcnt;
2775 int err_type, len, data_len;
2776 int chk_ref, chk_app, chk_guard;
2780 err_type = BGS_GUARD_ERR_MASK;
2784 /* First check to see if there is protection data to examine */
2785 prot = scsi_get_prot_op(cmd);
2786 if ((prot == SCSI_PROT_READ_STRIP) ||
2787 (prot == SCSI_PROT_WRITE_INSERT) ||
2788 (prot == SCSI_PROT_NORMAL))
2791 /* Currently the driver just supports ref_tag and guard_tag checking */
2796 /* Setup a ptr to the protection data provided by the SCSI host */
2797 sgpe = scsi_prot_sglist(cmd);
2798 protsegcnt = lpfc_cmd->prot_seg_cnt;
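/*
 * Walk the protection s/g list one 8-byte tuple at a time,
 * recomputing the guard tag over the corresponding data block and
 * comparing the guard/ref/app tags against the values expected for
 * that block.
 */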
2800 if (sgpe && protsegcnt) {
2803 * We will only try to verify the guard tag if the segment
2804 * data length is a multiple of the blksize.
2806 sgde = scsi_sglist(cmd);
2807 blksize = scsi_prot_interval(cmd);
2808 data_src = (uint8_t *)sg_virt(sgde);
2809 data_len = sgde->length;
2810 if ((data_len & (blksize - 1)) == 0)
2813 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2814 start_ref_tag = scsi_prot_ref_tag(cmd);
2815 if (start_ref_tag == LPFC_INVALID_REFTAG)
2817 start_app_tag = src->app_tag;
2819 while (src && protsegcnt) {
2823 * First check to see if a protection data
2826 if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2827 (src->app_tag == T10_PI_APP_ESCAPE)) {
2832 /* First Guard Tag checking */
2834 guard_tag = src->guard_tag;
2836 & SCSI_PROT_IP_CHECKSUM)
2837 sum = lpfc_bg_csum(data_src,
2840 sum = lpfc_bg_crc(data_src,
2842 if ((guard_tag != sum)) {
2843 err_type = BGS_GUARD_ERR_MASK;
2848 /* Reference Tag checking */
2849 ref_tag = be32_to_cpu(src->ref_tag);
2850 if (chk_ref && (ref_tag != start_ref_tag)) {
2851 err_type = BGS_REFTAG_ERR_MASK;
2856 /* App Tag checking */
2857 app_tag = src->app_tag;
2858 if (chk_app && (app_tag != start_app_tag)) {
2859 err_type = BGS_APPTAG_ERR_MASK;
2863 len -= sizeof(struct scsi_dif_tuple);
2868 data_src += blksize;
2869 data_len -= blksize;
2872 * Are we at the end of the Data segment?
2873 * The data segment is only used for Guard
2876 if (chk_guard && (data_len == 0)) {
2878 sgde = sg_next(sgde);
2882 data_src = (uint8_t *)sg_virt(sgde);
2883 data_len = sgde->length;
2884 if ((data_len & (blksize - 1)) == 0)
2889 /* Goto the next Protection data segment */
2890 sgpe = sg_next(sgpe);
2892 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2901 if (err_type == BGS_GUARD_ERR_MASK) {
2902 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2903 set_host_byte(cmd, DID_ABORT);
2904 phba->bg_guard_err_cnt++;
2905 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2906 "9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
2907 scsi_prot_ref_tag(cmd),
2910 } else if (err_type == BGS_REFTAG_ERR_MASK) {
2911 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2912 set_host_byte(cmd, DID_ABORT);
2914 phba->bg_reftag_err_cnt++;
2915 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2916 "9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
2917 scsi_prot_ref_tag(cmd),
2918 ref_tag, start_ref_tag);
2920 } else if (err_type == BGS_APPTAG_ERR_MASK) {
2921 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2922 set_host_byte(cmd, DID_ABORT);
2924 phba->bg_apptag_err_cnt++;
2925 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2926 "9041 BLKGRD: reftag %x app_tag err %x != %x\n",
2927 scsi_prot_ref_tag(cmd),
2928 app_tag, start_app_tag);
2933 * This function checks for BlockGuard errors detected by
2934 * the HBA. In case of errors, the ASC/ASCQ fields in the
2935 * sense buffer will be set accordingly, paired with
2936 * ILLEGAL_REQUEST to signal to the kernel that the HBA
2937 * detected corruption.
2940 * 0 - No error found
2941 * 1 - BlockGuard error found
2942 * -1 - Internal error (bad profile, ...etc)
2945 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2946 struct lpfc_iocbq *pIocbOut)
2948 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2949 struct sli3_bg_fields *bgf;
2951 struct lpfc_wcqe_complete *wcqe;
2955 u64 failing_sector = 0;
2957 if (phba->sli_rev == LPFC_SLI_REV4) {
2958 wcqe = &pIocbOut->wcqe_cmpl;
2959 status = bf_get(lpfc_wcqe_c_status, wcqe);
2961 if (status == CQE_STATUS_DI_ERROR) {
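/*
 * Translate the WCQE BlockGuard error bits into the SLI-3 style
 * bgstat mask so the common parsing below handles both interface
 * revs.
 */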
2962 /* Guard Check failed */
2963 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))
2964 bgstat |= BGS_GUARD_ERR_MASK;
2966 /* AppTag Check failed */
2967 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))
2968 bgstat |= BGS_APPTAG_ERR_MASK;
2970 /* RefTag Check failed */
2971 if (bf_get(lpfc_wcqe_c_bg_re, wcqe))
2972 bgstat |= BGS_REFTAG_ERR_MASK;
2974 /* Check to see if there was any good data before the
2977 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
2978 bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
2979 bghm = wcqe->total_data_placed;
2983 * Set ALL the error bits to indicate we don't know what
2984 * type of error it is.
2987 bgstat |= (BGS_REFTAG_ERR_MASK |
2988 BGS_APPTAG_ERR_MASK |
2989 BGS_GUARD_ERR_MASK);
2993 bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2995 bgstat = bgf->bgstat;
2998 if (lpfc_bgs_get_invalid_prof(bgstat)) {
2999 cmd->result = DID_ERROR << 16;
3000 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3001 "9072 BLKGRD: Invalid BG Profile in cmd "
3002 "0x%x reftag 0x%x blk cnt 0x%x "
3003 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3004 scsi_prot_ref_tag(cmd),
3005 scsi_logical_block_count(cmd), bgstat, bghm);
3010 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
3011 cmd->result = DID_ERROR << 16;
3012 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3013 "9073 BLKGRD: Invalid BG PDIF Block in cmd "
3014 "0x%x reftag 0x%x blk cnt 0x%x "
3015 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3016 scsi_prot_ref_tag(cmd),
3017 scsi_logical_block_count(cmd), bgstat, bghm);
3022 if (lpfc_bgs_get_guard_err(bgstat)) {
3024 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
3025 set_host_byte(cmd, DID_ABORT);
3026 phba->bg_guard_err_cnt++;
3027 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3028 "9055 BLKGRD: Guard Tag error in cmd "
3029 "0x%x reftag 0x%x blk cnt 0x%x "
3030 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3031 scsi_prot_ref_tag(cmd),
3032 scsi_logical_block_count(cmd), bgstat, bghm);
3035 if (lpfc_bgs_get_reftag_err(bgstat)) {
3037 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
3038 set_host_byte(cmd, DID_ABORT);
3039 phba->bg_reftag_err_cnt++;
3040 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3041 "9056 BLKGRD: Ref Tag error in cmd "
3042 "0x%x reftag 0x%x blk cnt 0x%x "
3043 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3044 scsi_prot_ref_tag(cmd),
3045 scsi_logical_block_count(cmd), bgstat, bghm);
3048 if (lpfc_bgs_get_apptag_err(bgstat)) {
3050 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
3051 set_host_byte(cmd, DID_ABORT);
3052 phba->bg_apptag_err_cnt++;
3053 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3054 "9061 BLKGRD: App Tag error in cmd "
3055 "0x%x reftag 0x%x blk cnt 0x%x "
3056 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3057 scsi_prot_ref_tag(cmd),
3058 scsi_logical_block_count(cmd), bgstat, bghm);
3061 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3063 * setup sense data descriptor 0 per SPC-4 as an information
3064 * field, and put the failing LBA in it.
3065 * This code assumes there was also a guard/app/ref tag error
3068 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
3069 cmd->sense_buffer[8] = 0; /* Information descriptor type */
3070 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
3071 cmd->sense_buffer[10] = 0x80; /* Validity bit */
3073 /* bghm is an "on the wire" FC frame based count */
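/*
 * Convert bghm to a block count using the per-block wire size:
 * READ_INSERT and WRITE_STRIP carry no DIF on the wire, while the
 * remaining ops carry an extra 8-byte tuple per block.
 */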
3074 switch (scsi_get_prot_op(cmd)) {
3075 case SCSI_PROT_READ_INSERT:
3076 case SCSI_PROT_WRITE_STRIP:
3077 bghm /= cmd->device->sector_size;
3079 case SCSI_PROT_READ_STRIP:
3080 case SCSI_PROT_WRITE_INSERT:
3081 case SCSI_PROT_READ_PASS:
3082 case SCSI_PROT_WRITE_PASS:
3083 bghm /= (cmd->device->sector_size +
3084 sizeof(struct scsi_dif_tuple));
3088 failing_sector = scsi_get_lba(cmd);
3089 failing_sector += bghm;
3091 /* Descriptor Information */
3092 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3096 /* No error was reported - problem in FW? */
3097 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3098 "9057 BLKGRD: Unknown error in cmd "
3099 "0x%x reftag 0x%x blk cnt 0x%x "
3100 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3101 scsi_prot_ref_tag(cmd),
3102 scsi_logical_block_count(cmd), bgstat, bghm);
3104 /* Calculate what type of error it was */
3105 lpfc_calc_bg_err(phba, lpfc_cmd);
3112 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3113 * @phba: The Hba for which this call is being executed.
3114 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3116 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
3117 * field of @lpfc_cmd for device with SLI-4 interface spec.
3120 * 2 - Error - Do not retry
3125 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3127 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3128 struct scatterlist *sgel = NULL;
3129 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3130 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
3131 struct sli4_sge *first_data_sgl;
3132 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3133 struct lpfc_vport *vport = phba->pport;
3134 union lpfc_wqe128 *wqe = &pwqeq->wqe;
3135 dma_addr_t physaddr;
3137 uint32_t dma_offset = 0;
3139 struct ulp_bde64 *bde;
3140 bool lsp_just_set = false;
3141 struct sli4_hybrid_sgl *sgl_xtra = NULL;
3144 * There are three possibilities here - use scatter-gather segment, use
3145 * the single mapping, or neither. Start the lpfc command prep by
3146 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3149 if (scsi_sg_count(scsi_cmnd)) {
3151 * The driver stores the segment count returned from dma_map_sg
3152 * because this is a count of dma-mappings used to map the use_sg
3153 * pages. They are not guaranteed to be the same for those
3154 * architectures that implement an IOMMU.
3157 nseg = scsi_dma_map(scsi_cmnd);
3158 if (unlikely(nseg <= 0))
3161 /* clear the last flag in the fcp_rsp map entry */
3162 sgl->word2 = le32_to_cpu(sgl->word2);
3163 bf_set(lpfc_sli4_sge_last, sgl, 0);
3164 sgl->word2 = cpu_to_le32(sgl->word2);
3166 first_data_sgl = sgl;
3167 lpfc_cmd->seg_cnt = nseg;
3168 if (!phba->cfg_xpsgl &&
3169 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3170 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3172 " %s: Too many sg segments from "
3173 "dma_map_sg. Config %d, seg_cnt %d\n",
3174 __func__, phba->cfg_sg_seg_cnt,
3176 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3177 lpfc_cmd->seg_cnt = 0;
3178 scsi_dma_unmap(scsi_cmnd);
3183 * The driver established a maximum scatter-gather segment count
3184 * during probe that limits the number of sg elements in any
3185 * single scsi command. Just run through the seg_cnt and format
3187 * When using SLI-3 the driver will try to fit all the BDEs into
3188 * the IOCB. If it can't then the BDEs get added to a BPL as it
3189 * does for SLI-2 mode.
3192 /* for tracking segment boundaries */
3193 sgel = scsi_sglist(scsi_cmnd);
3195 for (i = 0; i < nseg; i++) {
3198 bf_set(lpfc_sli4_sge_last, sgl, 1);
3199 bf_set(lpfc_sli4_sge_type, sgl,
3200 LPFC_SGE_TYPE_DATA);
3202 bf_set(lpfc_sli4_sge_last, sgl, 0);
3204 /* do we need to expand the segment */
3205 if (!lsp_just_set &&
3206 !((j + 1) % phba->border_sge_num) &&
3207 ((nseg - 1) != i)) {
3209 bf_set(lpfc_sli4_sge_type, sgl,
3212 sgl_xtra = lpfc_get_sgl_per_hdwq(
3215 if (unlikely(!sgl_xtra)) {
3216 lpfc_cmd->seg_cnt = 0;
3217 scsi_dma_unmap(scsi_cmnd);
3220 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3221 sgl_xtra->dma_phys_sgl));
3222 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3223 sgl_xtra->dma_phys_sgl));
3226 bf_set(lpfc_sli4_sge_type, sgl,
3227 LPFC_SGE_TYPE_DATA);
3231 if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3232 LPFC_SGE_TYPE_LSP)) {
3233 if ((nseg - 1) == i)
3234 bf_set(lpfc_sli4_sge_last, sgl, 1);
3236 physaddr = sg_dma_address(sgel);
3237 dma_len = sg_dma_len(sgel);
3238 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3240 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3243 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3244 sgl->word2 = cpu_to_le32(sgl->word2);
3245 sgl->sge_len = cpu_to_le32(dma_len);
3247 dma_offset += dma_len;
3248 sgel = sg_next(sgel);
3251 lsp_just_set = false;
3254 sgl->word2 = cpu_to_le32(sgl->word2);
3255 sgl->sge_len = cpu_to_le32(
3256 phba->cfg_sg_dma_buf_size);
3258 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3261 lsp_just_set = true;
3267 /* PBDE support for first data SGE only.
3268 * For FCoE, we key off Performance Hints.
3269 * For FC, we key off lpfc_enable_pbde.
3272 ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3273 phba->cfg_enable_pbde)) {
3275 bde = (struct ulp_bde64 *)
3277 bde->addrLow = first_data_sgl->addr_lo;
3278 bde->addrHigh = first_data_sgl->addr_hi;
3279 bde->tus.f.bdeSize =
3280 le32_to_cpu(first_data_sgl->sge_len);
3281 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3282 bde->tus.w = cpu_to_le32(bde->tus.w);
3284 /* Word 11 - set PBDE bit */
3285 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
3287 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
3288 /* Word 11 - PBDE bit disabled by default template */
3292 /* set the last flag in the fcp_rsp map entry */
3293 sgl->word2 = le32_to_cpu(sgl->word2);
3294 bf_set(lpfc_sli4_sge_last, sgl, 1);
3295 sgl->word2 = cpu_to_le32(sgl->word2);
3297 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3298 phba->cfg_enable_pbde) {
3299 bde = (struct ulp_bde64 *)
3301 memset(bde, 0, (sizeof(uint32_t) * 3));
3306 * Finish initializing those IOCB fields that are dependent on the
3307 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
3308 * explicitly reinitialized;
3309 * all iocb memory resources are reused.
3311 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3312 /* Set first-burst provided it was successfully negotiated */
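/*
 * First burst applies only to native FC (not FCoE) writes; the
 * initial transfer length is capped at the negotiated first-burst
 * size.
 */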
3313 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3314 vport->cfg_first_burst_size &&
3315 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3316 u32 init_len, total_len;
3318 total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3319 init_len = min(total_len, vport->cfg_first_burst_size);
3322 wqe->fcp_iwrite.initial_xfer_len = init_len;
3323 wqe->fcp_iwrite.total_xfer_len = total_len;
3326 wqe->fcp_iwrite.total_xfer_len =
3327 be32_to_cpu(fcp_cmnd->fcpDl);
3331 * If the OAS driver feature is enabled and the lun is enabled for
3332 * OAS, set the oas iocb related flags.
3334 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3335 scsi_cmnd->device->hostdata)->oas_enabled) {
3336 lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3337 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3338 scsi_cmnd->device->hostdata)->priority;
3341 bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3342 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3344 if (lpfc_cmd->cur_iocbq.priority)
3345 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3346 (lpfc_cmd->cur_iocbq.priority << 1));
3348 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3349 (phba->cfg_XLanePriority << 1));
3356 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3357 * @phba: The Hba for which this call is being executed.
3358 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3360 * This is the protection/DIF aware version of
3361 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3362 * two functions eventually, but for now, it's here.
3364 * 2 - Error - Do not retry
3369 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3370 struct lpfc_io_buf *lpfc_cmd)
3372 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3373 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3374 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3375 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3376 union lpfc_wqe128 *wqe = &pwqeq->wqe;
3377 uint32_t num_sge = 0;
3378 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3379 int prot_group_type = 0;
3382 struct lpfc_vport *vport = phba->pport;
3385 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3386 * fcp_rsp regions to the first data sge entry
3388 if (scsi_sg_count(scsi_cmnd)) {
3390 * The driver stores the segment count returned from dma_map_sg
3391 * because this is a count of dma-mappings used to map the use_sg
3392 * pages. They are not guaranteed to be the same for those
3393 * architectures that implement an IOMMU.
3395 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3396 scsi_sglist(scsi_cmnd),
3397 scsi_sg_count(scsi_cmnd), datadir);
3398 if (unlikely(!datasegcnt))
3402 /* clear the last flag in the fcp_rsp map entry */
3403 sgl->word2 = le32_to_cpu(sgl->word2);
3404 bf_set(lpfc_sli4_sge_last, sgl, 0);
3405 sgl->word2 = cpu_to_le32(sgl->word2);
3408 lpfc_cmd->seg_cnt = datasegcnt;
3410 /* First check if data segment count from SCSI Layer is good */
3411 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3413 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3418 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3420 switch (prot_group_type) {
3421 case LPFC_PG_TYPE_NO_DIF:
3422 /* Here we need to add a DISEED to the count */
3423 if (((lpfc_cmd->seg_cnt + 1) >
3424 phba->cfg_total_seg_cnt) &&
3430 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3431 datasegcnt, lpfc_cmd);
3433 /* we should have 2 or more entries in buffer list */
3440 case LPFC_PG_TYPE_DIF_BUF:
3442 * This type indicates that protection buffers are
3443 * passed to the driver, so that needs to be prepared
3446 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3447 scsi_prot_sglist(scsi_cmnd),
3448 scsi_prot_sg_count(scsi_cmnd), datadir);
3449 if (unlikely(!protsegcnt)) {
3450 scsi_dma_unmap(scsi_cmnd);
3454 lpfc_cmd->prot_seg_cnt = protsegcnt;
3456 * There is a minimum of 3 SGEs used for every
3457 * protection data segment.
3459 if (((lpfc_cmd->prot_seg_cnt * 3) >
3460 (phba->cfg_total_seg_cnt - 2)) &&
3466 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3467 datasegcnt, protsegcnt, lpfc_cmd);
3469 /* we should have 3 or more entries in buffer list */
3471 (num_sge > phba->cfg_total_seg_cnt &&
3472 !phba->cfg_xpsgl)) {
3478 case LPFC_PG_TYPE_INVALID:
3480 scsi_dma_unmap(scsi_cmnd);
3481 lpfc_cmd->seg_cnt = 0;
3483 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3484 "9083 Unexpected protection group %i\n",
3490 switch (scsi_get_prot_op(scsi_cmnd)) {
3491 case SCSI_PROT_WRITE_STRIP:
3492 case SCSI_PROT_READ_STRIP:
3493 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP;
3495 case SCSI_PROT_WRITE_INSERT:
3496 case SCSI_PROT_READ_INSERT:
3497 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT;
3499 case SCSI_PROT_WRITE_PASS:
3500 case SCSI_PROT_READ_PASS:
3501 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS;
3505 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3506 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3508 /* Set first-burst provided it was successfully negotiated */
3509 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3510 vport->cfg_first_burst_size &&
3511 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3512 u32 init_len, total_len;
3514 total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3515 init_len = min(total_len, vport->cfg_first_burst_size);
3518 wqe->fcp_iwrite.initial_xfer_len = init_len;
3519 wqe->fcp_iwrite.total_xfer_len = total_len;
3522 wqe->fcp_iwrite.total_xfer_len =
3523 be32_to_cpu(fcp_cmnd->fcpDl);
3527 * If the OAS driver feature is enabled and the lun is enabled for
3528 * OAS, set the oas iocb related flags.
3530 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3531 scsi_cmnd->device->hostdata)->oas_enabled) {
3532 lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3535 bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3536 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3537 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3538 (phba->cfg_XLanePriority << 1));
3541 /* Word 7. DIF Flags */
3542 if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS)
3543 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
3544 else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP)
3545 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
3546 else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT)
3547 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
3549 lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS |
3550 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
3554 if (lpfc_cmd->seg_cnt)
3555 scsi_dma_unmap(scsi_cmnd);
3556 if (lpfc_cmd->prot_seg_cnt)
3557 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3558 scsi_prot_sg_count(scsi_cmnd),
3559 scsi_cmnd->sc_data_direction);
3561 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3562 "9084 Cannot setup S/G List for HBA "
3563 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3564 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3565 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3566 prot_group_type, num_sge);
3568 lpfc_cmd->seg_cnt = 0;
3569 lpfc_cmd->prot_seg_cnt = 0;
3574 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3575 * @phba: The Hba for which this call is being executed.
3576 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3578 * This routine wraps the actual DMA mapping function pointer from the
3586 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3588 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3592 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3594 * @phba: The Hba for which this call is being executed.
3595 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3597 * This routine wraps the actual DMA mapping function pointer from the
3605 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3607 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3611 * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
3613 * @vport: Pointer to vport object.
3614 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3615 * @tmo: Timeout value for IO
3617 * This routine initializes IOCB/WQE data structure from scsi command
3624 lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3627 return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
3631 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3632 * @phba: Pointer to hba context object.
3633 * @vport: Pointer to vport object.
3634 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3635 * @fcpi_parm: FCP Initiator parameter.
3637 * This function posts an event when there is a SCSI command reporting
3638 * error from the scsi device.
3641 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3642 struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
3643 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3644 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3645 uint32_t resp_info = fcprsp->rspStatus2;
3646 uint32_t scsi_status = fcprsp->rspStatus3;
3647 struct lpfc_fast_path_event *fast_path_evt = NULL;
3648 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3649 unsigned long flags;
3654 /* If there is a queuefull or busy condition, send a scsi event */
3655 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3656 (cmnd->result == SAM_STAT_BUSY)) {
3657 fast_path_evt = lpfc_alloc_fast_evt(phba);
3660 fast_path_evt->un.scsi_evt.event_type =
3662 fast_path_evt->un.scsi_evt.subcategory =
3663 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3664 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3665 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3666 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3667 &pnode->nlp_portname, sizeof(struct lpfc_name));
3668 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3669 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3670 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3671 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3672 fast_path_evt = lpfc_alloc_fast_evt(phba);
3675 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3677 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3678 LPFC_EVENT_CHECK_COND;
3679 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3681 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3682 &pnode->nlp_portname, sizeof(struct lpfc_name));
3683 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3684 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3685 fast_path_evt->un.check_cond_evt.sense_key =
3686 cmnd->sense_buffer[2] & 0xf;
3687 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3688 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3689 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3691 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3692 ((scsi_status == SAM_STAT_GOOD) &&
3693 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3695 * If status is good or the resid does not match fcp_param and
3696 * there is a valid fcpi_parm, then there is a read_check error
3698 fast_path_evt = lpfc_alloc_fast_evt(phba);
3701 fast_path_evt->un.read_check_error.header.event_type =
3702 FC_REG_FABRIC_EVENT;
3703 fast_path_evt->un.read_check_error.header.subcategory =
3704 LPFC_EVENT_FCPRDCHKERR;
3705 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3706 &pnode->nlp_portname, sizeof(struct lpfc_name));
3707 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3708 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3709 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3710 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3711 fast_path_evt->un.read_check_error.fcpiparam =
3716 fast_path_evt->vport = vport;
3717 spin_lock_irqsave(&phba->hbalock, flags);
3718 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3719 spin_unlock_irqrestore(&phba->hbalock, flags);
3720 lpfc_worker_wake_up(phba);
3725 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3726 * @phba: The HBA for which this call is being executed.
3727 * @psb: The scsi buffer which is going to be un-mapped.
3729 * This routine does DMA un-mapping of scatter gather list of scsi command
3730 * field of @psb for a device with SLI-3 interface spec.
3733 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3736 * There are only two special cases to consider. (1) the scsi command
3737 * requested scatter-gather usage or (2) the scsi command allocated
3738 * a request buffer, but did not request use_sg. There is a third
3739 * case, but it does not require resource deallocation.
3741 if (psb->seg_cnt > 0)
3742 scsi_dma_unmap(psb->pCmd);
3743 if (psb->prot_seg_cnt > 0)
3744 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3745 scsi_prot_sg_count(psb->pCmd),
3746 psb->pCmd->sc_data_direction);
3750 * lpfc_unblock_requests - allow further commands to be queued.
3751 * @phba: pointer to phba object
3753 * For single vport, just call scsi_unblock_requests on physical port.
3754 * For multiple vports, send scsi_unblock_requests for all the vports.
3757 lpfc_unblock_requests(struct lpfc_hba *phba)
3759 struct lpfc_vport **vports;
3760 struct Scsi_Host *shost;
3763 if (phba->sli_rev == LPFC_SLI_REV4 &&
3764 !phba->sli4_hba.max_cfg_param.vpi_used) {
3765 shost = lpfc_shost_from_vport(phba->pport);
3766 scsi_unblock_requests(shost);
3770 vports = lpfc_create_vport_work_array(phba);
3772 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3773 shost = lpfc_shost_from_vport(vports[i]);
3774 scsi_unblock_requests(shost);
3776 lpfc_destroy_vport_work_array(phba, vports);
3780 * lpfc_block_requests - prevent further commands from being queued.
3781 * @phba: pointer to phba object
3783 * For single vport, just call scsi_block_requests on physical port.
3784 * For multiple vports, send scsi_block_requests for all the vports.
3787 lpfc_block_requests(struct lpfc_hba *phba)
3789 struct lpfc_vport **vports;
3790 struct Scsi_Host *shost;
3793 if (atomic_read(&phba->cmf_stop_io))
3796 if (phba->sli_rev == LPFC_SLI_REV4 &&
3797 !phba->sli4_hba.max_cfg_param.vpi_used) {
3798 shost = lpfc_shost_from_vport(phba->pport);
3799 scsi_block_requests(shost);
3803 vports = lpfc_create_vport_work_array(phba);
3805 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3806 shost = lpfc_shost_from_vport(vports[i]);
3807 scsi_block_requests(shost);
3809 lpfc_destroy_vport_work_array(phba, vports);
3813 * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion
3814 * @phba: The HBA for which this call is being executed.
3815 * @time: The latency of the IO that completed (in ns)
3816 * @size: The size of the IO that completed
3817 * @shost: SCSI host the IO completed on (NULL for an NVME IO)
3819 * The routine adjusts the various Burst and Bandwidth counters used in
3820 * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT,
3821 * that means the IO was never issued to the HBA, so this routine is
3822 * just being called to cleanup the counter from a previous
3823 * lpfc_update_cmf_cmd call.
3826 lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
3827 uint64_t time, uint32_t size, struct Scsi_Host *shost)
3829 struct lpfc_cgn_stat *cgs;
3831 if (time != LPFC_CGN_NOT_SENT) {
3832 /* lat is ns coming in, save latency in us */
3836 time = div_u64(time + 500, 1000); /* round it */
3838 cgs = this_cpu_ptr(phba->cmf_stat);
3839 atomic64_add(size, &cgs->rcv_bytes);
3840 atomic64_add(time, &cgs->rx_latency);
3841 atomic_inc(&cgs->rx_io_cnt);
3847 * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission
3848 * @phba: The HBA for which this call is being executed.
3849 * @size: The size of the IO that will be issued
3851 * The routine adjusts the various Burst and Bandwidth counters used in
3852 * Congestion management and E2E.
3855 lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
3858 struct lpfc_cgn_stat *cgs;
3861 /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */
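/*
 * In MANAGED mode, sum the per-CPU byte counters for this interval;
 * once the per-interval budget is exceeded, block further SCSI
 * requests and account the busy condition.
 */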
3862 if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
3863 phba->cmf_max_bytes_per_interval) {
3865 for_each_present_cpu(cpu) {
3866 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3867 total += atomic64_read(&cgs->total_bytes);
3869 if (total >= phba->cmf_max_bytes_per_interval) {
3870 if (!atomic_xchg(&phba->cmf_bw_wait, 1)) {
3871 lpfc_block_requests(phba);
3873 lpfc_calc_cmf_latency(phba);
3875 atomic_inc(&phba->cmf_busy);
3878 if (size > atomic_read(&phba->rx_max_read_cnt))
3879 atomic_set(&phba->rx_max_read_cnt, size);
3882 cgs = this_cpu_ptr(phba->cmf_stat);
3883 atomic64_add(size, &cgs->total_bytes);
3888 * lpfc_handle_fcp_err - FCP response handler
3889 * @vport: The virtual port for which this call is being executed.
3890 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
3891 * @fcpi_parm: FCP Initiator parameter.
3893 * This routine is called to process response IOCB with status field
3894 * IOSTAT_FCP_RSP_ERROR. This routine sets the result field of the scsi command
3895 * based upon SCSI and FCP error.
3898 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3901 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3902 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3903 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3904 uint32_t resp_info = fcprsp->rspStatus2;
3905 uint32_t scsi_status = fcprsp->rspStatus3;
3907 uint32_t host_status = DID_OK;
3908 uint32_t rsplen = 0;
3910 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3914 * If this is a task management command, there is no
3915 * scsi packet associated with this lpfc_cmd. The driver
3918 if (fcpcmd->fcpCntl2) {
3923 if (resp_info & RSP_LEN_VALID) {
3924 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3925 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3926 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3927 "2719 Invalid response length: "
3928 "tgt x%x lun x%llx cmnd x%x rsplen "
3929 "x%x\n", cmnd->device->id,
3930 cmnd->device->lun, cmnd->cmnd[0],
3932 host_status = DID_ERROR;
3935 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3936 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3937 "2757 Protocol failure detected during "
3938 "processing of FCP I/O op: "
3939 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3941 cmnd->device->lun, cmnd->cmnd[0],
3943 host_status = DID_ERROR;
3948 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3949 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3950 if (snslen > SCSI_SENSE_BUFFERSIZE)
3951 snslen = SCSI_SENSE_BUFFERSIZE;
3953 if (resp_info & RSP_LEN_VALID)
3954 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3955 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3957 lp = (uint32_t *)cmnd->sense_buffer;
3959 /* special handling for under run conditions */
3960 if (!scsi_status && (resp_info & RESID_UNDER)) {
3961 /* don't log under runs if fcp set... */
3962 if (vport->cfg_log_verbose & LOG_FCP)
3963 logit = LOG_FCP_ERROR;
3964 /* unless operator says so */
3965 if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3966 logit = LOG_FCP_UNDER;
3969 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3970 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3971 "Data: x%x x%x x%x x%x x%x\n",
3972 cmnd->cmnd[0], scsi_status,
3973 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3974 be32_to_cpu(fcprsp->rspResId),
3975 be32_to_cpu(fcprsp->rspSnsLen),
3976 be32_to_cpu(fcprsp->rspRspLen),
3979 scsi_set_resid(cmnd, 0);
3980 fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3981 if (resp_info & RESID_UNDER) {
3982 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3984 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3985 "9025 FCP Underrun, expected %d, "
3986 "residual %d Data: x%x x%x x%x\n",
3988 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3992 * If there is an under run, check if under run reported by
3993 * storage array is the same as the under run reported by the HBA.
3994 * If this is not same, there is a dropped frame.
3996 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3997 lpfc_printf_vlog(vport, KERN_WARNING,
3998 LOG_FCP | LOG_FCP_ERROR,
3999 "9026 FCP Read Check Error "
4000 "and Underrun Data: x%x x%x x%x x%x\n",
4002 scsi_get_resid(cmnd), fcpi_parm,
4004 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
4005 host_status = DID_ERROR;
4008 * The cmnd->underflow is the minimum number of bytes that must
4009 * be transferred for this command. Provided a sense condition
4010 * is not present, make sure the actual amount transferred is at
4011 * least the underflow value or fail.
4013 if (!(resp_info & SNS_LEN_VALID) &&
4014 (scsi_status == SAM_STAT_GOOD) &&
4015 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
4016 < cmnd->underflow)) {
4017 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4018 "9027 FCP command x%x residual "
4019 "underrun converted to error "
4020 "Data: x%x x%x x%x\n",
4021 cmnd->cmnd[0], scsi_bufflen(cmnd),
4022 scsi_get_resid(cmnd), cmnd->underflow);
4023 host_status = DID_ERROR;
4025 } else if (resp_info & RESID_OVER) {
4026 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4027 "9028 FCP command x%x residual overrun error. "
4028 "Data: x%x x%x\n", cmnd->cmnd[0],
4029 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
4030 host_status = DID_ERROR;
4033 * Check SLI validation that all the transfer was actually done
4034 * (fcpi_parm should be zero). Apply check only to reads.
4036 } else if (fcpi_parm) {
4037 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
4038 "9029 FCP %s Check Error Data: "
4039 "x%x x%x x%x x%x x%x\n",
4040 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
4042 fcpDl, be32_to_cpu(fcprsp->rspResId),
4043 fcpi_parm, cmnd->cmnd[0], scsi_status);
4045 /* There is some issue with the LPe12000 that causes it
4046 * to miscalculate the fcpi_parm and falsely trip this
4047 * recovery logic. Detect this case and don't error when true.
4049 if (fcpi_parm > fcpDl)
4052 switch (scsi_status) {
4054 case SAM_STAT_CHECK_CONDITION:
4055 /* Fabric dropped a data frame. Fail any successful
4056 * command in which we detected dropped frames.
4057 * A status of good or some check conditions could
4058 * be considered a successful command.
4060 host_status = DID_ERROR;
4063 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
4067 cmnd->result = host_status << 16 | scsi_status;
4068 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
4072 * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
4073 * @phba: The hba for which this call is being executed.
4074 * @pwqeIn: The command WQE for the scsi cmnd.
4075 * @pwqeOut: Pointer to driver response WQE object.
4077 * This routine assigns scsi command result by looking into response WQE
4078 * status field appropriately. This routine handles QUEUE FULL condition as
4079 * well by ramping down device queue depth.
4082 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
4083 struct lpfc_iocbq *pwqeOut)
4085 struct lpfc_io_buf *lpfc_cmd =
4086 (struct lpfc_io_buf *)pwqeIn->context1;
4087 struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
4088 struct lpfc_vport *vport = pwqeIn->vport;
4089 struct lpfc_rport_data *rdata;
4090 struct lpfc_nodelist *ndlp;
4091 struct scsi_cmnd *cmd;
4092 unsigned long flags;
4093 struct lpfc_fast_path_event *fast_path_evt;
4094 struct Scsi_Host *shost;
4095 u32 logit = LOG_FCP;
4100 /* Sanity check on return of outstanding command */
4102 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4103 "9032 Null lpfc_cmd pointer. No "
4104 "release, skip completion\n");
4108 rdata = lpfc_cmd->rdata;
4109 ndlp = rdata->pnode;
4111 /* Sanity check on return of outstanding command */
4112 cmd = lpfc_cmd->pCmd;
4114 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4115 "9042 I/O completion: Not an active IO\n");
4116 lpfc_release_scsi_buf(phba, lpfc_cmd);
4119 /* Guard against abort handler being called at same time */
4120 spin_lock(&lpfc_cmd->buf_lock);
4121 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4122 if (phba->sli4_hba.hdwq)
4123 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4125 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4126 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4127 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4129 shost = cmd->device->host;
4131 status = bf_get(lpfc_wcqe_c_status, wcqe);
4132 lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
4133 lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
4135 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4136 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
4137 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4138 if (phba->cfg_fcp_wait_abts_rsp)
4142 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4143 if (lpfc_cmd->prot_data_type) {
4144 struct scsi_dif_tuple *src = NULL;
4146 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4148 * Used to restore any changes to protection
4149 * data for error injection.
4151 switch (lpfc_cmd->prot_data_type) {
4152 case LPFC_INJERR_REFTAG:
4154 lpfc_cmd->prot_data;
4156 case LPFC_INJERR_APPTAG:
4158 (uint16_t)lpfc_cmd->prot_data;
4160 case LPFC_INJERR_GUARD:
4162 (uint16_t)lpfc_cmd->prot_data;
4168 lpfc_cmd->prot_data = 0;
4169 lpfc_cmd->prot_data_type = 0;
4170 lpfc_cmd->prot_data_segment = NULL;
4173 if (unlikely(lpfc_cmd->status)) {
4174 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4175 (lpfc_cmd->result & IOERR_DRVR_MASK))
4176 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4177 else if (lpfc_cmd->status >= IOSTAT_CNT)
4178 lpfc_cmd->status = IOSTAT_DEFAULT;
4179 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4180 !lpfc_cmd->fcp_rsp->rspStatus3 &&
4181 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4182 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4185 logit = LOG_FCP | LOG_FCP_UNDER;
4186 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4187 "9034 FCP cmd x%x failed <%d/%lld> "
4188 "status: x%x result: x%x "
4189 "sid: x%x did: x%x oxid: x%x "
4190 "Data: x%x x%x x%x\n",
4192 cmd->device ? cmd->device->id : 0xffff,
4193 cmd->device ? cmd->device->lun : 0xffff,
4194 lpfc_cmd->status, lpfc_cmd->result,
4196 (ndlp) ? ndlp->nlp_DID : 0,
4197 lpfc_cmd->cur_iocbq.sli4_xritag,
4198 wcqe->parameter, wcqe->total_data_placed,
4199 lpfc_cmd->cur_iocbq.iotag);
4202 switch (lpfc_cmd->status) {
4203 case IOSTAT_SUCCESS:
4204 cmd->result = DID_OK << 16;
4206 case IOSTAT_FCP_RSP_ERROR:
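/*
 * fcpi_parm is derived here as the WQE total transfer length minus
 * the bytes the HBA actually placed.
 */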
4207 lpfc_handle_fcp_err(vport, lpfc_cmd,
4208 pwqeIn->wqe.fcp_iread.total_xfer_len -
4209 wcqe->total_data_placed);
4211 case IOSTAT_NPORT_BSY:
4212 case IOSTAT_FABRIC_BSY:
4213 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
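/* Queue a fabric/port busy fast-path event so the worker thread can
 * report it to management applications.
 */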
4214 fast_path_evt = lpfc_alloc_fast_evt(phba);
4217 fast_path_evt->un.fabric_evt.event_type =
4218 FC_REG_FABRIC_EVENT;
4219 fast_path_evt->un.fabric_evt.subcategory =
4220 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4221 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4223 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4224 &ndlp->nlp_portname,
4225 sizeof(struct lpfc_name));
4226 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4227 &ndlp->nlp_nodename,
4228 sizeof(struct lpfc_name));
4230 fast_path_evt->vport = vport;
4231 fast_path_evt->work_evt.evt =
4232 LPFC_EVT_FASTPATH_MGMT_EVT;
4233 spin_lock_irqsave(&phba->hbalock, flags);
4234 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4236 spin_unlock_irqrestore(&phba->hbalock, flags);
4237 lpfc_worker_wake_up(phba);
4238 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4239 "9035 Fabric/Node busy FCP cmd x%x failed"
4241 "status: x%x result: x%x "
4242 "sid: x%x did: x%x oxid: x%x "
4243 "Data: x%x x%x x%x\n",
4245 cmd->device ? cmd->device->id : 0xffff,
4246 cmd->device ? cmd->device->lun : 0xffff,
4247 lpfc_cmd->status, lpfc_cmd->result,
4249 (ndlp) ? ndlp->nlp_DID : 0,
4250 lpfc_cmd->cur_iocbq.sli4_xritag,
4252 wcqe->total_data_placed,
4253 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4255 case IOSTAT_REMOTE_STOP:
4257 /* This I/O was aborted by the target; we don't
4258 * know the rxid, and because we did not send the
4259 * ABTS we cannot generate an RRQ.
4261 lpfc_set_rrq_active(phba, ndlp,
4262 lpfc_cmd->cur_iocbq.sli4_lxritag,
4266 case IOSTAT_LOCAL_REJECT:
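/* Map the LOCAL_REJECT sub-codes: security key/crypto errors become
 * DID_NO_CONNECT, transient RPI/resource/abort errors are requeued, and
 * DMA failures reported as DI errors are handed to the BlockGuard parser.
 */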
4267 if (lpfc_cmd->result & IOERR_DRVR_MASK)
4268 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4269 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4271 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4272 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4274 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4275 cmd->result = DID_NO_CONNECT << 16;
4278 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4279 lpfc_cmd->result == IOERR_NO_RESOURCES ||
4280 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4281 lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
4282 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4283 cmd->result = DID_REQUEUE << 16;
4286 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4287 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4288 status == CQE_STATUS_DI_ERROR) {
4289 if (scsi_get_prot_op(cmd) !=
4292 * This is a response for a BG enabled
4293 * cmd. Parse BG error
4295 lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut);
4298 lpfc_printf_vlog(vport, KERN_WARNING,
4300 "9040 non-zero BGSTAT "
4301 "on unprotected cmd\n");
4304 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4305 "9036 Local Reject FCP cmd x%x failed"
4307 "status: x%x result: x%x "
4308 "sid: x%x did: x%x oxid: x%x "
4309 "Data: x%x x%x x%x\n",
4311 cmd->device ? cmd->device->id : 0xffff,
4312 cmd->device ? cmd->device->lun : 0xffff,
4313 lpfc_cmd->status, lpfc_cmd->result,
4315 (ndlp) ? ndlp->nlp_DID : 0,
4316 lpfc_cmd->cur_iocbq.sli4_xritag,
4318 wcqe->total_data_placed,
4319 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4322 if (lpfc_cmd->status >= IOSTAT_CNT)
4323 lpfc_cmd->status = IOSTAT_DEFAULT;
4324 cmd->result = DID_ERROR << 16;
4325 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
4326 "9037 FCP Completion Error: xri %x "
4327 "status x%x result x%x [x%x] "
4329 lpfc_cmd->cur_iocbq.sli4_xritag,
4330 lpfc_cmd->status, lpfc_cmd->result,
4332 wcqe->total_data_placed);
4334 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4335 u32 *lp = (u32 *)cmd->sense_buffer;
4337 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4338 "9039 Iodone <%d/%llu> cmd x%px, error "
4339 "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
4340 cmd->device->id, cmd->device->lun, cmd,
4341 cmd->result, *lp, *(lp + 3),
4342 (u64)scsi_get_lba(cmd),
4343 cmd->retries, scsi_get_resid(cmd));
4346 lpfc_update_stats(vport, lpfc_cmd);
4348 if (vport->cfg_max_scsicmpl_time &&
4349 time_after(jiffies, lpfc_cmd->start_time +
4350 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4351 spin_lock_irqsave(shost->host_lock, flags);
4353 if (ndlp->cmd_qdepth >
4354 atomic_read(&ndlp->cmd_pending) &&
4355 (atomic_read(&ndlp->cmd_pending) >
4356 LPFC_MIN_TGT_QDEPTH) &&
4357 (cmd->cmnd[0] == READ_10 ||
4358 cmd->cmnd[0] == WRITE_10))
4360 atomic_read(&ndlp->cmd_pending);
4362 ndlp->last_change_time = jiffies;
4364 spin_unlock_irqrestore(shost->host_lock, flags);
4366 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4368 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4369 if (lpfc_cmd->ts_cmd_start) {
4370 lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
4371 lpfc_cmd->ts_data_io = ktime_get_ns();
4372 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4373 lpfc_io_ktime(phba, lpfc_cmd);
4376 if (likely(!wait_xb_clr))
4377 lpfc_cmd->pCmd = NULL;
4378 spin_unlock(&lpfc_cmd->buf_lock);
4380 /* Check if IO qualified for CMF */
4381 if (phba->cmf_active_mode != LPFC_CFG_OFF &&
4382 cmd->sc_data_direction == DMA_FROM_DEVICE &&
4383 (scsi_sg_count(cmd))) {
4384 /* Used when calculating average latency */
4385 lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start;
4386 lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost);
4392 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4396 * If there is an abort thread waiting for command completion
4397 * wake up the thread.
4399 spin_lock(&lpfc_cmd->buf_lock);
4400 lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
4401 if (lpfc_cmd->waitq)
4402 wake_up(lpfc_cmd->waitq);
4403 spin_unlock(&lpfc_cmd->buf_lock);
4405 lpfc_release_scsi_buf(phba, lpfc_cmd);
4409 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
4410 * @phba: The Hba for which this call is being executed.
4411 * @pIocbIn: The command IOCBQ for the scsi cmnd.
4412 * @pIocbOut: The response IOCBQ for the scsi cmnd.
4414 * This routine assigns the scsi command result by examining the response IOCB
4415 * status field. It also handles the QUEUE FULL condition by ramping down the
4416 * device queue depth.
4419 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4420 struct lpfc_iocbq *pIocbOut)
4422 struct lpfc_io_buf *lpfc_cmd =
4423 (struct lpfc_io_buf *) pIocbIn->context1;
4424 struct lpfc_vport *vport = pIocbIn->vport;
4425 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4426 struct lpfc_nodelist *pnode = rdata->pnode;
4427 struct scsi_cmnd *cmd;
4428 unsigned long flags;
4429 struct lpfc_fast_path_event *fast_path_evt;
4430 struct Scsi_Host *shost;
4432 uint32_t logit = LOG_FCP;
4434 /* Guard against abort handler being called at same time */
4435 spin_lock(&lpfc_cmd->buf_lock);
4437 /* Sanity check on return of outstanding command */
4438 cmd = lpfc_cmd->pCmd;
4439 if (!cmd || !phba) {
4440 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4441 "2621 IO completion: Not an active IO\n");
4442 spin_unlock(&lpfc_cmd->buf_lock);
4446 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4447 if (phba->sli4_hba.hdwq)
4448 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4450 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4451 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4452 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4454 shost = cmd->device->host;
4456 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
4457 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
4458 /* pick up SLI4 exchange busy status from HBA */
4459 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4460 if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY)
4461 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4463 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4464 if (lpfc_cmd->prot_data_type) {
4465 struct scsi_dif_tuple *src = NULL;
4467 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4469 * Used to restore any changes to protection
4470 * data for error injection.
4472 switch (lpfc_cmd->prot_data_type) {
4473 case LPFC_INJERR_REFTAG:
4475 lpfc_cmd->prot_data;
4477 case LPFC_INJERR_APPTAG:
4479 (uint16_t)lpfc_cmd->prot_data;
4481 case LPFC_INJERR_GUARD:
4483 (uint16_t)lpfc_cmd->prot_data;
4489 lpfc_cmd->prot_data = 0;
4490 lpfc_cmd->prot_data_type = 0;
4491 lpfc_cmd->prot_data_segment = NULL;
4495 if (unlikely(lpfc_cmd->status)) {
4496 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4497 (lpfc_cmd->result & IOERR_DRVR_MASK))
4498 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4499 else if (lpfc_cmd->status >= IOSTAT_CNT)
4500 lpfc_cmd->status = IOSTAT_DEFAULT;
4501 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4502 !lpfc_cmd->fcp_rsp->rspStatus3 &&
4503 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4504 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4507 logit = LOG_FCP | LOG_FCP_UNDER;
4508 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4509 "9030 FCP cmd x%x failed <%d/%lld> "
4510 "status: x%x result: x%x "
4511 "sid: x%x did: x%x oxid: x%x "
4514 cmd->device ? cmd->device->id : 0xffff,
4515 cmd->device ? cmd->device->lun : 0xffff,
4516 lpfc_cmd->status, lpfc_cmd->result,
4518 (pnode) ? pnode->nlp_DID : 0,
4519 phba->sli_rev == LPFC_SLI_REV4 ?
4520 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4521 pIocbOut->iocb.ulpContext,
4522 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4524 switch (lpfc_cmd->status) {
4525 case IOSTAT_FCP_RSP_ERROR:
4526 /* Call FCP RSP handler to determine result */
4527 lpfc_handle_fcp_err(vport, lpfc_cmd,
4528 pIocbOut->iocb.un.fcpi.fcpi_parm);
4530 case IOSTAT_NPORT_BSY:
4531 case IOSTAT_FABRIC_BSY:
4532 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4533 fast_path_evt = lpfc_alloc_fast_evt(phba);
4536 fast_path_evt->un.fabric_evt.event_type =
4537 FC_REG_FABRIC_EVENT;
4538 fast_path_evt->un.fabric_evt.subcategory =
4539 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4540 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4542 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4543 &pnode->nlp_portname,
4544 sizeof(struct lpfc_name));
4545 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4546 &pnode->nlp_nodename,
4547 sizeof(struct lpfc_name));
4549 fast_path_evt->vport = vport;
4550 fast_path_evt->work_evt.evt =
4551 LPFC_EVT_FASTPATH_MGMT_EVT;
4552 spin_lock_irqsave(&phba->hbalock, flags);
4553 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4555 spin_unlock_irqrestore(&phba->hbalock, flags);
4556 lpfc_worker_wake_up(phba);
4558 case IOSTAT_LOCAL_REJECT:
4559 case IOSTAT_REMOTE_STOP:
4560 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4562 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4563 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4565 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4566 cmd->result = DID_NO_CONNECT << 16;
4569 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4570 lpfc_cmd->result == IOERR_NO_RESOURCES ||
4571 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4572 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4573 cmd->result = DID_REQUEUE << 16;
4576 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4577 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4578 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4579 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4581 * This is a response for a BG enabled
4582 * cmd. Parse BG error
4584 lpfc_parse_bg_err(phba, lpfc_cmd,
4588 lpfc_printf_vlog(vport, KERN_WARNING,
4590 "9031 non-zero BGSTAT "
4591 "on unprotected cmd\n");
4594 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4595 && (phba->sli_rev == LPFC_SLI_REV4)
4597 /* This IO was aborted by the target; we don't
4598 * know the rxid, and because we did not send the
4599 * ABTS we cannot generate an RRQ.
4601 lpfc_set_rrq_active(phba, pnode,
4602 lpfc_cmd->cur_iocbq.sli4_lxritag,
4607 cmd->result = DID_ERROR << 16;
4611 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4612 cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
4615 cmd->result = DID_OK << 16;
4617 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4618 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4620 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4621 "0710 Iodone <%d/%llu> cmd x%px, error "
4622 "x%x SNS x%x x%x Data: x%x x%x\n",
4623 cmd->device->id, cmd->device->lun, cmd,
4624 cmd->result, *lp, *(lp + 3), cmd->retries,
4625 scsi_get_resid(cmd));
4628 lpfc_update_stats(vport, lpfc_cmd);
4629 if (vport->cfg_max_scsicmpl_time &&
4630 time_after(jiffies, lpfc_cmd->start_time +
4631 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4632 spin_lock_irqsave(shost->host_lock, flags);
4634 if (pnode->cmd_qdepth >
4635 atomic_read(&pnode->cmd_pending) &&
4636 (atomic_read(&pnode->cmd_pending) >
4637 LPFC_MIN_TGT_QDEPTH) &&
4638 ((cmd->cmnd[0] == READ_10) ||
4639 (cmd->cmnd[0] == WRITE_10)))
4641 atomic_read(&pnode->cmd_pending);
4643 pnode->last_change_time = jiffies;
4645 spin_unlock_irqrestore(shost->host_lock, flags);
4647 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4649 lpfc_cmd->pCmd = NULL;
4650 spin_unlock(&lpfc_cmd->buf_lock);
4652 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4653 if (lpfc_cmd->ts_cmd_start) {
4654 lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
4655 lpfc_cmd->ts_data_io = ktime_get_ns();
4656 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4657 lpfc_io_ktime(phba, lpfc_cmd);
4661 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4665 * If there is an abort thread waiting for command completion
4666 * wake up the thread.
4668 spin_lock(&lpfc_cmd->buf_lock);
4669 lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
4670 if (lpfc_cmd->waitq)
4671 wake_up(lpfc_cmd->waitq);
4672 spin_unlock(&lpfc_cmd->buf_lock);
4674 lpfc_release_scsi_buf(phba, lpfc_cmd);
4678 * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
4679 * @vport: Pointer to vport object.
4680 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4681 * @tmo: timeout value for the IO
4683 * Based on the data direction of the command, initialize the IOCB
4684 * in the I/O buffer. Fill in the IOCB fields that are independent
4685 * of the scsi buffer.
4687 * RETURNS 0 - SUCCESS,
4689 static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
4690 struct lpfc_io_buf *lpfc_cmd,
4693 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4694 struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
4695 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4696 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4697 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4698 int datadir = scsi_cmnd->sc_data_direction;
4701 piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4704 * There are three possibilities here - use scatter-gather segment, use
4705 * the single mapping, or neither. Start the lpfc command prep by
4706 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4709 if (scsi_sg_count(scsi_cmnd)) {
4710 if (datadir == DMA_TO_DEVICE) {
4711 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4712 iocb_cmd->ulpPU = PARM_READ_CHECK;
4713 if (vport->cfg_first_burst_size &&
4714 (pnode->nlp_flag & NLP_FIRSTBURST)) {
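/* Target supports first burst: seed fcpi_XRdy so up to
 * cfg_first_burst_size bytes of write data may be sent before the
 * first XFER_RDY, capped at the total transfer length.
 */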
4717 fcpdl = scsi_bufflen(scsi_cmnd);
4718 xrdy_len = min(fcpdl,
4719 vport->cfg_first_burst_size);
4720 piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
4722 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4724 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4725 iocb_cmd->ulpPU = PARM_READ_CHECK;
4726 fcp_cmnd->fcpCntl3 = READ_DATA;
4729 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4730 iocb_cmd->un.fcpi.fcpi_parm = 0;
4731 iocb_cmd->ulpPU = 0;
4732 fcp_cmnd->fcpCntl3 = 0;
4736 * Finish initializing those IOCB fields that are independent
4737 * of the scsi_cmnd request_buffer
4739 piocbq->iocb.ulpContext = pnode->nlp_rpi;
4740 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4741 piocbq->iocb.ulpFCP2Rcvy = 1;
4743 piocbq->iocb.ulpFCP2Rcvy = 0;
4745 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4746 piocbq->context1 = lpfc_cmd;
4747 if (!piocbq->cmd_cmpl)
4748 piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4749 piocbq->iocb.ulpTimeout = tmo;
4750 piocbq->vport = vport;
4755 * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
4756 * @vport: Pointer to vport object.
4757 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4758 * @tmo: timeout value for the IO
4760 * Based on the data direction of the command, copy the WQE template
4761 * to the I/O buffer WQE. Fill in the WQE fields that are independent
4762 * of the scsi buffer.
4764 * RETURNS 0 - SUCCESS,
4766 static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
4767 struct lpfc_io_buf *lpfc_cmd,
4770 struct lpfc_hba *phba = vport->phba;
4771 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4772 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4773 struct lpfc_sli4_hdw_queue *hdwq = NULL;
4774 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4775 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4776 union lpfc_wqe128 *wqe = &pwqeq->wqe;
4777 u16 idx = lpfc_cmd->hdwq_no;
4778 int datadir = scsi_cmnd->sc_data_direction;
4780 hdwq = &phba->sli4_hba.hdwq[idx];
4782 /* Initialize 64 bytes only */
4783 memset(wqe, 0, sizeof(union lpfc_wqe128));
4786 * There are three possibilities here - use scatter-gather segment, use
4787 * the single mapping, or neither.
4789 if (scsi_sg_count(scsi_cmnd)) {
4790 if (datadir == DMA_TO_DEVICE) {
4791 /* From the iwrite template, initialize words 7 - 11 */
4792 memcpy(&wqe->words[7],
4793 &lpfc_iwrite_cmd_template.words[7],
4794 sizeof(uint32_t) * 5);
4796 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4798 hdwq->scsi_cstat.output_requests++;
4800 /* From the iread template, initialize words 7 - 11 */
4801 memcpy(&wqe->words[7],
4802 &lpfc_iread_cmd_template.words[7],
4803 sizeof(uint32_t) * 5);
4806 bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);
4808 fcp_cmnd->fcpCntl3 = READ_DATA;
4810 hdwq->scsi_cstat.input_requests++;
4812 /* For a CMF Managed port, iod must be zero'ed */
4813 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
4814 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
4818 /* From the icmnd template, initialize words 4 - 11 */
4819 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4820 sizeof(uint32_t) * 8);
4823 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);
4825 fcp_cmnd->fcpCntl3 = 0;
4827 hdwq->scsi_cstat.control_requests++;
4831 * Finish initializing those WQE fields that are independent
4832 * of the request_buffer
4836 bf_set(payload_offset_len, &wqe->fcp_icmd,
4837 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
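/* Word 6: SLI-4 WQEs carry the adapter-assigned physical RPI from
 * rpi_ids[], not the logical nlp_rpi programmed into ulpContext on SLI-3.
 */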
4840 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
4841 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
4842 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
4845 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4846 bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
4848 bf_set(wqe_class, &wqe->generic.wqe_com,
4849 (pnode->nlp_fcp_info & 0x0f));
4852 wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
4855 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
4857 pwqeq->vport = vport;
4859 pwqeq->context1 = lpfc_cmd;
4860 pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
4861 pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
4867 * lpfc_scsi_prep_cmnd - Wrapper routine to convert a scsi cmnd to an FCP info unit
4868 * @vport: The virtual port for which this call is being executed.
4869 * @lpfc_cmd: The scsi command which needs to send.
4870 * @pnode: Pointer to lpfc_nodelist.
4872 * This routine initializes the fcp_cmnd and iocb data structures from the scsi
4873 * command to be transferred to the device.
4876 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4877 struct lpfc_nodelist *pnode)
4879 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4880 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4886 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4887 /* clear task management bits */
4888 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4890 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4891 &lpfc_cmd->fcp_cmnd->fcp_lun);
4893 ptr = &fcp_cmnd->fcpCdb[0];
4894 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4895 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4896 ptr += scsi_cmnd->cmd_len;
4897 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4900 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
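/* Hand off to the SLI-rev specific prep routine (_s3 or _s4) installed in
 * the jump table by lpfc_scsi_api_table_setup() to finish the IOCB/WQE.
 */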
4902 lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
4908 * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
4909 * @vport: The virtual port for which this call is being executed.
4910 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4911 * @lun: Logical unit number.
4912 * @task_mgmt_cmd: SCSI task management command.
4914 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4915 * for device with SLI-3 interface spec.
4922 lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
4923 struct lpfc_io_buf *lpfc_cmd,
4924 u64 lun, u8 task_mgmt_cmd)
4926 struct lpfc_iocbq *piocbq;
4928 struct fcp_cmnd *fcp_cmnd;
4929 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4930 struct lpfc_nodelist *ndlp = rdata->pnode;
4932 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4935 piocbq = &(lpfc_cmd->cur_iocbq);
4936 piocbq->vport = vport;
4938 piocb = &piocbq->iocb;
4940 fcp_cmnd = lpfc_cmd->fcp_cmnd;
4941 /* Clear out any old data in the FCP command area */
4942 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4943 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
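/* The task management code travels in fcpCntl2 of the FCP_CMND IU;
 * a TMF carries no CDB and no data transfer.
 */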
4944 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4945 if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4946 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4947 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4948 piocb->ulpContext = ndlp->nlp_rpi;
4949 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4950 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4952 piocb->un.fcpi.fcpi_parm = 0;
4954 /* ulpTimeout is only one byte */
4955 if (lpfc_cmd->timeout > 0xff) {
4957 * Do not timeout the command at the firmware level.
4958 * The driver will provide the timeout mechanism.
4960 piocb->ulpTimeout = 0;
4962 piocb->ulpTimeout = lpfc_cmd->timeout;
4968 * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
4969 * @vport: The virtual port for which this call is being executed.
4970 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4971 * @lun: Logical unit number.
4972 * @task_mgmt_cmd: SCSI task management command.
4974 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4975 * for device with SLI-4 interface spec.
4982 lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
4983 struct lpfc_io_buf *lpfc_cmd,
4984 u64 lun, u8 task_mgmt_cmd)
4986 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4987 union lpfc_wqe128 *wqe = &pwqeq->wqe;
4988 struct fcp_cmnd *fcp_cmnd;
4989 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4990 struct lpfc_nodelist *ndlp = rdata->pnode;
4992 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4995 pwqeq->vport = vport;
4996 /* Initialize 64 bytes only */
4997 memset(wqe, 0, sizeof(union lpfc_wqe128));
4999 /* From the icmnd template, initialize words 4 - 11 */
5000 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
5001 sizeof(uint32_t) * 8);
5003 fcp_cmnd = lpfc_cmd->fcp_cmnd;
5004 /* Clear out any old data in the FCP command area */
5005 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
5006 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
5007 fcp_cmnd->fcpCntl3 = 0;
5008 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
5010 bf_set(payload_offset_len, &wqe->fcp_icmd,
5011 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
5012 bf_set(cmd_buff_len, &wqe->fcp_icmd, 0);
5013 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, /* ulpContext */
5014 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
5015 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
5016 ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0));
5017 bf_set(wqe_class, &wqe->fcp_icmd.wqe_com,
5018 (ndlp->nlp_fcp_info & 0x0f));
5020 /* ulpTimeout is only one byte */
5021 if (lpfc_cmd->timeout > 0xff) {
5023 * Do not timeout the command at the firmware level.
5024 * The driver will provide the timeout mechanism.
5026 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0);
5028 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout);
5031 lpfc_prep_embed_io(vport->phba, lpfc_cmd);
5032 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
5033 wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
5034 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
5036 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
5042 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
5043 * @phba: The hba struct for which this call is being executed.
5044 * @dev_grp: The HBA PCI-Device group number.
5046 * This routine sets up the SCSI interface API function jump table in @phba
5048 * Returns: 0 - success, -ENODEV - failure.
5051 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5054 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
5057 case LPFC_PCI_DEV_LP:
5058 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
5059 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
5060 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
5061 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
5062 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
5063 phba->lpfc_scsi_prep_task_mgmt_cmd =
5064 lpfc_scsi_prep_task_mgmt_cmd_s3;
5066 case LPFC_PCI_DEV_OC:
5067 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
5068 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
5069 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
5070 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
5071 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
5072 phba->lpfc_scsi_prep_task_mgmt_cmd =
5073 lpfc_scsi_prep_task_mgmt_cmd_s4;
5076 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5077 "1418 Invalid HBA PCI-device group: 0x%x\n",
5081 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
5082 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
5087 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
5088 * @phba: The Hba for which this call is being executed.
5089 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
5090 * @rspiocbq: Pointer to lpfc_iocbq data structure.
5092 * This routine is the IOCB completion routine for the device reset and target
5093 * reset routines. It releases the scsi buffer associated with lpfc_cmd.
5096 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
5097 struct lpfc_iocbq *cmdiocbq,
5098 struct lpfc_iocbq *rspiocbq)
5100 struct lpfc_io_buf *lpfc_cmd =
5101 (struct lpfc_io_buf *) cmdiocbq->context1;
5103 lpfc_release_scsi_buf(phba, lpfc_cmd);
5108 * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
5109 * if issuing a pci_bus_reset is possibly unsafe
5110 * @phba: lpfc_hba pointer.
5113 * Walks the bus_list to ensure only PCI devices with Emulex
5114 * vendor id, device ids that support hot reset, and only one occurrence
5118 * -EBADSLT, detected invalid device
5122 lpfc_check_pci_resettable(struct lpfc_hba *phba)
5124 const struct pci_dev *pdev = phba->pcidev;
5125 struct pci_dev *ptr = NULL;
5128 /* Walk the list of devices on the pci_dev's bus */
5129 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
5130 /* Check for Emulex Vendor ID */
5131 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
5132 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5133 "8346 Non-Emulex vendor found: "
5134 "0x%04x\n", ptr->vendor);
5138 /* Check for valid Emulex Device ID */
5139 if (phba->sli_rev != LPFC_SLI_REV4 ||
5140 phba->hba_flag & HBA_FCOE_MODE) {
5141 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5142 "8347 Incapable PCI reset device: "
5143 "0x%04x\n", ptr->device);
5147 /* Check for only one function 0 ID to ensure only one HBA on
5150 if (ptr->devfn == 0) {
5151 if (++counter > 1) {
5152 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5153 "8348 More than one device on "
5154 "secondary bus found\n");
5164 * lpfc_info - Info entry point of scsi_host_template data structure
5165 * @host: The scsi host for which this call is being executed.
5167 * This routine provides module information about hba.
5170 * Pointer to char - Success.
5173 lpfc_info(struct Scsi_Host *host)
5175 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
5176 struct lpfc_hba *phba = vport->phba;
5178 static char lpfcinfobuf[384];
5179 char tmp[384] = {0};
5181 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
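/* Build the info string piecewise (model, PCI location, port, link speed),
 * appending with strlcat and stopping once the static buffer is full.
 */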
5182 if (phba && phba->pcidev){
5183 /* Model Description */
5184 scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
5185 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5186 sizeof(lpfcinfobuf))
5190 scnprintf(tmp, sizeof(tmp),
5191 " on PCI bus %02x device %02x irq %d",
5192 phba->pcidev->bus->number, phba->pcidev->devfn,
5194 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5195 sizeof(lpfcinfobuf))
5199 if (phba->Port[0]) {
5200 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
5201 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5202 sizeof(lpfcinfobuf))
5207 link_speed = lpfc_sli_port_speed_get(phba);
5208 if (link_speed != 0) {
5209 scnprintf(tmp, sizeof(tmp),
5210 " Logical Link Speed: %d Mbps", link_speed);
5211 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5212 sizeof(lpfcinfobuf))
5216 /* PCI resettable */
5217 if (!lpfc_check_pci_resettable(phba)) {
5218 scnprintf(tmp, sizeof(tmp), " PCI resettable");
5219 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
5228 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
5229 * @phba: The Hba for which this call is being executed.
5231 * This routine re-arms the fcp_poll_timer of @phba using cfg_poll_tmo.
5232 * The default value of cfg_poll_tmo is 10 milliseconds.
5234 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
5236 unsigned long poll_tmo_expires =
5237 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
5239 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
5240 mod_timer(&phba->fcp_poll_timer,
5245 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
5246 * @phba: The Hba for which this call is being executed.
5248 * This routine starts the fcp_poll_timer of @phba.
5250 void lpfc_poll_start_timer(struct lpfc_hba * phba)
5252 lpfc_poll_rearm_timer(phba);
5256 * lpfc_poll_timeout - Restart polling timer
5257 * @t: Timer construct where lpfc_hba data structure pointer is obtained.
5259 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
5260 * and the FCP ring interrupt is disabled.
5262 void lpfc_poll_timeout(struct timer_list *t)
5264 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
5266 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5267 lpfc_sli_handle_fast_ring_event(phba,
5268 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5270 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5271 lpfc_poll_rearm_timer(phba);
5276 * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
5277 * @vport: The virtual port for which this call is being executed.
5278 * @hash: calculated hash value
5279 * @buf: uuid associated with the VE
5280 * Return the VMID entry associated with the UUID
5281 * Make sure to acquire the appropriate lock before invoking this routine.
5283 struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
5286 struct lpfc_vmid *vmp;
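/* Only the bucket selected by @hash is scanned; the full 16-byte UUID is
 * compared to resolve hash collisions.
 */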
5288 hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
5289 if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
5296 * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
5297 * @vport: The virtual port for which this call is being executed.
5298 * @hash: calculated hash value
5299 * @vmp: Pointer to a VMID entry representing a VM sending I/O
5301 * This routine will insert the newly acquired VMID entity in the hash table.
5302 * Make sure to acquire the appropriate lock before invoking this routine.
5305 lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
5306 struct lpfc_vmid *vmp)
5308 hash_add(vport->hash_table, &vmp->hnode, hash);
5312 * lpfc_vmid_hash_fn - create a hash value of the UUID
5313 * @vmid: uuid associated with the VE
5314 * @len: length of the VMID string
5315 * Returns the calculated hash value
5317 int lpfc_vmid_hash_fn(const char *vmid, int len)
5326 if (c >= 'A' && c <= 'Z')
5329 hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
5330 (c >> LPFC_VMID_HASH_SHIFT)) * 19;
5333 return hash & LPFC_VMID_HASH_MASK;
5337 * lpfc_vmid_update_entry - update the vmid entry in the hash table
5338 * @vport: The virtual port for which this call is being executed.
5339 * @cmd: address of scsi cmd descriptor
5340 * @vmp: Pointer to a VMID entry representing a VM sending I/O
5343 static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
5344 *cmd, struct lpfc_vmid *vmp,
5345 union lpfc_vmid_io_tag *tag)
5349 if (vport->vmid_priority_tagging)
5350 tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
5352 tag->app_id = vmp->un.app_id;
5354 if (cmd->sc_data_direction == DMA_TO_DEVICE)
5359 /* update the last access timestamp in the table */
5360 lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
5364 static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
5365 struct lpfc_vmid *vmid)
5368 struct lpfc_vmid *pvmid;
5370 if (vport->port_type == LPFC_PHYSICAL_PORT) {
5371 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
5373 hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
5375 lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
5378 vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
5380 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
5385 * lpfc_vmid_get_appid - get the VMID associated with the UUID
5386 * @vport: The virtual port for which this call is being executed.
5387 * @uuid: UUID associated with the VE
5388 * @cmd: address of scsi_cmd descriptor
5390 * Returns status of the function
5392 static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
5393 scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag)
5395 struct lpfc_vmid *vmp = NULL;
5396 int hash, len, rc, i;
5398 /* check if QFPA is complete */
5399 if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag &
5400 LPFC_VMID_QFPA_CMPL)) {
5401 vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5405 /* search if the UUID has already been mapped to the VMID */
5407 hash = lpfc_vmid_hash_fn(uuid, len);
5409 /* search for the VMID in the table */
5410 read_lock(&vport->vmid_lock);
5411 vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
5413 /* if found, check if its already registered */
5414 if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
5415 read_unlock(&vport->vmid_lock);
5416 lpfc_vmid_update_entry(vport, cmd, vmp, tag);
5418 } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
5419 vmp->flag & LPFC_VMID_DE_REGISTER)) {
5420 /* else if register or dereg request has already been sent */
5421 /* Hence VMID tag will not be added for this I/O */
5422 read_unlock(&vport->vmid_lock);
5425 /* The VMID was not found in the hashtable. At this point, */
5426 /* drop the read lock first before proceeding further */
5427 read_unlock(&vport->vmid_lock);
5428 /* start the process to obtain one as per the */
5429 /* type of the VMID indicated */
5430 write_lock(&vport->vmid_lock);
5431 vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
5433 /* while the read lock was released, in case the entry was */
5434 /* added by other context or is in process of being added */
5435 if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
5436 lpfc_vmid_update_entry(vport, cmd, vmp, tag);
5437 write_unlock(&vport->vmid_lock);
5439 } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
5440 write_unlock(&vport->vmid_lock);
5444 /* else search and allocate a free slot in the hash table */
5445 if (vport->cur_vmid_cnt < vport->max_vmid) {
5446 for (i = 0; i < vport->max_vmid; i++) {
5447 vmp = vport->vmid + i;
5448 if (vmp->flag == LPFC_VMID_SLOT_FREE)
5451 if (i == vport->max_vmid)
5458 write_unlock(&vport->vmid_lock);
5462 /* Add the vmid and register */
5463 lpfc_put_vmid_in_hashtable(vport, hash, vmp);
5464 vmp->vmid_len = len;
5465 memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
5468 vmp->flag = LPFC_VMID_SLOT_USED;
5470 vmp->delete_inactive =
5471 vport->vmid_inactivity_timeout ? 1 : 0;
5473 /* if type priority tag, get next available VMID */
5474 if (lpfc_vmid_is_type_priority_tag(vport))
5475 lpfc_vmid_assign_cs_ctl(vport, vmp);
5477 /* allocate the per cpu variable for holding */
5478 /* the last access time stamp only if VMID is enabled */
5479 if (!vmp->last_io_time)
5480 vmp->last_io_time = __alloc_percpu(sizeof(u64),
5483 if (!vmp->last_io_time) {
5484 hash_del(&vmp->hnode);
5485 vmp->flag = LPFC_VMID_SLOT_FREE;
5486 write_unlock(&vport->vmid_lock);
5490 write_unlock(&vport->vmid_lock);
5492 /* complete transaction with switch */
5493 if (lpfc_vmid_is_type_priority_tag(vport))
5494 rc = lpfc_vmid_uvem(vport, vmp, true);
5496 rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
5498 write_lock(&vport->vmid_lock);
5499 vport->cur_vmid_cnt++;
5500 vmp->flag |= LPFC_VMID_REQ_REGISTER;
5501 write_unlock(&vport->vmid_lock);
5503 write_lock(&vport->vmid_lock);
5504 hash_del(&vmp->hnode);
5505 vmp->flag = LPFC_VMID_SLOT_FREE;
5506 free_percpu(vmp->last_io_time);
5507 write_unlock(&vport->vmid_lock);
5511 /* finally, enable the idle timer once */
5512 if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
5513 mod_timer(&vport->phba->inactive_vmid_poll,
5515 msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
5516 vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
5523 * lpfc_is_command_vm_io - get the UUID from blk cgroup
5524 * @cmd: Pointer to scsi_cmnd data structure
5525 * Returns UUID if present, otherwise NULL
5527 static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
5529 struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
5531 return bio ? blkcg_get_fc_appid(bio) : NULL;
5535 * lpfc_queuecommand - scsi_host_template queuecommand entry point
5536 * @shost: kernel scsi host pointer.
5537 * @cmnd: Pointer to scsi_cmnd data structure.
5539 * The scsi midlayer calls this routine to submit @cmnd for processing. This
5540 * routine prepares an IOCB/WQE from the scsi command and provides it to the
5541 * firmware; the command's done callback is invoked once processing completes.
5545 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
5548 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
5550 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5551 struct lpfc_hba *phba = vport->phba;
5552 struct lpfc_iocbq *cur_iocbq = NULL;
5553 struct lpfc_rport_data *rdata;
5554 struct lpfc_nodelist *ndlp;
5555 struct lpfc_io_buf *lpfc_cmd;
5556 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5561 start = ktime_get_ns();
5562 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5564 /* sanity check on references */
5565 if (unlikely(!rdata) || unlikely(!rport))
5566 goto out_fail_command;
5568 err = fc_remote_port_chkready(rport);
5571 goto out_fail_command;
5573 ndlp = rdata->pnode;
5575 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
5576 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
5578 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5579 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
5580 " op:%02x str=%s without registering for"
5581 " BlockGuard - Rejecting command\n",
5582 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
5583 dif_op_str[scsi_get_prot_op(cmnd)]);
5584 goto out_fail_command;
5588 * Catch race where our node has transitioned, but the
5589 * transport is still transitioning.
5594 /* Check if IO qualifies for CMF */
5595 if (phba->cmf_active_mode != LPFC_CFG_OFF &&
5596 cmnd->sc_data_direction == DMA_FROM_DEVICE &&
5597 (scsi_sg_count(cmnd))) {
5598 /* Latency start time saved in rx_cmd_start later in routine */
5599 err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd));
5604 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
5605 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
5606 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5607 "3377 Target Queue Full, scsi Id:%d "
5608 "Qdepth:%d Pending command:%d"
5609 " WWNN:%02x:%02x:%02x:%02x:"
5610 "%02x:%02x:%02x:%02x, "
5611 " WWPN:%02x:%02x:%02x:%02x:"
5612 "%02x:%02x:%02x:%02x",
5613 ndlp->nlp_sid, ndlp->cmd_qdepth,
5614 atomic_read(&ndlp->cmd_pending),
5615 ndlp->nlp_nodename.u.wwn[0],
5616 ndlp->nlp_nodename.u.wwn[1],
5617 ndlp->nlp_nodename.u.wwn[2],
5618 ndlp->nlp_nodename.u.wwn[3],
5619 ndlp->nlp_nodename.u.wwn[4],
5620 ndlp->nlp_nodename.u.wwn[5],
5621 ndlp->nlp_nodename.u.wwn[6],
5622 ndlp->nlp_nodename.u.wwn[7],
5623 ndlp->nlp_portname.u.wwn[0],
5624 ndlp->nlp_portname.u.wwn[1],
5625 ndlp->nlp_portname.u.wwn[2],
5626 ndlp->nlp_portname.u.wwn[3],
5627 ndlp->nlp_portname.u.wwn[4],
5628 ndlp->nlp_portname.u.wwn[5],
5629 ndlp->nlp_portname.u.wwn[6],
5630 ndlp->nlp_portname.u.wwn[7]);
5635 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
5636 if (lpfc_cmd == NULL) {
5637 lpfc_rampdown_queue_depth(phba);
5639 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5640 "0707 driver's buffer pool is empty, "
5644 lpfc_cmd->rx_cmd_start = start;
5646 cur_iocbq = &lpfc_cmd->cur_iocbq;
5648 * Store the midlayer's command structure for the completion phase
5649 * and complete the command initialization.
5651 lpfc_cmd->pCmd = cmnd;
5652 lpfc_cmd->rdata = rdata;
5653 lpfc_cmd->ndlp = ndlp;
5654 cur_iocbq->cmd_cmpl = NULL;
5655 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
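/* Save the lpfc_io_buf in host_scribble so the error handlers (e.g.
 * lpfc_abort_handler) can recover it from the midlayer scsi_cmnd.
 */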
5657 err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
5659 goto out_host_busy_release_buf;
5661 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
5662 if (vport->phba->cfg_enable_bg) {
5663 lpfc_printf_vlog(vport,
5664 KERN_INFO, LOG_SCSI_CMD,
5665 "9033 BLKGRD: rcvd %s cmd:x%x "
5666 "reftag x%x cnt %u pt %x\n",
5667 dif_op_str[scsi_get_prot_op(cmnd)],
5669 scsi_prot_ref_tag(cmnd),
5670 scsi_logical_block_count(cmnd),
5671 (cmnd->cmnd[1]>>5));
5673 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
5675 if (vport->phba->cfg_enable_bg) {
5676 lpfc_printf_vlog(vport,
5677 KERN_INFO, LOG_SCSI_CMD,
5678 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
5679 "x%x reftag x%x cnt %u pt %x\n",
5681 scsi_prot_ref_tag(cmnd),
5682 scsi_logical_block_count(cmnd),
5683 (cmnd->cmnd[1]>>5));
5685 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
5688 if (unlikely(err)) {
5690 cmnd->result = DID_ERROR << 16;
5691 goto out_fail_command_release_buf;
5693 goto out_host_busy_free_buf;
5696 /* check the necessary and sufficient condition to support VMID */
5697 if (lpfc_is_vmid_enabled(phba) &&
5698 (ndlp->vmid_support ||
5699 phba->pport->vmid_priority_tagging ==
5700 LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
5701 /* is the I/O generated by a VM, get the associated virtual */
5703 uuid = lpfc_is_command_vm_io(cmnd);
5706 err = lpfc_vmid_get_appid(vport, uuid, cmnd,
5707 (union lpfc_vmid_io_tag *)
5708 &cur_iocbq->vmid_tag);
5710 cur_iocbq->cmd_flag |= LPFC_IO_VMID;
5713 atomic_inc(&ndlp->cmd_pending);
5715 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5716 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
5717 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
5719 /* Issue I/O to adapter */
5720 err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq,
5722 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5724 lpfc_cmd->ts_cmd_start = start;
5725 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
5726 lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
5728 lpfc_cmd->ts_cmd_start = 0;
5732 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5733 "3376 FCP could not issue iocb err %x "
5734 "FCP cmd x%x <%d/%llu> "
5735 "sid: x%x did: x%x oxid: x%x "
5736 "Data: x%x x%x x%x x%x\n",
5738 cmnd->device ? cmnd->device->id : 0xffff,
5739 cmnd->device ? cmnd->device->lun : (u64)-1,
5740 vport->fc_myDID, ndlp->nlp_DID,
5741 phba->sli_rev == LPFC_SLI_REV4 ?
5742 cur_iocbq->sli4_xritag : 0xffff,
5743 phba->sli_rev == LPFC_SLI_REV4 ?
5744 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
5745 cur_iocbq->iocb.ulpContext,
5747 phba->sli_rev == LPFC_SLI_REV4 ?
5749 &cur_iocbq->wqe.generic.wqe_com) :
5750 cur_iocbq->iocb.ulpTimeout,
5751 (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
5753 goto out_host_busy_free_buf;
5756 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5757 lpfc_sli_handle_fast_ring_event(phba,
5758 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5760 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5761 lpfc_poll_rearm_timer(phba);
5764 if (phba->cfg_xri_rebalancing)
5765 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
5769 out_host_busy_free_buf:
5770 idx = lpfc_cmd->hdwq_no;
5771 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
5772 if (phba->sli4_hba.hdwq) {
5773 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
5775 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
5778 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
5781 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
5784 out_host_busy_release_buf:
5785 lpfc_release_scsi_buf(phba, lpfc_cmd);
5787 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5789 return SCSI_MLQUEUE_HOST_BUSY;
5792 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5795 return SCSI_MLQUEUE_TARGET_BUSY;
5797 out_fail_command_release_buf:
5798 lpfc_release_scsi_buf(phba, lpfc_cmd);
5799 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5808 * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport
5809 * @vport: The virtual port for which this call is being executed.
5811 void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
5814 struct lpfc_vmid *cur;
5816 if (vport->port_type == LPFC_PHYSICAL_PORT)
5817 del_timer_sync(&vport->phba->inactive_vmid_poll);
5819 kfree(vport->qfpa_res);
5820 kfree(vport->vmid_priority.vmid_range);
5823 if (!hash_empty(vport->hash_table))
5824 hash_for_each(vport->hash_table, bucket, cur, hnode)
5825 hash_del(&cur->hnode);
5827 vport->qfpa_res = NULL;
5828 vport->vmid_priority.vmid_range = NULL;
5830 vport->cur_vmid_cnt = 0;
5834 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
5835 * @cmnd: Pointer to scsi_cmnd data structure.
5837 * This routine aborts @cmnd pending in base driver.
5844 lpfc_abort_handler(struct scsi_cmnd *cmnd)
5846 struct Scsi_Host *shost = cmnd->device->host;
5847 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5848 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5849 struct lpfc_hba *phba = vport->phba;
5850 struct lpfc_iocbq *iocb;
5851 struct lpfc_io_buf *lpfc_cmd;
5852 int ret = SUCCESS, status = 0;
5853 struct lpfc_sli_ring *pring_s4 = NULL;
5854 struct lpfc_sli_ring *pring = NULL;
5856 unsigned long flags;
5857 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
5859 status = fc_block_rport(rport);
5860 if (status != 0 && status != SUCCESS)
5863 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
5867 spin_lock_irqsave(&phba->hbalock, flags);
5868 /* driver queued commands are in process of being flushed */
5869 if (phba->hba_flag & HBA_IOQ_FLUSH) {
5870 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5871 "3168 SCSI Layer abort requested I/O has been "
5872 "flushed by LLD.\n");
5877 /* Guard against IO completion being called at same time */
5878 spin_lock(&lpfc_cmd->buf_lock);
5880 if (!lpfc_cmd->pCmd) {
5881 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5882 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
5883 "x%x ID %d LUN %llu\n",
5884 SUCCESS, cmnd->device->id, cmnd->device->lun);
5885 goto out_unlock_buf;
5888 iocb = &lpfc_cmd->cur_iocbq;
5889 if (phba->sli_rev == LPFC_SLI_REV4) {
5890 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
5893 goto out_unlock_buf;
5895 spin_lock(&pring_s4->ring_lock);
5897 /* the command is in process of being cancelled */
5898 if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
5899 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5900 "3169 SCSI Layer abort requested I/O has been "
5901 "cancelled by LLD.\n");
5903 goto out_unlock_ring;
5906 * If pCmd field of the corresponding lpfc_io_buf structure
5907 * points to a different SCSI command, then the driver has
5908 * already completed this command, but the midlayer did not
5909 * see the completion before the eh fired. Just return SUCCESS.
5911 if (lpfc_cmd->pCmd != cmnd) {
5912 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5913 "3170 SCSI Layer abort requested I/O has been "
5914 "completed by LLD.\n");
5915 goto out_unlock_ring;
5918 BUG_ON(iocb->context1 != lpfc_cmd);
5920 /* abort issued in recovery is still in progress */
5921 if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) {
5922 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5923 "3389 SCSI Layer I/O Abort Request is pending\n");
5924 if (phba->sli_rev == LPFC_SLI_REV4)
5925 spin_unlock(&pring_s4->ring_lock);
5926 spin_unlock(&lpfc_cmd->buf_lock);
5927 spin_unlock_irqrestore(&phba->hbalock, flags);
5931 lpfc_cmd->waitq = &waitq;
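/* Record the wait queue before issuing the ABTS so the completion (or
 * abort failure) path can wake this handler.
 */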
5932 if (phba->sli_rev == LPFC_SLI_REV4) {
5933 spin_unlock(&pring_s4->ring_lock);
5934 ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
5935 lpfc_sli_abort_fcp_cmpl);
5937 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
5938 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
5939 lpfc_sli_abort_fcp_cmpl);
5942 /* Make sure HBA is alive */
5943 lpfc_issue_hb_tmo(phba);
5945 if (ret_val != IOCB_SUCCESS) {
5946 /* Indicate the IO is not being aborted by the driver. */
5947 lpfc_cmd->waitq = NULL;
5948 spin_unlock(&lpfc_cmd->buf_lock);
5949 spin_unlock_irqrestore(&phba->hbalock, flags);
5954 /* no longer need the lock after this point */
5955 spin_unlock(&lpfc_cmd->buf_lock);
5956 spin_unlock_irqrestore(&phba->hbalock, flags);
5958 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5959 lpfc_sli_handle_fast_ring_event(phba,
5960 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5964 * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait
5965 * for abort to complete.
5967 wait_event_timeout(waitq,
5968 (lpfc_cmd->pCmd != cmnd),
5969 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
5971 spin_lock(&lpfc_cmd->buf_lock);
5973 if (lpfc_cmd->pCmd == cmnd) {
5975 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5976 "0748 abort handler timed out waiting "
5977 "for aborting I/O (xri:x%x) to complete: "
5978 "ret %#x, ID %d, LUN %llu\n",
5979 iocb->sli4_xritag, ret,
5980 cmnd->device->id, cmnd->device->lun);
5983 lpfc_cmd->waitq = NULL;
5985 spin_unlock(&lpfc_cmd->buf_lock);
5989 if (phba->sli_rev == LPFC_SLI_REV4)
5990 spin_unlock(&pring_s4->ring_lock);
5992 spin_unlock(&lpfc_cmd->buf_lock);
5994 spin_unlock_irqrestore(&phba->hbalock, flags);
5996 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5997 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
5998 "LUN %llu\n", ret, cmnd->device->id,
6004 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
6006 switch (task_mgmt_cmd) {
6007 case FCP_ABORT_TASK_SET:
6008 return "ABORT_TASK_SET";
6009 case FCP_CLEAR_TASK_SET:
6010 return "FCP_CLEAR_TASK_SET";
6012 return "FCP_BUS_RESET";
6014 return "FCP_LUN_RESET";
6015 case FCP_TARGET_RESET:
6016 return "FCP_TARGET_RESET";
6018 return "FCP_CLEAR_ACA";
6019 case FCP_TERMINATE_TASK:
6020 return "FCP_TERMINATE_TASK";
6028 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
6029 * @vport: The virtual port for which this call is being executed.
6030 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
6032 * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded
6039 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
6041 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
6044 uint8_t rsp_info_code;
6049 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6050 "0703 fcp_rsp is missing\n");
6052 rsp_info = fcprsp->rspStatus2;
6053 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
6054 rsp_info_code = fcprsp->rspInfo3;
6057 lpfc_printf_vlog(vport, KERN_INFO,
6059 "0706 fcp_rsp valid 0x%x,"
6060 " rsp len=%d code 0x%x\n",
6062 rsp_len, rsp_info_code);
6064 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
6065 * field specifies the number of valid bytes of FCP_RSP_INFO.
6066 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
6068 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
6069 ((rsp_len == 8) || (rsp_len == 4))) {
6070 switch (rsp_info_code) {
6071 case RSP_NO_FAILURE:
6072 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6073 "0715 Task Mgmt No Failure\n");
6076 case RSP_TM_NOT_SUPPORTED: /* TM rejected */
6077 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6078 "0716 Task Mgmt Target "
6081 case RSP_TM_NOT_COMPLETED: /* TM failed */
6082 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6083 "0717 Task Mgmt Target "
6086 case RSP_TM_INVALID_LU: /* TM to invalid LU! */
6087 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6088 "0718 Task Mgmt to invalid "
6099 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
6100 * @vport: The virtual port for which this call is being executed.
6101 * @rport: Pointer to remote port
6102 * @tgt_id: Target ID of remote device.
6103 * @lun_id: Lun number for the TMF
6104 * @task_mgmt_cmd: type of TMF to send
6106 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
6114 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport,
6115 unsigned int tgt_id, uint64_t lun_id,
6116 uint8_t task_mgmt_cmd)
6118 struct lpfc_hba *phba = vport->phba;
6119 struct lpfc_io_buf *lpfc_cmd;
6120 struct lpfc_iocbq *iocbq;
6121 struct lpfc_iocbq *iocbqrsp;
6122 struct lpfc_rport_data *rdata;
6123 struct lpfc_nodelist *pnode;
6127 rdata = rport->dd_data;
6128 if (!rdata || !rdata->pnode)
6130 pnode = rdata->pnode;
6132 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL);
6133 if (lpfc_cmd == NULL)
6135 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
6136 lpfc_cmd->rdata = rdata;
6137 lpfc_cmd->pCmd = NULL;
6138 lpfc_cmd->ndlp = pnode;
6140 status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
6143 lpfc_release_scsi_buf(phba, lpfc_cmd);
6147 iocbq = &lpfc_cmd->cur_iocbq;
6148 iocbqrsp = lpfc_sli_get_iocbq(phba);
6149 if (iocbqrsp == NULL) {
6150 lpfc_release_scsi_buf(phba, lpfc_cmd);
6153 iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl;
6154 iocbq->vport = vport;
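/* Log and issue the TMF synchronously; lpfc_sli_issue_iocb_wait() blocks
 * until the response iocb arrives or lpfc_cmd->timeout expires.
 */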
6156 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6157 "0702 Issue %s to TGT %d LUN %llu "
6158 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
6159 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
6160 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
6163 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
6164 iocbq, iocbqrsp, lpfc_cmd->timeout);
6165 if ((status != IOCB_SUCCESS) ||
6166 (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) {
6167 if (status != IOCB_SUCCESS ||
6168 get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR)
6169 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6170 "0727 TMF %s to TGT %d LUN %llu "
6171 "failed (%d, %d) cmd_flag x%x\n",
6172 lpfc_taskmgmt_name(task_mgmt_cmd),
6174 get_job_ulpstatus(phba, iocbqrsp),
6175 get_job_word4(phba, iocbqrsp),
6177 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
6178 if (status == IOCB_SUCCESS) {
6179 if (get_job_ulpstatus(phba, iocbqrsp) ==
6180 IOSTAT_FCP_RSP_ERROR)
6181 /* Something in the FCP_RSP was invalid.
6182 * Check conditions */
6183 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
6186 } else if ((status == IOCB_TIMEDOUT) ||
6187 (status == IOCB_ABORTED)) {
6188 ret = TIMEOUT_ERROR;
6195 lpfc_sli_release_iocbq(phba, iocbqrsp);
6197 if (status != IOCB_TIMEDOUT)
6198 lpfc_release_scsi_buf(phba, lpfc_cmd);
6204 * lpfc_chk_tgt_mapped - Wait for the scsi target to reach the MAPPED state
6205 * @vport: The virtual port to check on
6206 * @rport: Pointer to fc_rport data structure.
6208 * This routine delays until the scsi target (aka rport) for the
6209 * command exists (is present and logged in) or we declare it non-existent.
6216 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
6218 struct lpfc_rport_data *rdata;
6219 struct lpfc_nodelist *pnode = NULL;
6220 unsigned long later;
6222 rdata = rport->dd_data;
6224 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6225 "0797 Tgt Map rport failure: rdata x%px\n", rdata);
6228 pnode = rdata->pnode;
6231 * If target is not in a MAPPED state, delay until
6232 * target is rediscovered or devloss timeout expires.
6234 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
6235 while (time_after(later, jiffies)) {
6238 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
6240 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
6241 rdata = rport->dd_data;
6244 pnode = rdata->pnode;
6246 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
6252 * lpfc_reset_flush_io_context - Flush I/O left outstanding after a reset
6253 * @vport: The virtual port (scsi_host) for the flush context
6254 * @tgt_id: If aborting by Target context - specifies the target id
6255 * @lun_id: If aborting by Lun context - specifies the lun id
6256 * @context: specifies the context level to flush at.
6258 * After a reset condition via TMF, we need to flush orphaned i/o
6259 * contexts from the adapter. This routine aborts any contexts
6260 * outstanding, then waits for their completions. The wait is
6261 * bounded by devloss_tmo though.
6268 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
6269 uint64_t lun_id, lpfc_ctx_cmd context)
6271 struct lpfc_hba *phba = vport->phba;
6272 unsigned long later;
6275 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
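/* Abort whatever is still outstanding for this context, then poll until
 * the count drains to zero or twice the devloss timeout elapses.
 */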
6277 lpfc_sli_abort_taskmgmt(vport,
6278 &phba->sli.sli3_ring[LPFC_FCP_RING],
6279 tgt_id, lun_id, context);
6280 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
6281 while (time_after(later, jiffies) && cnt) {
6282 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
6283 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6286 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6287 "0724 I/O flush failure for context %s : cnt x%x\n",
6288 ((context == LPFC_CTX_LUN) ? "LUN" :
6289 ((context == LPFC_CTX_TGT) ? "TGT" :
6290 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
cnt);
return FAILED;
}
return SUCCESS;
}
6298 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
6299 * @cmnd: Pointer to scsi_cmnd data structure.
6301 * This routine does a device reset by sending a LUN_RESET task management
* command.
6309 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
6311 struct Scsi_Host *shost = cmnd->device->host;
6312 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
6313 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6314 struct lpfc_rport_data *rdata;
6315 struct lpfc_nodelist *pnode;
6316 unsigned tgt_id = cmnd->device->id;
6317 uint64_t lun_id = cmnd->device->lun;
6318 struct lpfc_scsi_event_header scsi_event;
6320 u32 logit = LOG_FCP;
6322 rdata = rport->dd_data;
6323 if (!rdata || !rdata->pnode) {
6324 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6325 "0798 Device Reset rdata failure: rdata x%px\n",
6329 pnode = rdata->pnode;
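/* Wait out any transport blocking of the rport (fast_io_fail/devloss
 * window) before attempting the task management command.
 */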
6330 status = fc_block_rport(rport);
6331 if (status != 0 && status != SUCCESS)
return status;
6334 status = lpfc_chk_tgt_mapped(vport, rport);
6335 if (status == FAILED) {
6336 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6337 "0721 Device Reset rport failure: rdata x%px\n", rdata);
6341 scsi_event.event_type = FC_REG_SCSI_EVENT;
6342 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
6343 scsi_event.lun = lun_id;
6344 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6345 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6347 fc_host_post_vendor_event(shost, fc_get_event_number(),
6348 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6350 status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
FCP_LUN_RESET);
6352 if (status != SUCCESS)
6353 logit = LOG_TRACE_EVENT;
6355 lpfc_printf_vlog(vport, KERN_ERR, logit,
6356 "0713 SCSI layer issued Device Reset (%d, %llu) "
6357 "return x%x\n", tgt_id, lun_id, status);
/*
6360 * We have to clean up the I/O: it may have been orphaned by the TMF,
6361 * or, if the TMF failed, it may be in an indeterminate state.
6363 * We will report success only if all of the I/O aborts successfully.
*/
6365 if (status == SUCCESS)
6366 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_LUN);
return status;
}
6373 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
6374 * @cmnd: Pointer to scsi_cmnd data structure.
6376 * This routine does a target reset by sending a TARGET_RESET task management
* command.
6384 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
6386 struct Scsi_Host *shost = cmnd->device->host;
6387 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
6388 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6389 struct lpfc_rport_data *rdata;
6390 struct lpfc_nodelist *pnode;
6391 unsigned tgt_id = cmnd->device->id;
6392 uint64_t lun_id = cmnd->device->lun;
6393 struct lpfc_scsi_event_header scsi_event;
6395 u32 logit = LOG_FCP;
6396 u32 dev_loss_tmo = vport->cfg_devloss_tmo;
6397 unsigned long flags;
6398 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
6400 rdata = rport->dd_data;
6401 if (!rdata || !rdata->pnode) {
6402 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6403 "0799 Target Reset rdata failure: rdata x%px\n",
6407 pnode = rdata->pnode;
6408 status = fc_block_rport(rport);
6409 if (status != 0 && status != SUCCESS)
return status;
6412 status = lpfc_chk_tgt_mapped(vport, rport);
6413 if (status == FAILED) {
6414 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6415 "0722 Target Reset rport failure: rdata x%px\n", rdata);
6417 spin_lock_irqsave(&pnode->lock, flags);
6418 pnode->nlp_flag &= ~NLP_NPR_ADISC;
6419 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
6420 spin_unlock_irqrestore(&pnode->lock, flags);
6422 lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
6424 return FAST_IO_FAIL;
}
6427 scsi_event.event_type = FC_REG_SCSI_EVENT;
6428 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
6430 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6431 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6433 fc_host_post_vendor_event(shost, fc_get_event_number(),
6434 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6436 status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
FCP_TARGET_RESET);
6438 if (status != SUCCESS) {
6439 logit = LOG_TRACE_EVENT;
6441 /* Issue LOGO, if no LOGO is outstanding */
6442 spin_lock_irqsave(&pnode->lock, flags);
6443 if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) &&
6444 !pnode->logo_waitq) {
6445 pnode->logo_waitq = &waitq;
6446 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
6447 pnode->nlp_flag |= NLP_ISSUE_LOGO;
6448 pnode->save_flags |= NLP_WAIT_FOR_LOGO;
6449 spin_unlock_irqrestore(&pnode->lock, flags);
6450 lpfc_unreg_rpi(vport, pnode);
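/* Wait, bounded by dev_loss_tmo, for the LOGO completion to clear
 * NLP_WAIT_FOR_LOGO before giving up on the node.
 */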
6451 wait_event_timeout(waitq,
6452 (!(pnode->save_flags &
6453 NLP_WAIT_FOR_LOGO)),
6454 msecs_to_jiffies(dev_loss_tmo *
1000));
6457 if (pnode->save_flags & NLP_WAIT_FOR_LOGO) {
6458 lpfc_printf_vlog(vport, KERN_ERR, logit,
6459 "0725 SCSI layer TGTRST "
6460 "failed & LOGO TMO (%d, %llu) "
6462 tgt_id, lun_id, status);
6463 spin_lock_irqsave(&pnode->lock, flags);
6464 pnode->save_flags &= ~NLP_WAIT_FOR_LOGO;
6466 spin_lock_irqsave(&pnode->lock, flags);
6468 pnode->logo_waitq = NULL;
6469 spin_unlock_irqrestore(&pnode->lock, flags);
6473 spin_unlock_irqrestore(&pnode->lock, flags);
6478 lpfc_printf_vlog(vport, KERN_ERR, logit,
6479 "0723 SCSI layer issued Target Reset (%d, %llu) "
6480 "return x%x\n", tgt_id, lun_id, status);
/*
6483 * We have to clean up the I/O: it may have been orphaned by the TMF,
6484 * or, if the TMF failed, it may be in an indeterminate state.
6486 * We will report success only if all of the I/O aborts successfully.
*/
6488 if (status == SUCCESS)
6489 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
return status;
}
6495 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry point
6496 * @cmnd: Pointer to scsi_cmnd data structure.
6498 * This routine does a host reset of the adapter port. It brings the HBA
6499 * offline, performs a board restart, and then brings the board back online.
6500 * lpfc_offline calls lpfc_sli_hba_down, which aborts and locally rejects
6501 * all outstanding SCSI commands so that errors are returned to the SCSI
6502 * mid-layer. As this is the SCSI mid-layer's last resort of error handling,
6503 * the routine only returns an error if resetting the adapter fails; in all
6504 * other cases it returns success.
6511 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
6513 struct Scsi_Host *shost = cmnd->device->host;
6514 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6515 struct lpfc_hba *phba = vport->phba;
6516 int rc, ret = SUCCESS;
6518 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
6519 "3172 SCSI layer issued Host Reset Data:\n");
6521 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
lpfc_offline(phba);
6523 rc = lpfc_sli_brdrestart(phba);
if (rc)
goto error;
6527 /* Wait for successful restart of adapter */
6528 if (phba->sli_rev < LPFC_SLI_REV4) {
6529 rc = lpfc_sli_chipset_init(phba);
if (rc)
goto error;
}
6534 rc = lpfc_online(phba);
if (rc)
goto error;
6538 lpfc_unblock_mgmt_io(phba);
return ret;
error:
6542 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6543 "3323 Failed host reset\n");
6544 lpfc_unblock_mgmt_io(phba);
return FAILED;
}
6549 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
6550 * @sdev: Pointer to scsi_device.
6552 * This routine populates cmds_per_lun + 2 scsi_bufs into this host's
6553 * globally available list of scsi buffers. It also makes sure no more scsi
6554 * buffers are allocated than the HBA limit conveyed to the midlayer. This
6555 * list of scsi buffers exists for the lifetime of the driver.
6562 lpfc_slave_alloc(struct scsi_device *sdev)
6564 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6565 struct lpfc_hba *phba = vport->phba;
6566 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
uint32_t total = 0;
6568 uint32_t num_to_alloc = 0;
6569 int num_allocated = 0;
uint32_t sdev_cnt;
6571 struct lpfc_device_data *device_data;
6572 unsigned long flags;
6573 struct lpfc_name target_wwpn;
6575 if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
6578 if (phba->cfg_fof) {
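/* OAS is enabled on this adapter, so per-lun state is tracked in a
 * driver-private lpfc_device_data entry rather than in the rport data
 * alone.
 */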
/*
6581 * Check to see if the device data structure for the lun
6582 * exists. If not, create one.
*/
6585 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
6586 spin_lock_irqsave(&phba->devicelock, flags);
6587 device_data = __lpfc_get_device_data(phba,
&phba->luns,
6589 &vport->fc_portname,
&target_wwpn,
sdev->lun);
if (!device_data) {
6593 spin_unlock_irqrestore(&phba->devicelock, flags);
6594 device_data = lpfc_create_device_data(phba,
6595 &vport->fc_portname,
&target_wwpn,
sdev->lun,
6598 phba->cfg_XLanePriority,
true);
if (!device_data)
return -ENOMEM;
6602 spin_lock_irqsave(&phba->devicelock, flags);
6603 list_add_tail(&device_data->listentry, &phba->luns);
}
6605 device_data->rport_data = rport->dd_data;
6606 device_data->available = true;
6607 spin_unlock_irqrestore(&phba->devicelock, flags);
6608 sdev->hostdata = device_data;
} else {
6610 sdev->hostdata = rport->dd_data;
}
6612 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
6614 /* For SLI4, all IO buffers are pre-allocated */
6615 if (phba->sli_rev == LPFC_SLI_REV4)
return 0;
6618 /* This code path is now ONLY for SLI3 adapters */
6621 * Populate the cmds_per_lun count scsi_bufs into this host's globally
6622 * available list of scsi buffers. Don't allocate more than the
6623 * HBA limit conveyed to the midlayer via the host structure. The
6624 * formula accounts for the lun_queue_depth + error handlers + 1
6625 * extra. This list of scsi bufs exists for the lifetime of the driver.
6627 total = phba->total_scsi_bufs;
6628 num_to_alloc = vport->cfg_lun_queue_depth + 2;
6630 /* If allocated buffers are enough do nothing */
6631 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
return 0;
6634 /* Allow some exchanges to be available always to complete discovery */
6635 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6636 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6637 "0704 At limitation of %d preallocated "
6638 "command buffers\n", total);
6640 /* Allow some exchanges to be available always to complete discovery */
6641 } else if (total + num_to_alloc >
6642 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6643 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6644 "0705 Allocation request of %d "
6645 "command buffers will exceed max of %d. "
6646 "Reducing allocation request to %d.\n",
6647 num_to_alloc, phba->cfg_hba_queue_depth,
6648 (phba->cfg_hba_queue_depth - total));
6649 num_to_alloc = phba->cfg_hba_queue_depth - total;
}
6651 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
6652 if (num_to_alloc != num_allocated) {
6653 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6654 "0708 Allocation request of %d "
6655 "command buffers did not succeed. "
6656 "Allocated %d buffers.\n",
6657 num_to_alloc, num_allocated);
}
6659 if (num_allocated > 0)
6660 phba->total_scsi_bufs += num_allocated;
return 0;
}
6665 * lpfc_slave_configure - scsi_host_template slave_configure entry point
6666 * @sdev: Pointer to scsi_device.
6668 * This routine configures the following items
6669 * - Tag command queuing support for @sdev if supported.
6670 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
6676 lpfc_slave_configure(struct scsi_device *sdev)
6678 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6679 struct lpfc_hba *phba = vport->phba;
6681 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
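/* When the driver runs the FCP ring in polled mode, service any
 * pending fast-path completions now and re-arm the poll timer if ring
 * interrupts are disabled.
 */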
6683 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
6684 lpfc_sli_handle_fast_ring_event(phba,
6685 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
6686 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
6687 lpfc_poll_rearm_timer(phba);
}
return 0;
}
6694 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
6695 * @sdev: Pointer to scsi_device.
6697 * This routine sets the @sdev hostdata field to NULL.
6700 lpfc_slave_destroy(struct scsi_device *sdev)
6702 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6703 struct lpfc_hba *phba = vport->phba;
6704 unsigned long flags;
6705 struct lpfc_device_data *device_data = sdev->hostdata;
6707 atomic_dec(&phba->sdev_cnt);
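/* An OAS-enabled lun may still be referenced by the OAS configuration,
 * so its device data is only marked unavailable here; entries that are
 * not OAS enabled are freed immediately.
 */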
6708 if ((phba->cfg_fof) && (device_data)) {
6709 spin_lock_irqsave(&phba->devicelock, flags);
6710 device_data->available = false;
6711 if (!device_data->oas_enabled)
6712 lpfc_delete_device_data(phba, device_data);
6713 spin_unlock_irqrestore(&phba->devicelock, flags);
6715 sdev->hostdata = NULL;
return;
}
6720 * lpfc_create_device_data - creates and initializes device data structure for OAS
6721 * @phba: Pointer to host bus adapter structure.
6722 * @vport_wwpn: Pointer to vport's wwpn information
6723 * @target_wwpn: Pointer to target's wwpn information
6724 * @lun: Lun on target
6726 * @atomic_create: Flag to indicate if memory should be allocated using the
6727 * GFP_ATOMIC flag or not.
6729 * This routine creates a device data structure which will contain identifying
6730 * information for the device (host wwpn, target wwpn, lun), state of OAS,
6731 * whether or not the corresponding lun is available to the system,
6732 * and a pointer to the rport data.
6736 * Pointer to lpfc_device_data - Success
6738 struct lpfc_device_data*
6739 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6740 struct lpfc_name *target_wwpn, uint64_t lun,
6741 uint32_t pri, bool atomic_create)
6744 struct lpfc_device_data *lun_info;
6747 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
!(phba->cfg_fof))
return NULL;
6751 /* Attempt to create the device data to contain lun info */
if (atomic_create)
6754 memory_flags = GFP_ATOMIC;
else
6756 memory_flags = GFP_KERNEL;
6757 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
if (!lun_info)
return NULL;
6760 INIT_LIST_HEAD(&lun_info->listentry);
6761 lun_info->rport_data = NULL;
6762 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
6763 sizeof(struct lpfc_name));
6764 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
6765 sizeof(struct lpfc_name));
6766 lun_info->device_id.lun = lun;
6767 lun_info->oas_enabled = false;
6768 lun_info->priority = pri;
6769 lun_info->available = false;
return lun_info;
}
6774 * lpfc_delete_device_data - frees a device data structure for OAS
6775 * @phba: Pointer to host bus adapter structure.
6776 * @lun_info: Pointer to device data structure to free.
6778 * This routine frees the previously allocated device data structure passed.
6782 lpfc_delete_device_data(struct lpfc_hba *phba,
6783 struct lpfc_device_data *lun_info)
6786 if (unlikely(!phba) || !lun_info ||
!(phba->cfg_fof))
return;
6790 if (!list_empty(&lun_info->listentry))
6791 list_del(&lun_info->listentry);
6792 mempool_free(lun_info, phba->device_data_mem_pool);
}
6797 * __lpfc_get_device_data - returns the device data for the specified lun
6798 * @phba: Pointer to host bus adapter structure.
6799 * @list: Point to list to search.
6800 * @vport_wwpn: Pointer to vport's wwpn information
6801 * @target_wwpn: Pointer to target's wwpn information
6802 * @lun: Lun on target
6804 * This routine searches the list passed for the specified lun's device data.
6805 * This function does not take any locks; it is the responsibility of the
6806 * caller to ensure the proper lock is held before calling the function.
6810 * Pointer to lpfc_device_data - Success
6812 struct lpfc_device_data*
6813 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
6814 struct lpfc_name *vport_wwpn,
6815 struct lpfc_name *target_wwpn, uint64_t lun)
6818 struct lpfc_device_data *lun_info;
6820 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
!phba->cfg_fof)
return NULL;
6824 /* Check to see if the lun is already enabled for OAS. */
6826 list_for_each_entry(lun_info, list, listentry) {
6827 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6828 sizeof(struct lpfc_name)) == 0) &&
6829 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6830 sizeof(struct lpfc_name)) == 0) &&
6831 (lun_info->device_id.lun == lun))
return lun_info;
}
return NULL;
}
6839 * lpfc_find_next_oas_lun - searches for the next oas lun
6840 * @phba: Pointer to host bus adapter structure.
6841 * @vport_wwpn: Pointer to vport's wwpn information
6842 * @target_wwpn: Pointer to target's wwpn information
6843 * @starting_lun: Pointer to the lun to start searching for
6844 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
6845 * @found_target_wwpn: Pointer to the found lun's target wwpn information
6846 * @found_lun: Pointer to the found lun.
6847 * @found_lun_status: Pointer to status of the found lun.
6848 * @found_lun_pri: Pointer to priority of the found lun.
6850 * This routine searches the luns list for the specified lun
6851 * or the first lun for the vport/target. If the vport wwpn contains
6852 * a zero value then a specific vport is not specified. In this case
6853 * any vport which contains the lun will be considered a match. If the
6854 * target wwpn contains a zero value then a specific target is not specified.
6855 * In this case any target which contains the lun will be considered a
6856 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
6857 * are returned. The function will also return the next lun if available.
6858 * If the next lun is not found, starting_lun parameter will be set to
* NO_MORE_OAS_LUN.
6866 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6867 struct lpfc_name *target_wwpn, uint64_t *starting_lun,
6868 struct lpfc_name *found_vport_wwpn,
6869 struct lpfc_name *found_target_wwpn,
6870 uint64_t *found_lun,
6871 uint32_t *found_lun_status,
6872 uint32_t *found_lun_pri)
6875 unsigned long flags;
6876 struct lpfc_device_data *lun_info;
6877 struct lpfc_device_id *device_id;
uint64_t lun;
bool found = false;
6881 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6882 !starting_lun || !found_vport_wwpn ||
6883 !found_target_wwpn || !found_lun || !found_lun_status ||
6884 (*starting_lun == NO_MORE_OAS_LUN) ||
!phba->cfg_fof)
return false;
6888 lun = *starting_lun;
6889 *found_lun = NO_MORE_OAS_LUN;
6890 *starting_lun = NO_MORE_OAS_LUN;
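/* Default the out parameters to "no lun found"; they are overwritten
 * below only when a matching OAS-enabled lun (and possibly a next lun)
 * is located.
 */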
6892 /* Search for the lun or the lun closest in value */
6894 spin_lock_irqsave(&phba->devicelock, flags);
6895 list_for_each_entry(lun_info, &phba->luns, listentry) {
6896 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
6897 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6898 sizeof(struct lpfc_name)) == 0)) &&
6899 ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
6900 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6901 sizeof(struct lpfc_name)) == 0)) &&
6902 (lun_info->oas_enabled)) {
6903 device_id = &lun_info->device_id;
if (!found &&
6905 ((lun == FIND_FIRST_OAS_LUN) ||
6906 (device_id->lun == lun))) {
6907 *found_lun = device_id->lun;
6908 memcpy(found_vport_wwpn,
6909 &device_id->vport_wwpn,
6910 sizeof(struct lpfc_name));
6911 memcpy(found_target_wwpn,
6912 &device_id->target_wwpn,
6913 sizeof(struct lpfc_name));
6914 if (lun_info->available)
6916 OAS_LUN_STATUS_EXISTS;
6918 *found_lun_status = 0;
6919 *found_lun_pri = lun_info->priority;
6920 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
6921 memset(vport_wwpn, 0x0,
6922 sizeof(struct lpfc_name));
6923 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
6924 memset(target_wwpn, 0x0,
6925 sizeof(struct lpfc_name));
found = true;
} else if (found) {
6928 *starting_lun = device_id->lun;
6929 memcpy(vport_wwpn, &device_id->vport_wwpn,
6930 sizeof(struct lpfc_name));
6931 memcpy(target_wwpn, &device_id->target_wwpn,
6932 sizeof(struct lpfc_name));
break;
}
}
}
6937 spin_unlock_irqrestore(&phba->devicelock, flags);
return found;
}
6942 * lpfc_enable_oas_lun - enables a lun for OAS operations
6943 * @phba: Pointer to host bus adapter structure.
6944 * @vport_wwpn: Pointer to vport's wwpn information
6945 * @target_wwpn: Pointer to target's wwpn information
6949 * This routine enables a lun for OAS operations. The routine does so by
6950 * doing the following:
6952 * 1) Checks to see if the device data for the lun has been created.
6953 * 2) If found, sets the OAS enabled flag if not set and returns.
6954 * 3) Otherwise, creates a device data structure.
6955 * 4) If successfully created, indicates the device data is for an OAS lun,
6956 * indicates the lun is not available, and adds it to the list of luns.
6963 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6964 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6967 struct lpfc_device_data *lun_info;
6968 unsigned long flags;
6970 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
!phba->cfg_fof)
return false;
6974 spin_lock_irqsave(&phba->devicelock, flags);
6976 /* Check to see if the device data for the lun has been created */
6977 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
target_wwpn, lun);
if (lun_info) {
6980 if (!lun_info->oas_enabled)
6981 lun_info->oas_enabled = true;
6982 lun_info->priority = pri;
6983 spin_unlock_irqrestore(&phba->devicelock, flags);
return true;
}
6987 /* Create an lun info structure and add to list of luns */
6988 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
pri, true);
if (lun_info) {
6991 lun_info->oas_enabled = true;
6992 lun_info->priority = pri;
6993 lun_info->available = false;
6994 list_add_tail(&lun_info->listentry, &phba->luns);
6995 spin_unlock_irqrestore(&phba->devicelock, flags);
return true;
}
6998 spin_unlock_irqrestore(&phba->devicelock, flags);
return false;
}
7003 * lpfc_disable_oas_lun - disables a lun for OAS operations
7004 * @phba: Pointer to host bus adapter structure.
7005 * @vport_wwpn: Pointer to vport's wwpn information
7006 * @target_wwpn: Pointer to target's wwpn information
7010 * This routine disables a lun for OAS operations. The routine does so by
7011 * doing the following:
7013 * 1) Checks to see if the device data for the lun is created.
7014 * 2) If present, clears the flag indicating this lun is for OAS.
7015 * 3) If the lun is not available to the system, the device data is
* freed.
7023 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
7024 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
7027 struct lpfc_device_data *lun_info;
7028 unsigned long flags;
7030 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
!phba->cfg_fof)
return false;
7034 spin_lock_irqsave(&phba->devicelock, flags);
7036 /* Check to see if the lun is available. */
7037 lun_info = __lpfc_get_device_data(phba,
7038 &phba->luns, vport_wwpn,
target_wwpn, lun);
if (lun_info) {
7041 lun_info->oas_enabled = false;
7042 lun_info->priority = pri;
7043 if (!lun_info->available)
7044 lpfc_delete_device_data(phba, lun_info);
7045 spin_unlock_irqrestore(&phba->devicelock, flags);
return true;
}
7049 spin_unlock_irqrestore(&phba->devicelock, flags);
return false;
}
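/* Stub entry points used by lpfc_template_nvme below: SCSI commands are
 * bounced with host-busy and slave allocation reports no device.
 */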
static int
7054 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
7056 return SCSI_MLQUEUE_HOST_BUSY;
}
static int
7060 lpfc_no_slave(struct scsi_device *sdev)
{
return -ENODEV;
}
7065 struct scsi_host_template lpfc_template_nvme = {
7066 .module = THIS_MODULE,
7067 .name = LPFC_DRIVER_NAME,
7068 .proc_name = LPFC_DRIVER_NAME,
7070 .queuecommand = lpfc_no_command,
7071 .slave_alloc = lpfc_no_slave,
7072 .slave_configure = lpfc_no_slave,
7073 .scan_finished = lpfc_scan_finished,
7077 .shost_groups = lpfc_hba_groups,
7078 .max_sectors = 0xFFFFFFFF,
7079 .vendor_id = LPFC_NL_VENDOR_ID,
7080 .track_queue_depth = 0,
};
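/* Host template used for ports with FCP (SCSI) support enabled. */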
7083 struct scsi_host_template lpfc_template = {
7084 .module = THIS_MODULE,
7085 .name = LPFC_DRIVER_NAME,
7086 .proc_name = LPFC_DRIVER_NAME,
7088 .queuecommand = lpfc_queuecommand,
7089 .eh_timed_out = fc_eh_timed_out,
7090 .eh_should_retry_cmd = fc_eh_should_retry_cmd,
7091 .eh_abort_handler = lpfc_abort_handler,
7092 .eh_device_reset_handler = lpfc_device_reset_handler,
7093 .eh_target_reset_handler = lpfc_target_reset_handler,
7094 .eh_host_reset_handler = lpfc_host_reset_handler,
7095 .slave_alloc = lpfc_slave_alloc,
7096 .slave_configure = lpfc_slave_configure,
7097 .slave_destroy = lpfc_slave_destroy,
7098 .scan_finished = lpfc_scan_finished,
7100 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
7101 .cmd_per_lun = LPFC_CMD_PER_LUN,
7102 .shost_groups = lpfc_hba_groups,
7103 .max_sectors = 0xFFFFFFFF,
7104 .vendor_id = LPFC_NL_VENDOR_ID,
7105 .change_queue_depth = scsi_change_queue_depth,
7106 .track_queue_depth = 1,
};