1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
5 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/export.h>
27 #include <linux/delay.h>
28 #include <asm/unaligned.h>
29 #include <linux/t10-pi.h>
30 #include <linux/crc-t10dif.h>
31 #include <linux/blk-cgroup.h>
32 #include <net/checksum.h>
34 #include <scsi/scsi.h>
35 #include <scsi/scsi_device.h>
36 #include <scsi/scsi_eh.h>
37 #include <scsi/scsi_host.h>
38 #include <scsi/scsi_tcq.h>
39 #include <scsi/scsi_transport_fc.h>
41 #include "lpfc_version.h"
45 #include "lpfc_sli4.h"
47 #include "lpfc_disc.h"
49 #include "lpfc_scsi.h"
50 #include "lpfc_logmsg.h"
51 #include "lpfc_crtn.h"
52 #include "lpfc_vport.h"
54 #define LPFC_RESET_WAIT 2
55 #define LPFC_ABORT_WAIT 2
57 static char *dif_op_str[] = {
/*
 * T10 DIF protection tuple as carried in the protection data stream.
 * Fields are declared __be16/__be32: big-endian as seen on the wire.
 */
67 struct scsi_dif_tuple {
68 __be16 guard_tag; /* Checksum */
69 __be16 app_tag; /* Opaque storage */
70 __be32 ref_tag; /* Target LBA or indirect LBA */
/*
 * lpfc_rport_data_from_scsi_device - resolve the lpfc rport data for an sdev.
 *
 * When phba->cfg_fof is set, sdev->hostdata points at a struct
 * lpfc_device_data and the rport data hangs off its rport_data member;
 * otherwise sdev->hostdata is the struct lpfc_rport_data itself.
 */
73 static struct lpfc_rport_data *
74 lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
76 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
78 if (vport->phba->cfg_fof)
79 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
81 return (struct lpfc_rport_data *)sdev->hostdata;
85 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
87 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
89 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
91 lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
92 struct lpfc_vmid *vmp);
93 static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
94 *cmd, struct lpfc_vmid *vmp,
95 union lpfc_vmid_io_tag *tag);
96 static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
97 struct lpfc_vmid *vmid);
/* Return the logical block (sector) size of the device this command targets. */
99 static inline unsigned
100 lpfc_cmd_blksize(struct scsi_cmnd *sc)
102 return sc->device->sector_size;
105 #define LPFC_CHECK_PROTECT_GUARD 1
106 #define LPFC_CHECK_PROTECT_REF 2
/* lpfc_cmd_protect - query a protection attribute of @sc selected by @flag
 * (LPFC_CHECK_PROTECT_GUARD or LPFC_CHECK_PROTECT_REF).
 * NOTE(review): body not fully visible in this listing — confirm semantics
 * against the complete source.
 */
107 static inline unsigned
108 lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
/*
 * lpfc_cmd_guard_csum - nonzero when the host's DIX guard type is IP checksum.
 * Commands whose protection grouping is LPFC_PG_TYPE_NO_DIF are screened out
 * first (return value for that path is outside this listing).
 */
113 static inline unsigned
114 lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
116 if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
118 if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
124 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
125 * @phba: Pointer to HBA object.
126 * @lpfc_cmd: lpfc scsi command object pointer.
128 * This function is called from the lpfc_prep_task_mgmt_cmd function to
129 * set the last bit in the response sge entry.
132 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
133 struct lpfc_io_buf *lpfc_cmd)
135 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
/* word2 lives in little-endian wire order: convert to CPU order,
 * set the "last SGE" flag, then convert back.
 */
138 sgl->word2 = le32_to_cpu(sgl->word2);
139 bf_set(lpfc_sli4_sge_last, sgl, 1);
140 sgl->word2 = cpu_to_le32(sgl->word2);
144 #define LPFC_INVALID_REFTAG ((u32)-1)
147 * lpfc_update_stats - Update statistical data for the command completion
148 * @vport: The virtual port on which this call is executing.
149 * @lpfc_cmd: lpfc scsi command object pointer.
151 * This function is called when there is a command completion and this
152 * function updates the statistical data for the command completion.
155 lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
157 struct lpfc_hba *phba = vport->phba;
158 struct lpfc_rport_data *rdata;
159 struct lpfc_nodelist *pnode;
160 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
162 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
163 unsigned long latency;
/* Bail out when statistics gathering is disabled or blocked. */
166 if (!vport->stat_data_enabled ||
167 vport->stat_data_blocked ||
/* Elapsed time in ms since the command was started. */
171 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
172 rdata = lpfc_cmd->rdata;
173 pnode = rdata->pnode;
/* lat_data buckets are protected by the Scsi_Host lock. */
175 spin_lock_irqsave(shost->host_lock, flags);
178 (phba->bucket_type == LPFC_NO_BUCKET)) {
179 spin_unlock_irqrestore(shost->host_lock, flags);
/* Linear buckets: fixed-width bucket_step slots starting at bucket_base. */
183 if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
184 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
186 /* check array subscript bounds */
189 else if (i >= LPFC_MAX_BUCKET_COUNT)
190 i = LPFC_MAX_BUCKET_COUNT - 1;
/* Otherwise buckets grow as powers of two (1<<i) * bucket_step. */
192 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
193 if (latency <= (phba->bucket_base +
194 ((1<<i)*phba->bucket_step)))
198 pnode->lat_data[i].cmd_count++;
199 spin_unlock_irqrestore(shost->host_lock, flags);
203 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
204 * @phba: The Hba for which this call is being executed.
206 * This routine is called when there is resource error in driver or firmware.
207 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
208 * posts at most 1 event each second. This routine wakes up worker thread of
209 * @phba to process WORKER_RAM_DOWN_EVENT event.
211 * This routine should be called with no lock held.
214 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
218 unsigned long expires;
220 spin_lock_irqsave(&phba->hbalock, flags);
221 atomic_inc(&phba->num_rsrc_err);
222 phba->last_rsrc_error_time = jiffies;
/* Rate limit: do nothing if a ramp-down was posted within the interval. */
224 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
225 if (time_after(expires, jiffies)) {
226 spin_unlock_irqrestore(&phba->hbalock, flags);
230 phba->last_ramp_down_time = jiffies;
232 spin_unlock_irqrestore(&phba->hbalock, flags);
/* Post the worker event only once; evt_posted records whether it was
 * already pending so the worker is not woken redundantly.
 */
234 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
235 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
237 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
238 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
241 lpfc_worker_wake_up(phba);
246 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
247 * @phba: The Hba for which this call is being executed.
249 * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker
250 * thread.This routine reduces queue depth for all scsi device on each vport
251 * associated with @phba.
254 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
256 struct lpfc_vport **vports;
257 struct Scsi_Host *shost;
258 struct scsi_device *sdev;
259 unsigned long new_queue_depth;
260 unsigned long num_rsrc_err, num_cmd_success;
263 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
264 num_cmd_success = atomic_read(&phba->num_cmd_success);
267 * The error and success command counters are global per
268 * driver instance. If another handler has already
269 * operated on this error event, just exit.
271 if (num_rsrc_err == 0)
274 vports = lpfc_create_vport_work_array(phba);
276 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
277 shost = lpfc_shost_from_vport(vports[i]);
278 shost_for_each_device(sdev, shost) {
/* Scale the depth down proportionally to the error rate:
 * depth * err / (err + success); never allow it to hit zero.
 */
280 sdev->queue_depth * num_rsrc_err /
281 (num_rsrc_err + num_cmd_success);
282 if (!new_queue_depth)
283 new_queue_depth = sdev->queue_depth - 1;
285 new_queue_depth = sdev->queue_depth -
287 scsi_change_queue_depth(sdev, new_queue_depth);
290 lpfc_destroy_vport_work_array(phba, vports);
/* Reset the global counters so the next event starts fresh. */
291 atomic_set(&phba->num_rsrc_err, 0);
292 atomic_set(&phba->num_cmd_success, 0);
296 * lpfc_scsi_dev_block - set all scsi hosts to block state
297 * @phba: Pointer to HBA context object.
299 * This function walks vport list and set each SCSI host to block state
300 * by invoking fc_remote_port_delete() routine. This function is invoked
301 * with EEH when device's PCI slot has been permanently disabled.
304 lpfc_scsi_dev_block(struct lpfc_hba *phba)
306 struct lpfc_vport **vports;
307 struct Scsi_Host *shost;
308 struct scsi_device *sdev;
309 struct fc_rport *rport;
312 vports = lpfc_create_vport_work_array(phba);
314 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
315 shost = lpfc_shost_from_vport(vports[i]);
316 shost_for_each_device(sdev, shost) {
/* Deleting the remote port blocks further I/O to the target. */
317 rport = starget_to_rport(scsi_target(sdev));
318 fc_remote_port_delete(rport);
321 lpfc_destroy_vport_work_array(phba, vports);
325 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
326 * @vport: The virtual port for which this call being executed.
327 * @num_to_alloc: The requested number of buffers to allocate.
329 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
330 * the scsi buffer contains all the necessary information needed to initiate
331 * a SCSI I/O. The non-DMAable buffer region contains information to build
332 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
333 * and the initial BPL. In addition to allocating memory, the FCP CMND and
334 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
337 * int - number of scsi buffers that were allocated.
338 * 0 = failure, less than num_to_alloc is a partial failure.
341 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
343 struct lpfc_hba *phba = vport->phba;
344 struct lpfc_io_buf *psb;
345 struct ulp_bde64 *bpl;
347 dma_addr_t pdma_phys_fcp_cmd;
348 dma_addr_t pdma_phys_fcp_rsp;
349 dma_addr_t pdma_phys_sgl;
/* The BPL occupies whatever remains of the DMA buffer after the
 * FCP command and response structures.
 */
353 bpl_size = phba->cfg_sg_dma_buf_size -
354 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
356 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
357 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
358 num_to_alloc, phba->cfg_sg_dma_buf_size,
359 (int)sizeof(struct fcp_cmnd),
360 (int)sizeof(struct fcp_rsp), bpl_size);
362 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
363 psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
368 * Get memory from the pci pool to map the virt space to pci
369 * bus space for an I/O. The DMA buffer includes space for the
370 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
371 * necessary to support the sg_tablesize.
373 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
374 GFP_KERNEL, &psb->dma_handle);
381 /* Allocate iotag for psb->cur_iocbq. */
382 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
/* No iotag available: undo the DMA allocation for this buffer. */
384 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
385 psb->data, psb->dma_handle);
389 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
/* Lay out the DMA buffer: fcp_cmnd, then fcp_rsp, then the BPL. */
391 psb->fcp_cmnd = psb->data;
392 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
393 psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
394 sizeof(struct fcp_rsp);
396 /* Initialize local short-hand pointers. */
397 bpl = (struct ulp_bde64 *)psb->dma_sgl;
398 pdma_phys_fcp_cmd = psb->dma_handle;
399 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
400 pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
401 sizeof(struct fcp_rsp);
404 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
405 * are sg list bdes. Initialize the first two and leave the
406 * rest for queuecommand.
408 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
409 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
410 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
411 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
412 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
414 /* Setup the physical region for the FCP RSP */
415 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
416 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
417 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
418 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
419 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
422 * Since the IOCB for the FCP I/O is built into this
423 * lpfc_scsi_buf, initialize it with all known data now.
425 iocb = &psb->cur_iocbq.iocb;
426 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
/* SLI-3 without BlockGuard: embed the FCP command directly in the
 * IOCB (immediate BDE) and describe the response with an extended BDE.
 */
427 if ((phba->sli_rev == 3) &&
428 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
429 /* fill in immediate fcp command BDE */
430 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
431 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
432 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
434 iocb->un.fcpi64.bdl.addrHigh = 0;
435 iocb->ulpBdeCount = 0;
437 /* fill in response BDE */
438 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
440 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
441 sizeof(struct fcp_rsp);
442 iocb->unsli3.fcp_ext.rbde.addrLow =
443 putPaddrLow(pdma_phys_fcp_rsp);
444 iocb->unsli3.fcp_ext.rbde.addrHigh =
445 putPaddrHigh(pdma_phys_fcp_rsp);
/* Otherwise point the IOCB at the BPL (cmd + rsp entries). */
447 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
448 iocb->un.fcpi64.bdl.bdeSize =
449 (2 * sizeof(struct ulp_bde64));
450 iocb->un.fcpi64.bdl.addrLow =
451 putPaddrLow(pdma_phys_sgl);
452 iocb->un.fcpi64.bdl.addrHigh =
453 putPaddrHigh(pdma_phys_sgl);
454 iocb->ulpBdeCount = 1;
457 iocb->ulpClass = CLASS3;
458 psb->status = IOSTAT_SUCCESS;
459 /* Put it back into the SCSI buffer list */
460 psb->cur_iocbq.context1 = psb;
461 spin_lock_init(&psb->buf_lock);
462 lpfc_release_scsi_buf_s3(phba, psb);
470 * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
471 * @vport: pointer to lpfc vport data structure.
473 * This routine is invoked by the vport cleanup for deletions and the cleanup
474 * for an ndlp on removal.
477 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
479 struct lpfc_hba *phba = vport->phba;
480 struct lpfc_io_buf *psb, *next_psb;
481 struct lpfc_sli4_hdw_queue *qp;
482 unsigned long iflag = 0;
/* Nothing to do unless FCP is enabled on this vport. */
485 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
/* Walk every hardware queue's aborted-I/O list under hbalock plus the
 * per-queue list lock; NVME buffers are skipped, FCP buffers whose node
 * belongs to @vport are the ones being cleaned up.
 */
488 spin_lock_irqsave(&phba->hbalock, iflag);
489 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
490 qp = &phba->sli4_hba.hdwq[idx];
492 spin_lock(&qp->abts_io_buf_list_lock);
493 list_for_each_entry_safe(psb, next_psb,
494 &qp->lpfc_abts_io_buf_list, list) {
495 if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
498 if (psb->rdata && psb->rdata->pnode &&
499 psb->rdata->pnode->vport == vport)
502 spin_unlock(&qp->abts_io_buf_list_lock);
504 spin_unlock_irqrestore(&phba->hbalock, iflag);
508 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
509 * @phba: pointer to lpfc hba data structure.
510 * @axri: pointer to the fcp xri abort wcqe structure.
511 * @idx: index into hdwq
513 * This routine is invoked by the worker thread to process a SLI4 fast-path
514 * FCP or NVME aborted xri.
517 lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
518 struct sli4_wcqe_xri_aborted *axri, int idx)
520 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
521 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
522 struct lpfc_io_buf *psb, *next_psb;
523 struct lpfc_sli4_hdw_queue *qp;
524 unsigned long iflag = 0;
525 struct lpfc_iocbq *iocbq;
527 struct lpfc_nodelist *ndlp;
529 struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
530 struct scsi_cmnd *cmd;
532 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
/* Search this hardware queue's aborted-buffer list for the XRI
 * reported in the abort WCQE.
 */
535 qp = &phba->sli4_hba.hdwq[idx];
536 spin_lock_irqsave(&phba->hbalock, iflag);
537 spin_lock(&qp->abts_io_buf_list_lock);
538 list_for_each_entry_safe(psb, next_psb,
539 &qp->lpfc_abts_io_buf_list, list) {
540 if (psb->cur_iocbq.sli4_xritag == xri) {
541 list_del_init(&psb->list);
542 psb->flags &= ~LPFC_SBUF_XBUSY;
543 psb->status = IOSTAT_SUCCESS;
/* NVME buffers are handed to the NVME abort path; locks are
 * dropped first because that path runs without them.
 */
544 if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
545 qp->abts_nvme_io_bufs--;
546 spin_unlock(&qp->abts_io_buf_list_lock);
547 spin_unlock_irqrestore(&phba->hbalock, iflag);
548 lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
551 qp->abts_scsi_io_bufs--;
552 spin_unlock(&qp->abts_io_buf_list_lock);
554 if (psb->rdata && psb->rdata->pnode)
555 ndlp = psb->rdata->pnode;
559 rrq_empty = list_empty(&phba->active_rrq_list);
560 spin_unlock_irqrestore(&phba->hbalock, iflag);
562 lpfc_set_rrq_active(phba, ndlp,
563 psb->cur_iocbq.sli4_lxritag, rxid, 1);
564 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
567 if (phba->cfg_fcp_wait_abts_rsp) {
568 spin_lock_irqsave(&psb->buf_lock, iflag);
571 spin_unlock_irqrestore(&psb->buf_lock, iflag);
573 /* The sdev is not guaranteed to be valid post
580 * We expect there is an abort thread waiting
581 * for command completion wake up the thread.
583 spin_lock_irqsave(&psb->buf_lock, iflag);
584 psb->cur_iocbq.iocb_flag &=
585 ~LPFC_DRIVER_ABORTED;
588 spin_unlock_irqrestore(&psb->buf_lock, iflag);
591 lpfc_release_scsi_buf_s4(phba, psb);
593 lpfc_worker_wake_up(phba);
597 spin_unlock(&qp->abts_io_buf_list_lock);
/* XRI not found on the aborted list: scan the active iotag table
 * for a matching FCP iocb and clear its XBUSY state instead.
 */
598 for (i = 1; i <= phba->sli.last_iotag; i++) {
599 iocbq = phba->sli.iocbq_lookup[i];
601 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
602 (iocbq->iocb_flag & LPFC_IO_LIBDFC))
604 if (iocbq->sli4_xritag != xri)
606 psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
607 psb->flags &= ~LPFC_SBUF_XBUSY;
608 spin_unlock_irqrestore(&phba->hbalock, iflag);
609 if (!list_empty(&pring->txq))
610 lpfc_worker_wake_up(phba);
614 spin_unlock_irqrestore(&phba->hbalock, iflag);
618 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
619 * @phba: The HBA for which this call is being executed.
620 * @ndlp: pointer to a node-list data structure.
621 * @cmnd: Pointer to scsi_cmnd data structure.
623 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
624 * and returns to caller.
628 * Pointer to lpfc_scsi_buf - Success
630 static struct lpfc_io_buf *
631 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
632 struct scsi_cmnd *cmnd)
634 struct lpfc_io_buf *lpfc_cmd = NULL;
635 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
636 unsigned long iflag = 0;
638 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
639 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
/* Get-list empty: refill it by splicing over the put-list (two-list
 * scheme reduces lock contention between alloc and free paths),
 * then retry the removal.
 */
642 spin_lock(&phba->scsi_buf_list_put_lock);
643 list_splice(&phba->lpfc_scsi_buf_list_put,
644 &phba->lpfc_scsi_buf_list_get);
645 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
646 list_remove_head(scsi_buf_list_get, lpfc_cmd,
647 struct lpfc_io_buf, list);
648 spin_unlock(&phba->scsi_buf_list_put_lock);
650 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
/* Track the node's outstanding-command count when queue-depth
 * checking is in effect.
 */
652 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
653 atomic_inc(&ndlp->cmd_pending);
654 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
659 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
660 * @phba: The HBA for which this call is being executed.
661 * @ndlp: pointer to a node-list data structure.
662 * @cmnd: Pointer to scsi_cmnd data structure.
664 * This routine removes a scsi buffer from head of @hdwq io_buf_list
665 * and returns to caller.
669 * Pointer to lpfc_scsi_buf - Success
671 static struct lpfc_io_buf *
672 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
673 struct scsi_cmnd *cmnd)
675 struct lpfc_io_buf *lpfc_cmd;
676 struct lpfc_sli4_hdw_queue *qp;
677 struct sli4_sge *sgl;
678 dma_addr_t pdma_phys_fcp_rsp;
679 dma_addr_t pdma_phys_fcp_cmd;
682 struct fcp_cmd_rsp_buf *tmp = NULL;
/* Pick the hardware queue: by blk-mq hw queue when scheduling by
 * HDWQ is configured, otherwise by the current CPU's mapping.
 */
684 cpu = raw_smp_processor_id();
685 if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
686 tag = blk_mq_unique_tag(cmnd->request);
687 idx = blk_mq_unique_tag_to_hwq(tag);
689 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
692 lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
693 !phba->cfg_xri_rebalancing);
695 qp = &phba->sli4_hba.hdwq[idx];
700 /* Setup key fields in buffer that may have been changed
701 * if other protocols used this buffer.
703 lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
704 lpfc_cmd->prot_seg_cnt = 0;
705 lpfc_cmd->seg_cnt = 0;
706 lpfc_cmd->timeout = 0;
708 lpfc_cmd->start_time = jiffies;
709 lpfc_cmd->waitq = NULL;
711 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
712 lpfc_cmd->prot_data_type = 0;
/* Obtain the per-hdwq FCP cmd/rsp DMA buffer; on failure the io_buf
 * is returned to its pool.
 */
714 tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
716 lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
720 lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
721 lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
724 * The first two SGEs are the FCP_CMD and FCP_RSP.
725 * The balance are sg list bdes. Initialize the
726 * first two and leave the rest for queuecommand.
728 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
729 pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
730 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
731 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
732 sgl->word2 = le32_to_cpu(sgl->word2);
733 bf_set(lpfc_sli4_sge_last, sgl, 0);
734 sgl->word2 = cpu_to_le32(sgl->word2);
735 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
738 /* Setup the physical region for the FCP RSP */
739 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
740 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
741 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
742 sgl->word2 = le32_to_cpu(sgl->word2);
743 bf_set(lpfc_sli4_sge_last, sgl, 1);
744 sgl->word2 = cpu_to_le32(sgl->word2);
745 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
747 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
748 atomic_inc(&ndlp->cmd_pending);
749 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
754 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
755 * @phba: The HBA for which this call is being executed.
756 * @ndlp: pointer to a node-list data structure.
757 * @cmnd: Pointer to scsi_cmnd data structure.
759 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
760 * and returns to caller.
764 * Pointer to lpfc_scsi_buf - Success
766 static struct lpfc_io_buf*
767 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
768 struct scsi_cmnd *cmnd)
/* Dispatch through the per-SLI-revision function pointer
 * (s3 or s4 variant above).
 */
770 return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
774 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
775 * @phba: The Hba for which this call is being executed.
776 * @psb: The scsi buffer which is being released.
778 * This routine releases @psb scsi buffer by adding it to tail of @phba
779 * lpfc_scsi_buf_list list.
782 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
784 unsigned long iflag = 0;
/* Reset per-I/O state before the buffer is reused. */
787 psb->prot_seg_cnt = 0;
789 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
791 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
792 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
793 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
797 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
798 * @phba: The Hba for which this call is being executed.
799 * @psb: The scsi buffer which is being released.
801 * This routine releases @psb scsi buffer by adding it to tail of @hdwq
802 * io_buf_list list. For SLI4 XRI's are tied to the scsi buffer
803 * and cannot be reused for at least RA_TOV amount of time if it was
807 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
809 struct lpfc_sli4_hdw_queue *qp;
810 unsigned long iflag = 0;
813 psb->prot_seg_cnt = 0;
/* An XBUSY buffer's XRI is still owned by the hardware; park it on
 * the aborted list instead of the free pool.
 */
816 if (psb->flags & LPFC_SBUF_XBUSY) {
817 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
818 if (!phba->cfg_fcp_wait_abts_rsp)
820 list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
821 qp->abts_scsi_io_bufs++;
822 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
824 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
829 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
830 * @phba: The Hba for which this call is being executed.
831 * @psb: The scsi buffer which is being released.
833 * This routine releases @psb scsi buffer by adding it to tail of @phba
834 * lpfc_scsi_buf_list list.
837 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
/* Undo the qdepth bump taken in lpfc_get_scsi_buf_s3/_s4, then
 * dispatch to the per-SLI-revision release routine.
 */
839 if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
840 atomic_dec(&psb->ndlp->cmd_pending);
842 psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
843 phba->lpfc_release_scsi_buf(phba, psb);
847 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
848 * @data: A pointer to the immediate command data portion of the IOCB.
849 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
851 * The routine copies the entire FCP command from @fcp_cmnd to @data while
852 * byte swapping the data to big endian format for transmission on the wire.
855 lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
/* Copy one 32-bit word at a time, converting each to big-endian. */
859 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
860 i += sizeof(uint32_t), j++) {
861 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
866 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
867 * @phba: The Hba for which this call is being executed.
868 * @lpfc_cmd: The scsi buffer which is going to be mapped.
870 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
871 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
872 * through sg elements and format the bde. This routine also initializes all
873 * IOCB fields which are dependent on scsi command request buffer.
880 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
882 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
883 struct scatterlist *sgel = NULL;
884 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
885 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
886 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
887 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
888 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
890 uint32_t num_bde = 0;
891 int nseg, datadir = scsi_cmnd->sc_data_direction;
894 * There are three possibilities here - use scatter-gather segment, use
895 * the single mapping, or neither. Start the lpfc command prep by
896 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
900 if (scsi_sg_count(scsi_cmnd)) {
902 * The driver stores the segment count returned from pci_map_sg
903 * because this a count of dma-mappings used to map the use_sg
904 * pages. They are not guaranteed to be the same for those
905 * architectures that implement an IOMMU.
908 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
909 scsi_sg_count(scsi_cmnd), datadir);
913 lpfc_cmd->seg_cnt = nseg;
/* Too many mapped segments for the configured sg limit:
 * unmap and fail the prep rather than overrun the BPL.
 */
914 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
915 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
916 "9064 BLKGRD: %s: Too many sg segments"
917 " from dma_map_sg. Config %d, seg_cnt"
918 " %d\n", __func__, phba->cfg_sg_seg_cnt,
920 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
921 lpfc_cmd->seg_cnt = 0;
922 scsi_dma_unmap(scsi_cmnd);
927 * The driver established a maximum scatter-gather segment count
928 * during probe that limits the number of sg elements in any
929 * single scsi command. Just run through the seg_cnt and format
931 * When using SLI-3 the driver will try to fit all the BDEs into
932 * the IOCB. If it can't then the BDEs get added to a BPL as it
933 * does for SLI-2 mode.
935 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
936 physaddr = sg_dma_address(sgel);
/* Small I/O on SLI-3 without BlockGuard or DSS: place the data
 * BDEs directly in the extended IOCB; otherwise fill the BPL.
 */
937 if (phba->sli_rev == 3 &&
938 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
939 !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
940 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
941 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
942 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
943 data_bde->addrLow = putPaddrLow(physaddr);
944 data_bde->addrHigh = putPaddrHigh(physaddr);
947 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
948 bpl->tus.f.bdeSize = sg_dma_len(sgel);
949 bpl->tus.w = le32_to_cpu(bpl->tus.w);
951 le32_to_cpu(putPaddrLow(physaddr));
953 le32_to_cpu(putPaddrHigh(physaddr));
960 * Finish initializing those IOCB fields that are dependent on the
961 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
962 * explicitly reinitialized and for SLI-3 the extended bde count is
963 * explicitly reinitialized since all iocb memory resources are reused.
965 if (phba->sli_rev == 3 &&
966 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
967 !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
968 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
970 * The extended IOCB format can only fit 3 BDE or a BPL.
971 * This I/O has more than 3 BDE so the 1st data bde will
972 * be a BPL that is filled in here.
974 physaddr = lpfc_cmd->dma_handle;
975 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
976 data_bde->tus.f.bdeSize = (num_bde *
977 sizeof(struct ulp_bde64));
978 physaddr += (sizeof(struct fcp_cmnd) +
979 sizeof(struct fcp_rsp) +
980 (2 * sizeof(struct ulp_bde64)));
981 data_bde->addrHigh = putPaddrHigh(physaddr);
982 data_bde->addrLow = putPaddrLow(physaddr);
983 /* ebde count includes the response bde and data bpl */
984 iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
986 /* ebde count includes the response bde and data bdes */
987 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
990 iocb_cmd->un.fcpi64.bdl.bdeSize =
991 ((num_bde + 2) * sizeof(struct ulp_bde64));
992 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
994 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
997 * Due to difference in data length between DIF/non-DIF paths,
998 * we need to set word 4 of IOCB here
1000 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1001 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
1005 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1007 /* Return BG_ERR_INIT if error injection is detected by Initiator */
1008 #define BG_ERR_INIT 0x1
1009 /* Return BG_ERR_TGT if error injection is detected by Target */
1010 #define BG_ERR_TGT 0x2
1011 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
1012 #define BG_ERR_SWAP 0x10
1014 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
1017 #define BG_ERR_CHECK 0x20
1020 * lpfc_bg_err_inject - Determine if we should inject an error
1021 * @phba: The Hba for which this call is being executed.
1022 * @sc: The SCSI command to examine
1023 * @reftag: (out) BlockGuard reference tag for transmitted data
1024 * @apptag: (out) BlockGuard application tag for transmitted data
1025 * @new_guard: (in) Value to replace CRC with if needed
 *
 * NOTE(review): injection is gated by the phba->lpfc_injerr_* counters and
 * optional LBA / NPortID / WWPN match criteria — presumably configured via
 * debugfs (this function is only compiled under CONFIG_SCSI_LPFC_DEBUG_FS
 * at its call sites); confirm against lpfc_debugfs.c.
 *
1027 * Returns BG_ERR_* bit mask or 0 if request ignored
1030 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1031 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1033 struct scatterlist *sgpe; /* s/g prot entry */
1034 struct lpfc_io_buf *lpfc_cmd = NULL;
1035 struct scsi_dif_tuple *src = NULL;
1036 struct lpfc_nodelist *ndlp;
1037 struct lpfc_rport_data *rdata;
1038 uint32_t op = scsi_get_prot_op(sc);
/* Commands carrying no protection operation are never injection targets */
1045 if (op == SCSI_PROT_NORMAL)
1048 sgpe = scsi_prot_sglist(sc);
1049 lba = t10_pi_ref_tag(sc->request);
1050 if (lba == LPFC_INVALID_REFTAG)
1053 /* First check if we need to match the LBA */
1054 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1055 blksize = lpfc_cmd_blksize(sc);
1056 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1058 /* Make sure we have the right LBA if one is specified */
1059 if (phba->lpfc_injerr_lba < (u64)lba ||
1060 (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
/*
 * The target LBA falls inside this IO: compute the block offset of the
 * injection point and make sure the first prot s/g entry actually
 * covers that many 8-byte DIF tuples.
 */
1063 blockoff = phba->lpfc_injerr_lba - (u64)lba;
1064 numblks = sg_dma_len(sgpe) /
1065 sizeof(struct scsi_dif_tuple);
1066 if (numblks < blockoff)
1071 /* Next check if we need to match the remote NPortID or WWPN */
1072 rdata = lpfc_rport_data_from_scsi_device(sc->device);
1073 if (rdata && rdata->pnode) {
1074 ndlp = rdata->pnode;
1076 /* Make sure we have the right NPortID if one is specified */
1077 if (phba->lpfc_injerr_nportid &&
1078 (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1082 * Make sure we have the right WWPN if one is specified.
1083 * wwn[0] should be a non-zero NAA in a good WWPN.
1085 if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
1086 (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1087 sizeof(struct lpfc_name)) != 0))
1091 /* Setup a ptr to the protection data if the SCSI host provides it */
1093 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1095 lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
1098 /* Should we change the Reference Tag */
1100 if (phba->lpfc_injerr_wref_cnt) {
1102 case SCSI_PROT_WRITE_PASS:
1105 * For WRITE_PASS, force the error
1106 * to be sent on the wire. It should
1107 * be detected by the Target.
1108 * If blockoff != 0 error will be
1109 * inserted in middle of the IO.
1112 lpfc_printf_log(phba, KERN_ERR,
1114 "9076 BLKGRD: Injecting reftag error: "
1115 "write lba x%lx + x%x oldrefTag x%x\n",
1116 (unsigned long)lba, blockoff,
1117 be32_to_cpu(src->ref_tag));
1120 * Save the old ref_tag so we can
1121 * restore it on completion.
1124 lpfc_cmd->prot_data_type =
1126 lpfc_cmd->prot_data_segment =
1128 lpfc_cmd->prot_data =
/* Corrupt the ref tag in the host-side DIF tuple itself */
1131 src->ref_tag = cpu_to_be32(0xDEADBEEF);
1132 phba->lpfc_injerr_wref_cnt--;
/* Last injection: clear all match criteria so injection disarms */
1133 if (phba->lpfc_injerr_wref_cnt == 0) {
1134 phba->lpfc_injerr_nportid = 0;
1135 phba->lpfc_injerr_lba =
1136 LPFC_INJERR_LBA_OFF;
1137 memset(&phba->lpfc_injerr_wwpn,
1138 0, sizeof(struct lpfc_name));
/* BG_ERR_CHECK tells the caller to disable HW checking of this tag */
1140 rc = BG_ERR_TGT | BG_ERR_CHECK;
1145 case SCSI_PROT_WRITE_INSERT:
1147 * For WRITE_INSERT, force the error
1148 * to be sent on the wire. It should be
1149 * detected by the Target.
1151 /* DEADBEEF will be the reftag on the wire */
1152 *reftag = 0xDEADBEEF;
1153 phba->lpfc_injerr_wref_cnt--;
1154 if (phba->lpfc_injerr_wref_cnt == 0) {
1155 phba->lpfc_injerr_nportid = 0;
1156 phba->lpfc_injerr_lba =
1157 LPFC_INJERR_LBA_OFF;
1158 memset(&phba->lpfc_injerr_wwpn,
1159 0, sizeof(struct lpfc_name));
1161 rc = BG_ERR_TGT | BG_ERR_CHECK;
1163 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1164 "9078 BLKGRD: Injecting reftag error: "
1165 "write lba x%lx\n", (unsigned long)lba);
1167 case SCSI_PROT_WRITE_STRIP:
1169 * For WRITE_STRIP and WRITE_PASS,
1170 * force the error on data
1171 * being copied from SLI-Host to SLI-Port.
1173 *reftag = 0xDEADBEEF;
1174 phba->lpfc_injerr_wref_cnt--;
1175 if (phba->lpfc_injerr_wref_cnt == 0) {
1176 phba->lpfc_injerr_nportid = 0;
1177 phba->lpfc_injerr_lba =
1178 LPFC_INJERR_LBA_OFF;
1179 memset(&phba->lpfc_injerr_wwpn,
1180 0, sizeof(struct lpfc_name));
1184 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1185 "9077 BLKGRD: Injecting reftag error: "
1186 "write lba x%lx\n", (unsigned long)lba);
/* Read-direction reftag injection: corrupt what arrives off the wire */
1190 if (phba->lpfc_injerr_rref_cnt) {
1192 case SCSI_PROT_READ_INSERT:
1193 case SCSI_PROT_READ_STRIP:
1194 case SCSI_PROT_READ_PASS:
1196 * For READ_STRIP and READ_PASS, force the
1197 * error on data being read off the wire. It
1198 * should force an IO error to the driver.
1200 *reftag = 0xDEADBEEF;
1201 phba->lpfc_injerr_rref_cnt--;
1202 if (phba->lpfc_injerr_rref_cnt == 0) {
1203 phba->lpfc_injerr_nportid = 0;
1204 phba->lpfc_injerr_lba =
1205 LPFC_INJERR_LBA_OFF;
1206 memset(&phba->lpfc_injerr_wwpn,
1207 0, sizeof(struct lpfc_name));
1211 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1212 "9079 BLKGRD: Injecting reftag error: "
1213 "read lba x%lx\n", (unsigned long)lba);
1219 /* Should we change the Application Tag */
1221 if (phba->lpfc_injerr_wapp_cnt) {
1223 case SCSI_PROT_WRITE_PASS:
1226 * For WRITE_PASS, force the error
1227 * to be sent on the wire. It should
1228 * be detected by the Target.
1229 * If blockoff != 0 error will be
1230 * inserted in middle of the IO.
1233 lpfc_printf_log(phba, KERN_ERR,
1235 "9080 BLKGRD: Injecting apptag error: "
1236 "write lba x%lx + x%x oldappTag x%x\n",
1237 (unsigned long)lba, blockoff,
1238 be16_to_cpu(src->app_tag));
1241 * Save the old app_tag so we can
1242 * restore it on completion.
1245 lpfc_cmd->prot_data_type =
1247 lpfc_cmd->prot_data_segment =
1249 lpfc_cmd->prot_data =
/* 0xDEAD is the deliberately-bad app tag written into the DIF tuple */
1252 src->app_tag = cpu_to_be16(0xDEAD);
1253 phba->lpfc_injerr_wapp_cnt--;
1254 if (phba->lpfc_injerr_wapp_cnt == 0) {
1255 phba->lpfc_injerr_nportid = 0;
1256 phba->lpfc_injerr_lba =
1257 LPFC_INJERR_LBA_OFF;
1258 memset(&phba->lpfc_injerr_wwpn,
1259 0, sizeof(struct lpfc_name));
1261 rc = BG_ERR_TGT | BG_ERR_CHECK;
1265 case SCSI_PROT_WRITE_INSERT:
1267 * For WRITE_INSERT, force the
1268 * error to be sent on the wire. It should be
1269 * detected by the Target.
1271 /* DEAD will be the apptag on the wire */
1273 phba->lpfc_injerr_wapp_cnt--;
1274 if (phba->lpfc_injerr_wapp_cnt == 0) {
1275 phba->lpfc_injerr_nportid = 0;
1276 phba->lpfc_injerr_lba =
1277 LPFC_INJERR_LBA_OFF;
1278 memset(&phba->lpfc_injerr_wwpn,
1279 0, sizeof(struct lpfc_name));
1281 rc = BG_ERR_TGT | BG_ERR_CHECK;
1283 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1284 "0813 BLKGRD: Injecting apptag error: "
1285 "write lba x%lx\n", (unsigned long)lba);
1287 case SCSI_PROT_WRITE_STRIP:
1289 * For WRITE_STRIP and WRITE_PASS,
1290 * force the error on data
1291 * being copied from SLI-Host to SLI-Port.
1294 phba->lpfc_injerr_wapp_cnt--;
1295 if (phba->lpfc_injerr_wapp_cnt == 0) {
1296 phba->lpfc_injerr_nportid = 0;
1297 phba->lpfc_injerr_lba =
1298 LPFC_INJERR_LBA_OFF;
1299 memset(&phba->lpfc_injerr_wwpn,
1300 0, sizeof(struct lpfc_name));
1304 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1305 "0812 BLKGRD: Injecting apptag error: "
1306 "write lba x%lx\n", (unsigned long)lba);
/* Read-direction apptag injection */
1310 if (phba->lpfc_injerr_rapp_cnt) {
1312 case SCSI_PROT_READ_INSERT:
1313 case SCSI_PROT_READ_STRIP:
1314 case SCSI_PROT_READ_PASS:
1316 * For READ_STRIP and READ_PASS, force the
1317 * error on data being read off the wire. It
1318 * should force an IO error to the driver.
1321 phba->lpfc_injerr_rapp_cnt--;
1322 if (phba->lpfc_injerr_rapp_cnt == 0) {
1323 phba->lpfc_injerr_nportid = 0;
1324 phba->lpfc_injerr_lba =
1325 LPFC_INJERR_LBA_OFF;
1326 memset(&phba->lpfc_injerr_wwpn,
1327 0, sizeof(struct lpfc_name));
1331 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1332 "0814 BLKGRD: Injecting apptag error: "
1333 "read lba x%lx\n", (unsigned long)lba);
1340 /* Should we change the Guard Tag */
1342 if (phba->lpfc_injerr_wgrd_cnt) {
1344 case SCSI_PROT_WRITE_PASS:
1348 case SCSI_PROT_WRITE_INSERT:
1350 * For WRITE_INSERT, force the
1351 * error to be sent on the wire. It should be
1352 * detected by the Target.
1354 phba->lpfc_injerr_wgrd_cnt--;
1355 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1356 phba->lpfc_injerr_nportid = 0;
1357 phba->lpfc_injerr_lba =
1358 LPFC_INJERR_LBA_OFF;
1359 memset(&phba->lpfc_injerr_wwpn,
1360 0, sizeof(struct lpfc_name));
/* Guard errors are made by asking the caller to swap CRC for checksum */
1363 rc |= BG_ERR_TGT | BG_ERR_SWAP;
1364 /* Signals the caller to swap CRC->CSUM */
1366 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1367 "0817 BLKGRD: Injecting guard error: "
1368 "write lba x%lx\n", (unsigned long)lba);
1370 case SCSI_PROT_WRITE_STRIP:
1372 * For WRITE_STRIP and WRITE_PASS,
1373 * force the error on data
1374 * being copied from SLI-Host to SLI-Port.
1376 phba->lpfc_injerr_wgrd_cnt--;
1377 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1378 phba->lpfc_injerr_nportid = 0;
1379 phba->lpfc_injerr_lba =
1380 LPFC_INJERR_LBA_OFF;
1381 memset(&phba->lpfc_injerr_wwpn,
1382 0, sizeof(struct lpfc_name));
1385 rc = BG_ERR_INIT | BG_ERR_SWAP;
1386 /* Signals the caller to swap CRC->CSUM */
1388 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1389 "0816 BLKGRD: Injecting guard error: "
1390 "write lba x%lx\n", (unsigned long)lba);
/* Read-direction guard injection */
1394 if (phba->lpfc_injerr_rgrd_cnt) {
1396 case SCSI_PROT_READ_INSERT:
1397 case SCSI_PROT_READ_STRIP:
1398 case SCSI_PROT_READ_PASS:
1400 * For READ_STRIP and READ_PASS, force the
1401 * error on data being read off the wire. It
1402 * should force an IO error to the driver.
1404 phba->lpfc_injerr_rgrd_cnt--;
1405 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1406 phba->lpfc_injerr_nportid = 0;
1407 phba->lpfc_injerr_lba =
1408 LPFC_INJERR_LBA_OFF;
1409 memset(&phba->lpfc_injerr_wwpn,
1410 0, sizeof(struct lpfc_name));
1413 rc = BG_ERR_INIT | BG_ERR_SWAP;
1414 /* Signals the caller to swap CRC->CSUM */
1416 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1417 "0818 BLKGRD: Injecting guard error: "
1418 "read lba x%lx\n", (unsigned long)lba);
1428 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1429 * the specified SCSI command.
1430 * @phba: The Hba for which this call is being executed.
1431 * @sc: The SCSI command to examine
1432 * @txop: (out) BlockGuard operation for transmitted data
1433 * @rxop: (out) BlockGuard operation for received data
 *
 * Maps the command's SCSI protection op (and whether the host guard is
 * IP-checksum or T10 CRC, per lpfc_cmd_guard_csum()) to the BG_OP_* pair
 * the hardware should apply on the rx and tx sides.
 *
1435 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1439 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1440 uint8_t *txop, uint8_t *rxop)
/* Host guard is an IP checksum: convert between CSUM (host) and CRC (wire) */
1444 if (lpfc_cmd_guard_csum(sc)) {
1445 switch (scsi_get_prot_op(sc)) {
1446 case SCSI_PROT_READ_INSERT:
1447 case SCSI_PROT_WRITE_STRIP:
1448 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1449 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1452 case SCSI_PROT_READ_STRIP:
1453 case SCSI_PROT_WRITE_INSERT:
1454 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1455 *txop = BG_OP_IN_NODIF_OUT_CRC;
1458 case SCSI_PROT_READ_PASS:
1459 case SCSI_PROT_WRITE_PASS:
1460 *rxop = BG_OP_IN_CRC_OUT_CSUM;
1461 *txop = BG_OP_IN_CSUM_OUT_CRC;
/* SCSI_PROT_NORMAL with an IP guard is an invalid combination */
1464 case SCSI_PROT_NORMAL:
1466 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1467 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1468 scsi_get_prot_op(sc));
/* Host guard is T10 CRC: CRC is used on both host and wire sides */
1474 switch (scsi_get_prot_op(sc)) {
1475 case SCSI_PROT_READ_STRIP:
1476 case SCSI_PROT_WRITE_INSERT:
1477 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1478 *txop = BG_OP_IN_NODIF_OUT_CRC;
1481 case SCSI_PROT_READ_PASS:
1482 case SCSI_PROT_WRITE_PASS:
1483 *rxop = BG_OP_IN_CRC_OUT_CRC;
1484 *txop = BG_OP_IN_CRC_OUT_CRC;
1487 case SCSI_PROT_READ_INSERT:
1488 case SCSI_PROT_WRITE_STRIP:
1489 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1490 *txop = BG_OP_IN_CRC_OUT_NODIF;
1493 case SCSI_PROT_NORMAL:
1495 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1496 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1497 scsi_get_prot_op(sc));
1506 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1508 * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with
1509 * the specified SCSI command in order to force a guard tag error.
1510 * @phba: The Hba for which this call is being executed.
1511 * @sc: The SCSI command to examine
1512 * @txop: (out) BlockGuard operation for transmitted data
1513 * @rxop: (out) BlockGuard operation for received data
 *
 * Debug-only counterpart of lpfc_sc_to_bg_opcodes(): deliberately picks the
 * WRONG guard algorithm (CRC where a checksum belongs and vice versa) so the
 * other end detects a guard error. Used by the BG_ERR_SWAP injection path.
 *
1515 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1519 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1520 uint8_t *txop, uint8_t *rxop)
/* IP-checksum guard: swap in CRC ops where CSUM ops would be correct */
1524 if (lpfc_cmd_guard_csum(sc)) {
1525 switch (scsi_get_prot_op(sc)) {
1526 case SCSI_PROT_READ_INSERT:
1527 case SCSI_PROT_WRITE_STRIP:
1528 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1529 *txop = BG_OP_IN_CRC_OUT_NODIF;
1532 case SCSI_PROT_READ_STRIP:
1533 case SCSI_PROT_WRITE_INSERT:
1534 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1535 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1538 case SCSI_PROT_READ_PASS:
1539 case SCSI_PROT_WRITE_PASS:
1540 *rxop = BG_OP_IN_CSUM_OUT_CRC;
1541 *txop = BG_OP_IN_CRC_OUT_CSUM;
1544 case SCSI_PROT_NORMAL:
/* CRC guard: swap in CSUM ops where CRC ops would be correct */
1550 switch (scsi_get_prot_op(sc)) {
1551 case SCSI_PROT_READ_STRIP:
1552 case SCSI_PROT_WRITE_INSERT:
1553 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1554 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1557 case SCSI_PROT_READ_PASS:
1558 case SCSI_PROT_WRITE_PASS:
1559 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1560 *txop = BG_OP_IN_CSUM_OUT_CSUM;
1563 case SCSI_PROT_READ_INSERT:
1564 case SCSI_PROT_WRITE_STRIP:
1565 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1566 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1569 case SCSI_PROT_NORMAL:
1580 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1581 * @phba: The Hba for which this call is being executed.
1582 * @sc: pointer to scsi command we're working on
1583 * @bpl: pointer to buffer list for protection groups
1584 * @datasegcnt: number of segments of data that have been dma mapped
1586 * This function sets up BPL buffer list for protection groups of
1587 * type LPFC_PG_TYPE_NO_DIF
1589 * This is usually used when the HBA is instructed to generate
1590 * DIFs and insert them into data stream (or strip DIF from
1591 * incoming data stream)
1593 * The buffer list consists of just one protection group described
1595 * +-------------------------+
1596 * start of prot group --> | PDE_5 |
1597 * +-------------------------+
1599 * +-------------------------+
1601 * +-------------------------+
1602 * |more Data BDE's ... (opt)|
1603 * +-------------------------+
1606 * Note: Data s/g buffers have been dma mapped
1608 * Returns the number of BDEs added to the BPL.
1611 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1612 struct ulp_bde64 *bpl, int datasegcnt)
1614 struct scatterlist *sgde = NULL; /* s/g data entry */
1615 struct lpfc_pde5 *pde5 = NULL;
1616 struct lpfc_pde6 *pde6 = NULL;
1617 dma_addr_t physaddr;
1618 int i = 0, num_bde = 0, status;
1619 int datadir = sc->sc_data_direction;
1620 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1623 uint32_t checking = 1;
/* Translate the SCSI prot op into rx/tx BlockGuard opcodes */
1627 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1631 /* extract some info from the scsi command for pde*/
1632 reftag = t10_pi_ref_tag(sc->request);
1633 if (reftag == LPFC_INVALID_REFTAG)
/* Debug builds may rewrite reftag/opcodes to inject protection errors */
1636 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1637 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1639 if (rc & BG_ERR_SWAP)
1640 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1641 if (rc & BG_ERR_CHECK)
1646 /* setup PDE5 with what we have */
1647 pde5 = (struct lpfc_pde5 *) bpl;
1648 memset(pde5, 0, sizeof(struct lpfc_pde5));
1649 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR)
1651 /* Endianness conversion if necessary for PDE5 */
1652 pde5->word0 = cpu_to_le32(pde5->word0);
1653 pde5->reftag = cpu_to_le32(reftag);
1655 /* advance bpl and increment bde count */
1658 pde6 = (struct lpfc_pde6 *) bpl;
1660 /* setup PDE6 with the rest of the info */
1661 memset(pde6, 0, sizeof(struct lpfc_pde6));
1662 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1663 bf_set(pde6_optx, pde6, txop);
1664 bf_set(pde6_oprx, pde6, rxop);
1667 * We only need to check the data on READs, for WRITEs
1668 * protection data is automatically generated, not checked.
1670 if (datadir == DMA_FROM_DEVICE) {
1671 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1672 bf_set(pde6_ce, pde6, checking);
1674 bf_set(pde6_ce, pde6, 0);
1676 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
1677 bf_set(pde6_re, pde6, checking);
1679 bf_set(pde6_re, pde6, 0);
/* ai=1: auto-increment reference tag; app tag unused (value 0) */
1681 bf_set(pde6_ai, pde6, 1);
1682 bf_set(pde6_ae, pde6, 0);
1683 bf_set(pde6_apptagval, pde6, 0);
1685 /* Endianness conversion if necessary for PDE6 */
1686 pde6->word0 = cpu_to_le32(pde6->word0);
1687 pde6->word1 = cpu_to_le32(pde6->word1);
1688 pde6->word2 = cpu_to_le32(pde6->word2);
1690 /* advance bpl and increment bde count */
1694 /* assumption: caller has already run dma_map_sg on command data */
1695 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1696 physaddr = sg_dma_address(sgde);
1697 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1698 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1699 bpl->tus.f.bdeSize = sg_dma_len(sgde);
/* Writes use plain 64-bit BDEs; reads use the input (64I) flavor */
1700 if (datadir == DMA_TO_DEVICE)
1701 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1703 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1704 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1714 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1715 * @phba: The Hba for which this call is being executed.
1716 * @sc: pointer to scsi command we're working on
1717 * @bpl: pointer to buffer list for protection groups
1718 * @datacnt: number of segments of data that have been dma mapped
1719 * @protcnt: number of segment of protection data that have been dma mapped
1721 * This function sets up BPL buffer list for protection groups of
1722 * type LPFC_PG_TYPE_DIF
1724 * This is usually used when DIFs are in their own buffers,
1725 * separate from the data. The HBA can then by instructed
1726 * to place the DIFs in the outgoing stream. For read operations,
1727 * The HBA could extract the DIFs and place it in DIF buffers.
1729 * The buffer list for this type consists of one or more of the
1730 * protection groups described below:
1731 * +-------------------------+
1732 * start of first prot group --> | PDE_5 |
1733 * +-------------------------+
1735 * +-------------------------+
1736 * | PDE_7 (Prot BDE) |
1737 * +-------------------------+
1739 * +-------------------------+
1740 * |more Data BDE's ... (opt)|
1741 * +-------------------------+
1742 * start of new prot group --> | PDE_5 |
1743 * +-------------------------+
1745 * +-------------------------+
1747 * Note: It is assumed that both data and protection s/g buffers have been
1750 * Returns the number of BDEs added to the BPL.
1753 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1754 struct ulp_bde64 *bpl, int datacnt, int protcnt)
1756 struct scatterlist *sgde = NULL; /* s/g data entry */
1757 struct scatterlist *sgpe = NULL; /* s/g prot entry */
1758 struct lpfc_pde5 *pde5 = NULL;
1759 struct lpfc_pde6 *pde6 = NULL;
1760 struct lpfc_pde7 *pde7 = NULL;
1761 dma_addr_t dataphysaddr, protphysaddr;
1762 unsigned short curr_data = 0, curr_prot = 0;
1763 unsigned int split_offset;
1764 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
1765 unsigned int protgrp_blks, protgrp_bytes;
1766 unsigned int remainder, subtotal;
1768 int datadir = sc->sc_data_direction;
1769 unsigned char pgdone = 0, alldone = 0;
1771 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1774 uint32_t checking = 1;
/* Both a protection s/g list and a data s/g list are mandatory here */
1779 sgpe = scsi_prot_sglist(sc);
1780 sgde = scsi_sglist(sc);
1782 if (!sgpe || !sgde) {
1783 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1784 "9020 Invalid s/g entry: data=x%px prot=x%px\n",
1789 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1793 /* extract some info from the scsi command */
1794 blksize = lpfc_cmd_blksize(sc);
1795 reftag = t10_pi_ref_tag(sc->request);
1796 if (reftag == LPFC_INVALID_REFTAG)
/* Debug builds may rewrite reftag/opcodes to inject protection errors */
1799 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1800 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1802 if (rc & BG_ERR_SWAP)
1803 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1804 if (rc & BG_ERR_CHECK)
/* Each protection group needs PDE5+PDE6, hence the "- 2" headroom check */
1811 /* Check to see if we ran out of space */
1812 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1815 /* setup PDE5 with what we have */
1816 pde5 = (struct lpfc_pde5 *) bpl;
1817 memset(pde5, 0, sizeof(struct lpfc_pde5));
1818 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1820 /* Endianness conversion if necessary for PDE5 */
1821 pde5->word0 = cpu_to_le32(pde5->word0);
1822 pde5->reftag = cpu_to_le32(reftag);
1824 /* advance bpl and increment bde count */
1827 pde6 = (struct lpfc_pde6 *) bpl;
1829 /* setup PDE6 with the rest of the info */
1830 memset(pde6, 0, sizeof(struct lpfc_pde6));
1831 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1832 bf_set(pde6_optx, pde6, txop);
1833 bf_set(pde6_oprx, pde6, rxop);
1835 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1836 bf_set(pde6_ce, pde6, checking);
1838 bf_set(pde6_ce, pde6, 0);
1840 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
1841 bf_set(pde6_re, pde6, checking);
1843 bf_set(pde6_re, pde6, 0);
1845 bf_set(pde6_ai, pde6, 1);
1846 bf_set(pde6_ae, pde6, 0);
1847 bf_set(pde6_apptagval, pde6, 0);
1849 /* Endianness conversion if necessary for PDE6 */
1850 pde6->word0 = cpu_to_le32(pde6->word0);
1851 pde6->word1 = cpu_to_le32(pde6->word1);
1852 pde6->word2 = cpu_to_le32(pde6->word2);
1854 /* advance bpl and increment bde count */
1858 /* setup the first BDE that points to protection buffer */
1859 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1860 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
/* Each DIF tuple is 8 bytes, so the prot segment must be 8-aligned */
1862 /* must be integer multiple of the DIF block length */
1863 BUG_ON(protgroup_len % 8);
1865 pde7 = (struct lpfc_pde7 *) bpl;
1866 memset(pde7, 0, sizeof(struct lpfc_pde7));
1867 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1869 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1870 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
/* One DIF tuple (8 bytes) protects one logical block of blksize bytes */
1872 protgrp_blks = protgroup_len / 8;
1873 protgrp_bytes = protgrp_blks * blksize;
1875 /* check if this pde is crossing the 4K boundary; if so split */
1876 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
1877 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1878 protgroup_offset += protgroup_remainder;
1879 protgrp_blks = protgroup_remainder / 8;
1880 protgrp_bytes = protgrp_blks * blksize;
1882 protgroup_offset = 0;
1888 /* setup BDE's for data blocks associated with DIF data */
1890 subtotal = 0; /* total bytes processed for current prot grp */
1892 /* Check to see if we ran out of space */
1893 if (num_bde >= phba->cfg_total_seg_cnt)
1897 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1898 "9065 BLKGRD:%s Invalid data segment\n",
1903 dataphysaddr = sg_dma_address(sgde) + split_offset;
1904 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1905 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1907 remainder = sg_dma_len(sgde) - split_offset;
1909 if ((subtotal + remainder) <= protgrp_bytes) {
1910 /* we can use this whole buffer */
1911 bpl->tus.f.bdeSize = remainder;
1914 if ((subtotal + remainder) == protgrp_bytes)
1917 /* must split this buffer with next prot grp */
1918 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1919 split_offset += bpl->tus.f.bdeSize;
1922 subtotal += bpl->tus.f.bdeSize;
1924 if (datadir == DMA_TO_DEVICE)
1925 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1927 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1928 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1936 /* Move to the next s/g segment if possible */
1937 sgde = sg_next(sgde);
/* A split prot segment stays current: advance reftag and loop again */
1941 if (protgroup_offset) {
1942 /* update the reference tag */
1943 reftag += protgrp_blks;
1949 if (curr_prot == protcnt) {
1951 } else if (curr_prot < protcnt) {
1952 /* advance to next prot buffer */
1953 sgpe = sg_next(sgpe);
1956 /* update the reference tag */
1957 reftag += protgrp_blks;
1959 /* if we're here, we have a bug */
1960 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1961 "9054 BLKGRD: bug in %s\n", __func__);
1971 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1972 * @phba: The Hba for which this call is being executed.
1973 * @sc: pointer to scsi command we're working on
1974 * @sgl: pointer to buffer list for protection groups
1975 * @datasegcnt: number of segments of data that have been dma mapped
1976 * @lpfc_cmd: lpfc scsi command object pointer.
1978 * This function sets up SGL buffer list for protection groups of
1979 * type LPFC_PG_TYPE_NO_DIF
1981 * This is usually used when the HBA is instructed to generate
1982 * DIFs and insert them into data stream (or strip DIF from
1983 * incoming data stream)
1985 * The buffer list consists of just one protection group described
1987 * +-------------------------+
1988 * start of prot group --> | DI_SEED |
1989 * +-------------------------+
1991 * +-------------------------+
1992 * |more Data SGE's ... (opt)|
1993 * +-------------------------+
1996 * Note: Data s/g buffers have been dma mapped
1998 * Returns the number of SGEs added to the SGL.
2001 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2002 struct sli4_sge *sgl, int datasegcnt,
2003 struct lpfc_io_buf *lpfc_cmd)
2005 struct scatterlist *sgde = NULL; /* s/g data entry */
2006 struct sli4_sge_diseed *diseed = NULL;
2007 dma_addr_t physaddr;
2008 int i = 0, num_sge = 0, status;
2011 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2014 uint32_t checking = 1;
2016 uint32_t dma_offset = 0;
/* sgl_xtra holds an extra SGL page chained in via an LSP entry when the
 * on-board SGL fills up (SLI-4 "hybrid" SGL expansion). */
2017 struct sli4_hybrid_sgl *sgl_xtra = NULL;
2019 bool lsp_just_set = false;
2021 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2025 /* extract some info from the scsi command for pde*/
2026 reftag = t10_pi_ref_tag(sc->request);
2027 if (reftag == LPFC_INVALID_REFTAG)
/* Debug builds may rewrite reftag/opcodes to inject protection errors */
2030 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2031 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2033 if (rc & BG_ERR_SWAP)
2034 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2035 if (rc & BG_ERR_CHECK)
2040 /* setup DISEED with what we have */
2041 diseed = (struct sli4_sge_diseed *) sgl;
2042 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2043 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2045 /* Endianness conversion if necessary */
2046 diseed->ref_tag = cpu_to_le32(reftag);
2047 diseed->ref_tag_tran = diseed->ref_tag;
2050 * We only need to check the data on READs, for WRITEs
2051 * protection data is automatically generated, not checked.
2053 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2054 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
2055 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2057 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2059 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2060 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2062 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2065 /* setup DISEED with the rest of the info */
2066 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2067 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
/* ai=1: auto-increment reference tag; me=0: no app-tag masking */
2069 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2070 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2072 /* Endianness conversion if necessary for DISEED */
2073 diseed->word2 = cpu_to_le32(diseed->word2);
2074 diseed->word3 = cpu_to_le32(diseed->word3);
2076 /* advance bpl and increment sge count */
2080 /* assumption: caller has already run dma_map_sg on command data */
2081 sgde = scsi_sglist(sc);
2083 for (i = 0; i < datasegcnt; i++) {
2087 /* do we need to expand the segment */
2088 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2089 ((datasegcnt - 1) != i)) {
/* Current SGL page is full: emit an LSP entry chaining to a fresh page */
2091 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2093 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2095 if (unlikely(!sgl_xtra)) {
2096 lpfc_cmd->seg_cnt = 0;
2099 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2100 sgl_xtra->dma_phys_sgl));
2101 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2102 sgl_xtra->dma_phys_sgl));
2105 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2108 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
/* Plain data SGE: mark the last one and copy the mapped segment */
2109 if ((datasegcnt - 1) == i)
2110 bf_set(lpfc_sli4_sge_last, sgl, 1);
2111 physaddr = sg_dma_address(sgde);
2112 dma_len = sg_dma_len(sgde);
2113 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2114 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2116 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2117 sgl->word2 = cpu_to_le32(sgl->word2);
2118 sgl->sge_len = cpu_to_le32(dma_len);
2120 dma_offset += dma_len;
2121 sgde = sg_next(sgde);
2125 lsp_just_set = false;
/* LSP entry: finalize it and switch the cursor to the new SGL page */
2128 sgl->word2 = cpu_to_le32(sgl->word2);
2129 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2131 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2134 lsp_just_set = true;
2146 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2147 * @phba: The Hba for which this call is being executed.
2148 * @sc: pointer to scsi command we're working on
2149 * @sgl: pointer to buffer list for protection groups
2150 * @datacnt: number of segments of data that have been dma mapped
2151 * @protcnt: number of segment of protection data that have been dma mapped
2152 * @lpfc_cmd: lpfc scsi command object pointer.
2154 * This function sets up SGL buffer list for protection groups of
2155 * type LPFC_PG_TYPE_DIF
2157 * This is usually used when DIFs are in their own buffers,
2158 * separate from the data. The HBA can then by instructed
2159 * to place the DIFs in the outgoing stream. For read operations,
2160 * The HBA could extract the DIFs and place it in DIF buffers.
2162 * The buffer list for this type consists of one or more of the
2163 * protection groups described below:
2164 * +-------------------------+
2165 * start of first prot group --> | DISEED |
2166 * +-------------------------+
2167 * | DIF (Prot SGE) |
2168 * +-------------------------+
2170 * +-------------------------+
2171 * |more Data SGE's ... (opt)|
2172 * +-------------------------+
2173 * start of new prot group --> | DISEED |
2174 * +-------------------------+
2176 * +-------------------------+
2178 * Note: It is assumed that both data and protection s/g buffers have been
2181 * Returns the number of SGEs added to the SGL.
2184 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2185 struct sli4_sge *sgl, int datacnt, int protcnt,
2186 struct lpfc_io_buf *lpfc_cmd)
2188 struct scatterlist *sgde = NULL; /* s/g data entry */
2189 struct scatterlist *sgpe = NULL; /* s/g prot entry */
2190 struct sli4_sge_diseed *diseed = NULL;
2191 dma_addr_t dataphysaddr, protphysaddr;
2192 unsigned short curr_data = 0, curr_prot = 0;
2193 unsigned int split_offset;
2194 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2195 unsigned int protgrp_blks, protgrp_bytes;
2196 unsigned int remainder, subtotal;
2198 unsigned char pgdone = 0, alldone = 0;
2203 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2206 uint32_t checking = 1;
2207 uint32_t dma_offset = 0;
2208 int num_sge = 0, j = 2;
2209 struct sli4_hybrid_sgl *sgl_xtra = NULL;
2211 sgpe = scsi_prot_sglist(sc);
2212 sgde = scsi_sglist(sc);
2214 if (!sgpe || !sgde) {
2215 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2216 "9082 Invalid s/g entry: data=x%px prot=x%px\n",
2221 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2225 /* extract some info from the scsi command */
2226 blksize = lpfc_cmd_blksize(sc);
2227 reftag = t10_pi_ref_tag(sc->request);
2228 if (reftag == LPFC_INVALID_REFTAG)
2231 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2232 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2234 if (rc & BG_ERR_SWAP)
2235 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2236 if (rc & BG_ERR_CHECK)
2243 /* Check to see if we ran out of space */
2244 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2248 /* DISEED and DIF have to be together */
2249 if (!((j + 1) % phba->border_sge_num) ||
2250 !((j + 2) % phba->border_sge_num) ||
2251 !((j + 3) % phba->border_sge_num)) {
2255 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2257 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2259 if (unlikely(!sgl_xtra)) {
2262 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2263 sgl_xtra->dma_phys_sgl));
2264 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2265 sgl_xtra->dma_phys_sgl));
2268 sgl->word2 = cpu_to_le32(sgl->word2);
2269 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2271 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2275 /* setup DISEED with what we have */
2276 diseed = (struct sli4_sge_diseed *) sgl;
2277 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2278 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2280 /* Endianness conversion if necessary */
2281 diseed->ref_tag = cpu_to_le32(reftag);
2282 diseed->ref_tag_tran = diseed->ref_tag;
2284 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
2285 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2288 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2290 * When in this mode, the hardware will replace
2291 * the guard tag from the host with a
2292 * newly generated good CRC for the wire.
2293 * Switch to raw mode here to avoid this
2294 * behavior. What the host sends gets put on the wire.
2296 if (txop == BG_OP_IN_CRC_OUT_CRC) {
2297 txop = BG_OP_RAW_MODE;
2298 rxop = BG_OP_RAW_MODE;
2303 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2304 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2306 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2308 /* setup DISEED with the rest of the info */
2309 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2310 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2312 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2313 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2315 /* Endianness conversion if necessary for DISEED */
2316 diseed->word2 = cpu_to_le32(diseed->word2);
2317 diseed->word3 = cpu_to_le32(diseed->word3);
2319 /* advance sgl and increment bde count */
2325 /* setup the first BDE that points to protection buffer */
2326 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2327 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2329 /* must be integer multiple of the DIF block length */
2330 BUG_ON(protgroup_len % 8);
2332 /* Now setup DIF SGE */
2334 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2335 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2336 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2337 sgl->word2 = cpu_to_le32(sgl->word2);
2340 protgrp_blks = protgroup_len / 8;
2341 protgrp_bytes = protgrp_blks * blksize;
2343 /* check if DIF SGE is crossing the 4K boundary; if so split */
2344 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2345 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2346 protgroup_offset += protgroup_remainder;
2347 protgrp_blks = protgroup_remainder / 8;
2348 protgrp_bytes = protgrp_blks * blksize;
2350 protgroup_offset = 0;
2356 /* setup SGE's for data blocks associated with DIF data */
2358 subtotal = 0; /* total bytes processed for current prot grp */
2364 /* Check to see if we ran out of space */
2365 if ((num_sge >= phba->cfg_total_seg_cnt) &&
2370 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2371 "9086 BLKGRD:%s Invalid data segment\n",
2376 if (!((j + 1) % phba->border_sge_num)) {
2380 bf_set(lpfc_sli4_sge_type, sgl,
2383 sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2386 if (unlikely(!sgl_xtra)) {
2389 sgl->addr_lo = cpu_to_le32(
2390 putPaddrLow(sgl_xtra->dma_phys_sgl));
2391 sgl->addr_hi = cpu_to_le32(
2392 putPaddrHigh(sgl_xtra->dma_phys_sgl));
2395 sgl->word2 = cpu_to_le32(sgl->word2);
2396 sgl->sge_len = cpu_to_le32(
2397 phba->cfg_sg_dma_buf_size);
2399 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2401 dataphysaddr = sg_dma_address(sgde) +
2404 remainder = sg_dma_len(sgde) - split_offset;
2406 if ((subtotal + remainder) <= protgrp_bytes) {
2407 /* we can use this whole buffer */
2408 dma_len = remainder;
2411 if ((subtotal + remainder) ==
2415 /* must split this buffer with next
2418 dma_len = protgrp_bytes - subtotal;
2419 split_offset += dma_len;
2422 subtotal += dma_len;
2425 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2427 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2429 bf_set(lpfc_sli4_sge_last, sgl, 0);
2430 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2431 bf_set(lpfc_sli4_sge_type, sgl,
2432 LPFC_SGE_TYPE_DATA);
2434 sgl->sge_len = cpu_to_le32(dma_len);
2435 dma_offset += dma_len;
2446 /* Move to the next s/g segment if possible */
2447 sgde = sg_next(sgde);
2455 if (protgroup_offset) {
2456 /* update the reference tag */
2457 reftag += protgrp_blks;
2462 if (curr_prot == protcnt) {
2463 /* mark the last SGL */
2465 bf_set(lpfc_sli4_sge_last, sgl, 1);
2467 } else if (curr_prot < protcnt) {
2468 /* advance to next prot buffer */
2469 sgpe = sg_next(sgpe);
2471 /* update the reference tag */
2472 reftag += protgrp_blks;
2474 /* if we're here, we have a bug */
2475 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2476 "9085 BLKGRD: bug in %s\n", __func__);
2487  * lpfc_prot_group_type - Get protection group type of SCSI command
2488  * @phba: The Hba for which this call is being executed.
2489  * @sc: pointer to scsi command we're working on
2491  * Given a SCSI command that supports DIF, determine composition of protection
2492  * groups involved in setting up buffer lists
2494  * Returns: Protection group type (with or without DIF)
2498 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2500 int ret = LPFC_PG_TYPE_INVALID;
2501 unsigned char op = scsi_get_prot_op(sc);
2504 case SCSI_PROT_READ_STRIP:
2505 case SCSI_PROT_WRITE_INSERT:
2506 ret = LPFC_PG_TYPE_NO_DIF;
2508 case SCSI_PROT_READ_INSERT:
2509 case SCSI_PROT_WRITE_STRIP:
2510 case SCSI_PROT_READ_PASS:
2511 case SCSI_PROT_WRITE_PASS:
2512 ret = LPFC_PG_TYPE_DIF_BUF;
2516 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2517 "9021 Unsupported protection op:%d\n",
2525 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2526 * @phba: The Hba for which this call is being executed.
2527 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2529 * Adjust the data length to account for how much data
2530 * is actually on the wire.
2532 * returns the adjusted data length
2535 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2536 struct lpfc_io_buf *lpfc_cmd)
2538 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2541 fcpdl = scsi_bufflen(sc);
2543 /* Check if there is protection data on the wire */
2544 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2545 /* Read check for protection data */
2546 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2550 /* Write check for protection data */
2551 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2556 * If we are in DIF Type 1 mode every data block has a 8 byte
2557 * DIF (trailer) attached to it. Must ajust FCP data length
2558 * to account for the protection data.
2560 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2566  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2567  * @phba: The Hba for which this call is being executed.
2568  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2570  * This is the protection/DIF aware version of
2571  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2572  * two functions eventually, but for now, it's here.
2573  * RETURNS 0 - SUCCESS,
2574  * 1 - Failed DMA map, retry.
2575  * 2 - Invalid scsi cmd or prot-type. Do not retry.
2578 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2579 struct lpfc_io_buf *lpfc_cmd)
2581 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2582 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2583 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2584 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2585 uint32_t num_bde = 0;
2586 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2587 int prot_group_type = 0;
2590 struct lpfc_vport *vport = phba->pport;
2593 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2594 * fcp_rsp regions to the first data bde entry
2597 if (scsi_sg_count(scsi_cmnd)) {
2599 * The driver stores the segment count returned from pci_map_sg
2600 * because this a count of dma-mappings used to map the use_sg
2601 * pages. They are not guaranteed to be the same for those
2602 * architectures that implement an IOMMU.
2604 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2605 scsi_sglist(scsi_cmnd),
2606 scsi_sg_count(scsi_cmnd), datadir);
2607 if (unlikely(!datasegcnt))
2610 lpfc_cmd->seg_cnt = datasegcnt;
2612 /* First check if data segment count from SCSI Layer is good */
2613 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2614 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2619 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2621 switch (prot_group_type) {
2622 case LPFC_PG_TYPE_NO_DIF:
2624 /* Here we need to add a PDE5 and PDE6 to the count */
2625 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2630 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2632 /* we should have 2 or more entries in buffer list */
2639 case LPFC_PG_TYPE_DIF_BUF:
2641 * This type indicates that protection buffers are
2642 * passed to the driver, so that needs to be prepared
2645 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2646 scsi_prot_sglist(scsi_cmnd),
2647 scsi_prot_sg_count(scsi_cmnd), datadir);
2648 if (unlikely(!protsegcnt)) {
2649 scsi_dma_unmap(scsi_cmnd);
2653 lpfc_cmd->prot_seg_cnt = protsegcnt;
2656 * There is a minimun of 4 BPLs used for every
2657 * protection data segment.
2659 if ((lpfc_cmd->prot_seg_cnt * 4) >
2660 (phba->cfg_total_seg_cnt - 2)) {
2665 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2666 datasegcnt, protsegcnt);
2667 /* we should have 3 or more entries in buffer list */
2668 if ((num_bde < 3) ||
2669 (num_bde > phba->cfg_total_seg_cnt)) {
2675 case LPFC_PG_TYPE_INVALID:
2677 scsi_dma_unmap(scsi_cmnd);
2678 lpfc_cmd->seg_cnt = 0;
2680 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2681 "9022 Unexpected protection group %i\n",
2688 * Finish initializing those IOCB fields that are dependent on the
2689 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
2690 * reinitialized since all iocb memory resources are used many times
2691 * for transmit, receive, and continuation bpl's.
2693 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2694 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2695 iocb_cmd->ulpBdeCount = 1;
2696 iocb_cmd->ulpLe = 1;
2698 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2699 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2702 * Due to difference in data length between DIF/non-DIF paths,
2703 * we need to set word 4 of IOCB here
2705 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2708 * For First burst, we may need to adjust the initial transfer
2711 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2712 (fcpdl < vport->cfg_first_burst_size))
2713 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2717 if (lpfc_cmd->seg_cnt)
2718 scsi_dma_unmap(scsi_cmnd);
2719 if (lpfc_cmd->prot_seg_cnt)
2720 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2721 scsi_prot_sg_count(scsi_cmnd),
2722 scsi_cmnd->sc_data_direction);
2724 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2725 "9023 Cannot setup S/G List for HBA"
2726 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2727 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2728 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2729 prot_group_type, num_bde);
2731 lpfc_cmd->seg_cnt = 0;
2732 lpfc_cmd->prot_seg_cnt = 0;
2737  * This function calculates the T10 DIF guard tag
2738  * on the specified data using a CRC algorithm
2742 lpfc_bg_crc(uint8_t *data, int count)
2747 crc = crc_t10dif(data, count);
2748 x = cpu_to_be16(crc);
2753  * This function calculates the T10 DIF guard tag
2754  * on the specified data using a CSUM algorithm
2755  * via ip_compute_csum.
2758 lpfc_bg_csum(uint8_t *data, int count)
2762 ret = ip_compute_csum(data, count);
2767 * This function examines the protection data to try to determine
2768 * what type of T10-DIF error occurred.
2771 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2773 struct scatterlist *sgpe; /* s/g prot entry */
2774 struct scatterlist *sgde; /* s/g data entry */
2775 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2776 struct scsi_dif_tuple *src = NULL;
2777 uint8_t *data_src = NULL;
2779 uint16_t start_app_tag, app_tag;
2780 uint32_t start_ref_tag, ref_tag;
2781 int prot, protsegcnt;
2782 int err_type, len, data_len;
2783 int chk_ref, chk_app, chk_guard;
2787 err_type = BGS_GUARD_ERR_MASK;
2791 /* First check to see if there is protection data to examine */
2792 prot = scsi_get_prot_op(cmd);
2793 if ((prot == SCSI_PROT_READ_STRIP) ||
2794 (prot == SCSI_PROT_WRITE_INSERT) ||
2795 (prot == SCSI_PROT_NORMAL))
2798 /* Currently the driver just supports ref_tag and guard_tag checking */
2803 /* Setup a ptr to the protection data provided by the SCSI host */
2804 sgpe = scsi_prot_sglist(cmd);
2805 protsegcnt = lpfc_cmd->prot_seg_cnt;
2807 if (sgpe && protsegcnt) {
2810 * We will only try to verify guard tag if the segment
2811 * data length is a multiple of the blksize.
2813 sgde = scsi_sglist(cmd);
2814 blksize = lpfc_cmd_blksize(cmd);
2815 data_src = (uint8_t *)sg_virt(sgde);
2816 data_len = sgde->length;
2817 if ((data_len & (blksize - 1)) == 0)
2820 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2821 start_ref_tag = t10_pi_ref_tag(cmd->request);
2822 if (start_ref_tag == LPFC_INVALID_REFTAG)
2824 start_app_tag = src->app_tag;
2826 while (src && protsegcnt) {
2830 * First check to see if a protection data
2833 if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2834 (src->app_tag == T10_PI_APP_ESCAPE)) {
2839 /* First Guard Tag checking */
2841 guard_tag = src->guard_tag;
2842 if (lpfc_cmd_guard_csum(cmd))
2843 sum = lpfc_bg_csum(data_src,
2846 sum = lpfc_bg_crc(data_src,
2848 if ((guard_tag != sum)) {
2849 err_type = BGS_GUARD_ERR_MASK;
2854 /* Reference Tag checking */
2855 ref_tag = be32_to_cpu(src->ref_tag);
2856 if (chk_ref && (ref_tag != start_ref_tag)) {
2857 err_type = BGS_REFTAG_ERR_MASK;
2862 /* App Tag checking */
2863 app_tag = src->app_tag;
2864 if (chk_app && (app_tag != start_app_tag)) {
2865 err_type = BGS_APPTAG_ERR_MASK;
2869 len -= sizeof(struct scsi_dif_tuple);
2874 data_src += blksize;
2875 data_len -= blksize;
2878 * Are we at the end of the Data segment?
2879 * The data segment is only used for Guard
2882 if (chk_guard && (data_len == 0)) {
2884 sgde = sg_next(sgde);
2888 data_src = (uint8_t *)sg_virt(sgde);
2889 data_len = sgde->length;
2890 if ((data_len & (blksize - 1)) == 0)
2895 /* Goto the next Protection data segment */
2896 sgpe = sg_next(sgpe);
2898 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2907 if (err_type == BGS_GUARD_ERR_MASK) {
2908 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2909 set_host_byte(cmd, DID_ABORT);
2910 phba->bg_guard_err_cnt++;
2911 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2912 "9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
2913 t10_pi_ref_tag(cmd->request),
2916 } else if (err_type == BGS_REFTAG_ERR_MASK) {
2917 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2918 set_host_byte(cmd, DID_ABORT);
2920 phba->bg_reftag_err_cnt++;
2921 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2922 "9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
2923 t10_pi_ref_tag(cmd->request),
2924 ref_tag, start_ref_tag);
2926 } else if (err_type == BGS_APPTAG_ERR_MASK) {
2927 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2928 set_host_byte(cmd, DID_ABORT);
2930 phba->bg_apptag_err_cnt++;
2931 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2932 "9041 BLKGRD: reftag %x app_tag err %x != %x\n",
2933 t10_pi_ref_tag(cmd->request),
2934 app_tag, start_app_tag);
2939 * This function checks for BlockGuard errors detected by
2940 * the HBA. In case of errors, the ASC/ASCQ fields in the
2941 * sense buffer will be set accordingly, paired with
2942 * ILLEGAL_REQUEST to signal to the kernel that the HBA
2943 * detected corruption.
2946 * 0 - No error found
2947 * 1 - BlockGuard error found
2948 * -1 - Internal error (bad profile, ...etc)
2951 lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2952 struct lpfc_wcqe_complete *wcqe)
2954 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2956 u32 status = bf_get(lpfc_wcqe_c_status, wcqe);
2959 u64 failing_sector = 0;
2961 if (status == CQE_STATUS_DI_ERROR) {
2962 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
2963 bgstat |= BGS_GUARD_ERR_MASK;
2964 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */
2965 bgstat |= BGS_APPTAG_ERR_MASK;
2966 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */
2967 bgstat |= BGS_REFTAG_ERR_MASK;
2969 /* Check to see if there was any good data before the error */
2970 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
2971 bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
2972 bghm = wcqe->total_data_placed;
2976 * Set ALL the error bits to indicate we don't know what
2977 * type of error it is.
2980 bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
2981 BGS_GUARD_ERR_MASK);
2984 if (lpfc_bgs_get_guard_err(bgstat)) {
2987 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2988 set_host_byte(cmd, DID_ABORT);
2989 phba->bg_guard_err_cnt++;
2990 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2991 "9059 BLKGRD: Guard Tag error in cmd"
2992 " 0x%x lba 0x%llx blk cnt 0x%x "
2993 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2994 (unsigned long long)scsi_get_lba(cmd),
2995 blk_rq_sectors(cmd->request), bgstat, bghm);
2998 if (lpfc_bgs_get_reftag_err(bgstat)) {
3001 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
3002 set_host_byte(cmd, DID_ABORT);
3004 phba->bg_reftag_err_cnt++;
3005 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3006 "9060 BLKGRD: Ref Tag error in cmd"
3007 " 0x%x lba 0x%llx blk cnt 0x%x "
3008 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3009 (unsigned long long)scsi_get_lba(cmd),
3010 blk_rq_sectors(cmd->request), bgstat, bghm);
3013 if (lpfc_bgs_get_apptag_err(bgstat)) {
3016 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
3017 set_host_byte(cmd, DID_ABORT);
3019 phba->bg_apptag_err_cnt++;
3020 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3021 "9062 BLKGRD: App Tag error in cmd"
3022 " 0x%x lba 0x%llx blk cnt 0x%x "
3023 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3024 (unsigned long long)scsi_get_lba(cmd),
3025 blk_rq_sectors(cmd->request), bgstat, bghm);
3028 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3030 * setup sense data descriptor 0 per SPC-4 as an information
3031 * field, and put the failing LBA in it.
3032 * This code assumes there was also a guard/app/ref tag error
3035 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
3036 cmd->sense_buffer[8] = 0; /* Information descriptor type */
3037 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
3038 cmd->sense_buffer[10] = 0x80; /* Validity bit */
3040 /* bghm is a "on the wire" FC frame based count */
3041 switch (scsi_get_prot_op(cmd)) {
3042 case SCSI_PROT_READ_INSERT:
3043 case SCSI_PROT_WRITE_STRIP:
3044 bghm /= cmd->device->sector_size;
3046 case SCSI_PROT_READ_STRIP:
3047 case SCSI_PROT_WRITE_INSERT:
3048 case SCSI_PROT_READ_PASS:
3049 case SCSI_PROT_WRITE_PASS:
3050 bghm /= (cmd->device->sector_size +
3051 sizeof(struct scsi_dif_tuple));
3055 failing_sector = scsi_get_lba(cmd);
3056 failing_sector += bghm;
3058 /* Descriptor Information */
3059 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3063 /* No error was reported - problem in FW? */
3064 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3065 "9068 BLKGRD: Unknown error in cmd"
3066 " 0x%x lba 0x%llx blk cnt 0x%x "
3067 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3068 (unsigned long long)scsi_get_lba(cmd),
3069 blk_rq_sectors(cmd->request), bgstat, bghm);
3071 /* Calcuate what type of error it was */
3072 lpfc_calc_bg_err(phba, lpfc_cmd);
3078 * This function checks for BlockGuard errors detected by
3079 * the HBA. In case of errors, the ASC/ASCQ fields in the
3080 * sense buffer will be set accordingly, paired with
3081 * ILLEGAL_REQUEST to signal to the kernel that the HBA
3082 * detected corruption.
3085 * 0 - No error found
3086 * 1 - BlockGuard error found
3087 * -1 - Internal error (bad profile, ...etc)
3090 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
3091 struct lpfc_iocbq *pIocbOut)
3093 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3094 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
3096 uint32_t bghm = bgf->bghm;
3097 uint32_t bgstat = bgf->bgstat;
3098 uint64_t failing_sector = 0;
3100 if (lpfc_bgs_get_invalid_prof(bgstat)) {
3101 cmd->result = DID_ERROR << 16;
3102 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3103 "9072 BLKGRD: Invalid BG Profile in cmd "
3104 "0x%x reftag 0x%x blk cnt 0x%x "
3105 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3106 t10_pi_ref_tag(cmd->request),
3107 blk_rq_sectors(cmd->request), bgstat, bghm);
3112 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
3113 cmd->result = DID_ERROR << 16;
3114 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3115 "9073 BLKGRD: Invalid BG PDIF Block in cmd "
3116 "0x%x reftag 0x%x blk cnt 0x%x "
3117 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3118 t10_pi_ref_tag(cmd->request),
3119 blk_rq_sectors(cmd->request), bgstat, bghm);
3124 if (lpfc_bgs_get_guard_err(bgstat)) {
3127 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
3128 set_host_byte(cmd, DID_ABORT);
3129 phba->bg_guard_err_cnt++;
3130 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3131 "9055 BLKGRD: Guard Tag error in cmd "
3132 "0x%x reftag 0x%x blk cnt 0x%x "
3133 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3134 t10_pi_ref_tag(cmd->request),
3135 blk_rq_sectors(cmd->request), bgstat, bghm);
3138 if (lpfc_bgs_get_reftag_err(bgstat)) {
3141 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
3142 set_host_byte(cmd, DID_ABORT);
3144 phba->bg_reftag_err_cnt++;
3145 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3146 "9056 BLKGRD: Ref Tag error in cmd "
3147 "0x%x reftag 0x%x blk cnt 0x%x "
3148 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3149 t10_pi_ref_tag(cmd->request),
3150 blk_rq_sectors(cmd->request), bgstat, bghm);
3153 if (lpfc_bgs_get_apptag_err(bgstat)) {
3156 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
3157 set_host_byte(cmd, DID_ABORT);
3159 phba->bg_apptag_err_cnt++;
3160 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3161 "9061 BLKGRD: App Tag error in cmd "
3162 "0x%x reftag 0x%x blk cnt 0x%x "
3163 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3164 t10_pi_ref_tag(cmd->request),
3165 blk_rq_sectors(cmd->request), bgstat, bghm);
3168 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3170 * setup sense data descriptor 0 per SPC-4 as an information
3171 * field, and put the failing LBA in it.
3172 * This code assumes there was also a guard/app/ref tag error
3175 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
3176 cmd->sense_buffer[8] = 0; /* Information descriptor type */
3177 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
3178 cmd->sense_buffer[10] = 0x80; /* Validity bit */
3180 /* bghm is a "on the wire" FC frame based count */
3181 switch (scsi_get_prot_op(cmd)) {
3182 case SCSI_PROT_READ_INSERT:
3183 case SCSI_PROT_WRITE_STRIP:
3184 bghm /= cmd->device->sector_size;
3186 case SCSI_PROT_READ_STRIP:
3187 case SCSI_PROT_WRITE_INSERT:
3188 case SCSI_PROT_READ_PASS:
3189 case SCSI_PROT_WRITE_PASS:
3190 bghm /= (cmd->device->sector_size +
3191 sizeof(struct scsi_dif_tuple));
3195 failing_sector = scsi_get_lba(cmd);
3196 failing_sector += bghm;
3198 /* Descriptor Information */
3199 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3203 /* No error was reported - problem in FW? */
3204 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3205 "9057 BLKGRD: Unknown error in cmd "
3206 "0x%x reftag 0x%x blk cnt 0x%x "
3207 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3208 t10_pi_ref_tag(cmd->request),
3209 blk_rq_sectors(cmd->request), bgstat, bghm);
3211 /* Calcuate what type of error it was */
3212 lpfc_calc_bg_err(phba, lpfc_cmd);
3219 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3220 * @phba: The Hba for which this call is being executed.
3221 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3223 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
3224 * field of @lpfc_cmd for device with SLI-4 interface spec.
3227 * 2 - Error - Do not retry
3232 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3234 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3235 struct scatterlist *sgel = NULL;
3236 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3237 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
3238 struct sli4_sge *first_data_sgl;
3239 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3240 struct lpfc_vport *vport = phba->pport;
3241 union lpfc_wqe128 *wqe = &pwqeq->wqe;
3242 dma_addr_t physaddr;
3243 uint32_t num_bde = 0;
3245 uint32_t dma_offset = 0;
3247 struct ulp_bde64 *bde;
3248 bool lsp_just_set = false;
3249 struct sli4_hybrid_sgl *sgl_xtra = NULL;
3252 * There are three possibilities here - use scatter-gather segment, use
3253 * the single mapping, or neither. Start the lpfc command prep by
3254 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3257 if (scsi_sg_count(scsi_cmnd)) {
3259 * The driver stores the segment count returned from pci_map_sg
3260 * because this a count of dma-mappings used to map the use_sg
3261 * pages. They are not guaranteed to be the same for those
3262 * architectures that implement an IOMMU.
3265 nseg = scsi_dma_map(scsi_cmnd);
3266 if (unlikely(nseg <= 0))
3269 /* clear the last flag in the fcp_rsp map entry */
3270 sgl->word2 = le32_to_cpu(sgl->word2);
3271 bf_set(lpfc_sli4_sge_last, sgl, 0);
3272 sgl->word2 = cpu_to_le32(sgl->word2);
3274 first_data_sgl = sgl;
3275 lpfc_cmd->seg_cnt = nseg;
3276 if (!phba->cfg_xpsgl &&
3277 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3278 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3280 " %s: Too many sg segments from "
3281 "dma_map_sg. Config %d, seg_cnt %d\n",
3282 __func__, phba->cfg_sg_seg_cnt,
3284 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3285 lpfc_cmd->seg_cnt = 0;
3286 scsi_dma_unmap(scsi_cmnd);
3291 * The driver established a maximum scatter-gather segment count
3292 * during probe that limits the number of sg elements in any
3293 * single scsi command. Just run through the seg_cnt and format
3295 * When using SLI-3 the driver will try to fit all the BDEs into
3296 * the IOCB. If it can't then the BDEs get added to a BPL as it
3297 * does for SLI-2 mode.
3300 /* for tracking segment boundaries */
3301 sgel = scsi_sglist(scsi_cmnd);
3303 for (i = 0; i < nseg; i++) {
3305 if ((num_bde + 1) == nseg) {
3306 bf_set(lpfc_sli4_sge_last, sgl, 1);
3307 bf_set(lpfc_sli4_sge_type, sgl,
3308 LPFC_SGE_TYPE_DATA);
3310 bf_set(lpfc_sli4_sge_last, sgl, 0);
3312 /* do we need to expand the segment */
3313 if (!lsp_just_set &&
3314 !((j + 1) % phba->border_sge_num) &&
3315 ((nseg - 1) != i)) {
3317 bf_set(lpfc_sli4_sge_type, sgl,
3320 sgl_xtra = lpfc_get_sgl_per_hdwq(
3323 if (unlikely(!sgl_xtra)) {
3324 lpfc_cmd->seg_cnt = 0;
3325 scsi_dma_unmap(scsi_cmnd);
3328 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3329 sgl_xtra->dma_phys_sgl));
3330 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3331 sgl_xtra->dma_phys_sgl));
3334 bf_set(lpfc_sli4_sge_type, sgl,
3335 LPFC_SGE_TYPE_DATA);
3339 if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3340 LPFC_SGE_TYPE_LSP)) {
3341 if ((nseg - 1) == i)
3342 bf_set(lpfc_sli4_sge_last, sgl, 1);
3344 physaddr = sg_dma_address(sgel);
3345 dma_len = sg_dma_len(sgel);
3346 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3348 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3351 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3352 sgl->word2 = cpu_to_le32(sgl->word2);
3353 sgl->sge_len = cpu_to_le32(dma_len);
3355 dma_offset += dma_len;
3356 sgel = sg_next(sgel);
3359 lsp_just_set = false;
3362 sgl->word2 = cpu_to_le32(sgl->word2);
3363 sgl->sge_len = cpu_to_le32(
3364 phba->cfg_sg_dma_buf_size);
3366 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3369 lsp_just_set = true;
3375 * Setup the first Payload BDE. For FCoE we just key off
3376 * Performance Hints, for FC we use lpfc_enable_pbde.
3377 * We populate words 13-15 of IOCB/WQE.
3379 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3380 phba->cfg_enable_pbde) {
3381 bde = (struct ulp_bde64 *)
3383 bde->addrLow = first_data_sgl->addr_lo;
3384 bde->addrHigh = first_data_sgl->addr_hi;
3385 bde->tus.f.bdeSize =
3386 le32_to_cpu(first_data_sgl->sge_len);
3387 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3388 bde->tus.w = cpu_to_le32(bde->tus.w);
3391 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
3395 /* clear the last flag in the fcp_rsp map entry */
3396 sgl->word2 = le32_to_cpu(sgl->word2);
3397 bf_set(lpfc_sli4_sge_last, sgl, 1);
3398 sgl->word2 = cpu_to_le32(sgl->word2);
3400 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3401 phba->cfg_enable_pbde) {
3402 bde = (struct ulp_bde64 *)
3404 memset(bde, 0, (sizeof(uint32_t) * 3));
3409 if (phba->cfg_enable_pbde)
3410 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
3413 * Finish initializing those IOCB fields that are dependent on the
3414 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
3415 * explicitly reinitialized.
3416 * all iocb memory resources are reused.
3418 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3419 /* Set first-burst provided it was successfully negotiated */
3420 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3421 vport->cfg_first_burst_size &&
3422 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3423 u32 init_len, total_len;
3425 total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3426 init_len = min(total_len, vport->cfg_first_burst_size);
3429 wqe->fcp_iwrite.initial_xfer_len = init_len;
3430 wqe->fcp_iwrite.total_xfer_len = total_len;
3433 wqe->fcp_iwrite.total_xfer_len =
3434 be32_to_cpu(fcp_cmnd->fcpDl);
3438 * If the OAS driver feature is enabled and the lun is enabled for
3439 * OAS, set the oas iocb related flags.
3441 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3442 scsi_cmnd->device->hostdata)->oas_enabled) {
3443 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3444 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3445 scsi_cmnd->device->hostdata)->priority;
3448 bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3449 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3451 if (lpfc_cmd->cur_iocbq.priority)
3452 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3453 (lpfc_cmd->cur_iocbq.priority << 1));
3455 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3456 (phba->cfg_XLanePriority << 1));
3463 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3464 * @phba: The Hba for which this call is being executed.
3465 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3467 * This is the protection/DIF aware version of
3468 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3469 * two functions eventually, but for now, it's here
3471 * 2 - Error - Do not retry
3476 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3477 struct lpfc_io_buf *lpfc_cmd)
3479 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3480 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3481 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3482 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3483 union lpfc_wqe128 *wqe = &pwqeq->wqe;
3484 uint32_t num_sge = 0;
3485 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3486 int prot_group_type = 0;
3489 struct lpfc_vport *vport = phba->pport;
3492 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3493 * fcp_rsp regions to the first data sge entry
3495 if (scsi_sg_count(scsi_cmnd)) {
3497 * The driver stores the segment count returned from pci_map_sg
3498 * because this a count of dma-mappings used to map the use_sg
3499 * pages. They are not guaranteed to be the same for those
3500 * architectures that implement an IOMMU.
3502 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3503 scsi_sglist(scsi_cmnd),
3504 scsi_sg_count(scsi_cmnd), datadir);
3505 if (unlikely(!datasegcnt))
3509 /* clear the last flag in the fcp_rsp map entry */
3510 sgl->word2 = le32_to_cpu(sgl->word2);
3511 bf_set(lpfc_sli4_sge_last, sgl, 0);
3512 sgl->word2 = cpu_to_le32(sgl->word2);
3515 lpfc_cmd->seg_cnt = datasegcnt;
3517 /* First check if data segment count from SCSI Layer is good */
3518 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3520 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3525 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3527 switch (prot_group_type) {
3528 case LPFC_PG_TYPE_NO_DIF:
3529 /* Here we need to add a DISEED to the count */
3530 if (((lpfc_cmd->seg_cnt + 1) >
3531 phba->cfg_total_seg_cnt) &&
3537 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3538 datasegcnt, lpfc_cmd);
3540 /* we should have 2 or more entries in buffer list */
3547 case LPFC_PG_TYPE_DIF_BUF:
3549 * This type indicates that protection buffers are
3550 * passed to the driver, so that needs to be prepared
3553 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3554 scsi_prot_sglist(scsi_cmnd),
3555 scsi_prot_sg_count(scsi_cmnd), datadir);
3556 if (unlikely(!protsegcnt)) {
3557 scsi_dma_unmap(scsi_cmnd);
3561 lpfc_cmd->prot_seg_cnt = protsegcnt;
3563 * There is a minimun of 3 SGEs used for every
3564 * protection data segment.
3566 if (((lpfc_cmd->prot_seg_cnt * 3) >
3567 (phba->cfg_total_seg_cnt - 2)) &&
3573 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3574 datasegcnt, protsegcnt, lpfc_cmd);
3576 /* we should have 3 or more entries in buffer list */
3578 (num_sge > phba->cfg_total_seg_cnt &&
3579 !phba->cfg_xpsgl)) {
3585 case LPFC_PG_TYPE_INVALID:
3587 scsi_dma_unmap(scsi_cmnd);
3588 lpfc_cmd->seg_cnt = 0;
3590 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3591 "9083 Unexpected protection group %i\n",
3597 switch (scsi_get_prot_op(scsi_cmnd)) {
3598 case SCSI_PROT_WRITE_STRIP:
3599 case SCSI_PROT_READ_STRIP:
3600 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3602 case SCSI_PROT_WRITE_INSERT:
3603 case SCSI_PROT_READ_INSERT:
3604 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3606 case SCSI_PROT_WRITE_PASS:
3607 case SCSI_PROT_READ_PASS:
3608 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3612 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3613 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3615 /* Set first-burst provided it was successfully negotiated */
3616 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3617 vport->cfg_first_burst_size &&
3618 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3619 u32 init_len, total_len;
3621 total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3622 init_len = min(total_len, vport->cfg_first_burst_size);
3625 wqe->fcp_iwrite.initial_xfer_len = init_len;
3626 wqe->fcp_iwrite.total_xfer_len = total_len;
3629 wqe->fcp_iwrite.total_xfer_len =
3630 be32_to_cpu(fcp_cmnd->fcpDl);
3634 * If the OAS driver feature is enabled and the lun is enabled for
3635 * OAS, set the oas iocb related flags.
3637 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3638 scsi_cmnd->device->hostdata)->oas_enabled) {
3639 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3642 bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3643 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3644 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3645 (phba->cfg_XLanePriority << 1));
3648 /* Word 7. DIF Flags */
3649 if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS)
3650 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
3651 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP)
3652 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
3653 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT)
3654 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
3656 lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS |
3657 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
3661 if (lpfc_cmd->seg_cnt)
3662 scsi_dma_unmap(scsi_cmnd);
3663 if (lpfc_cmd->prot_seg_cnt)
3664 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3665 scsi_prot_sg_count(scsi_cmnd),
3666 scsi_cmnd->sc_data_direction);
3668 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3669 "9084 Cannot setup S/G List for HBA"
3670 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3671 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3672 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3673 prot_group_type, num_sge);
3675 lpfc_cmd->seg_cnt = 0;
3676 lpfc_cmd->prot_seg_cnt = 0;
3681 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3682 * @phba: The Hba for which this call is being executed.
3683 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3685 * This routine wraps the actual DMA mapping function pointer from the
3693 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3695 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3699 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3701 * @phba: The Hba for which this call is being executed.
3702 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3704 * This routine wraps the actual DMA mapping function pointer from the
3712 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3714 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3718 * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
3720 * @vport: Pointer to vport object.
3721 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3722 * @tmo: Timeout value for IO
3724 * This routine initializes IOCB/WQE data structure from scsi command
3731 lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3734 return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
3738 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3739 * @phba: Pointer to hba context object.
3740 * @vport: Pointer to vport object.
3741 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3742 * @fcpi_parm: FCP Initiator parameter.
3744 * This function posts an event when there is a SCSI command reporting
3745 * error from the scsi device.
3748 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3749 struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
3750 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3751 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3752 uint32_t resp_info = fcprsp->rspStatus2;
3753 uint32_t scsi_status = fcprsp->rspStatus3;
3754 struct lpfc_fast_path_event *fast_path_evt = NULL;
3755 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3756 unsigned long flags;
/* NOTE(review): pnode and cmnd are dereferenced below without visible NULL
 * checks; the elided lines presumably bail out early when either is invalid
 * or when allocation of a fast-path event fails - confirm against the full
 * source.
 */
3761 /* If there is queuefull or busy condition send a scsi event */
3762 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3763 (cmnd->result == SAM_STAT_BUSY)) {
3764 fast_path_evt = lpfc_alloc_fast_evt(phba);
/* Queue-full/device-busy event keyed by the remote port's WWPN/WWNN */
3767 fast_path_evt->un.scsi_evt.event_type =
3769 fast_path_evt->un.scsi_evt.subcategory =
3770 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3771 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3772 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3773 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3774 &pnode->nlp_portname, sizeof(struct lpfc_name));
3775 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3776 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3777 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3778 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3779 fast_path_evt = lpfc_alloc_fast_evt(phba);
/* Check-condition event: capture sense key / ASC / ASCQ for READ_10 and
 * WRITE_10 commands that returned valid sense data.
 */
3782 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3784 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3785 LPFC_EVENT_CHECK_COND;
3786 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3788 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3789 &pnode->nlp_portname, sizeof(struct lpfc_name));
3790 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3791 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3792 fast_path_evt->un.check_cond_evt.sense_key =
3793 cmnd->sense_buffer[2] & 0xf;
3794 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3795 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3796 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3798 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3799 ((scsi_status == SAM_STAT_GOOD) &&
3800 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3802 * If status is good or resid does not match with fcp_param and
3803 * there is valid fcpi_parm, then there is a read_check error
3805 fast_path_evt = lpfc_alloc_fast_evt(phba);
3808 fast_path_evt->un.read_check_error.header.event_type =
3809 FC_REG_FABRIC_EVENT;
3810 fast_path_evt->un.read_check_error.header.subcategory =
3811 LPFC_EVENT_FCPRDCHKERR;
3812 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3813 &pnode->nlp_portname, sizeof(struct lpfc_name));
3814 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3815 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3816 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3817 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3818 fast_path_evt->un.read_check_error.fcpiparam =
/* Queue the populated event on the HBA work list under hbalock and wake
 * the worker thread to deliver it to the FC transport event queue.
 */
3823 fast_path_evt->vport = vport;
3824 spin_lock_irqsave(&phba->hbalock, flags);
3825 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3826 spin_unlock_irqrestore(&phba->hbalock, flags);
3827 lpfc_worker_wake_up(phba);
3832 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3833 * @phba: The HBA for which this call is being executed.
3834 * @psb: The scsi buffer which is going to be un-mapped.
3836 * This routine does DMA un-mapping of scatter gather list of scsi command
3837 * field of @lpfc_cmd for device with SLI-3 interface spec.
3840 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3843 * There are only two special cases to consider. (1) the scsi command
3844 * requested scatter-gather usage or (2) the scsi command allocated
3845 * a request buffer, but did not request use_sg. There is a third
3846 * case, but it does not require resource deallocation.
3848 if (psb->seg_cnt > 0)
3849 scsi_dma_unmap(psb->pCmd);
3850 if (psb->prot_seg_cnt > 0)
3851 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3852 scsi_prot_sg_count(psb->pCmd),
3853 psb->pCmd->sc_data_direction);
3857 * lpfc_handle_fcp_err - FCP response handler
3858 * @vport: The virtual port for which this call is being executed.
3859 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
3860 * @fcpi_parm: FCP Initiator parameter.
3862 * This routine is called to process response IOCB with status field
3863 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3864 * based upon SCSI and FCP error.
3867 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3870 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3871 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3872 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3873 uint32_t resp_info = fcprsp->rspStatus2;
3874 uint32_t scsi_status = fcprsp->rspStatus3;
3876 uint32_t host_status = DID_OK;
3877 uint32_t rsplen = 0;
3879 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
/* NOTE(review): `lp` (sense-buffer word pointer) and `fcpDl` are used below
 * but declared in elided lines - confirm their declarations against the
 * full source.
 */
3883 * If this is a task management command, there is no
3884 * scsi packet associated with this lpfc_cmd. The driver
3887 if (fcpcmd->fcpCntl2) {
/* Validate FCP_RSP: per the FCP spec the response-info length may only be
 * 0, 4 or 8 bytes; anything else marks the completion DID_ERROR.
 */
3892 if (resp_info & RSP_LEN_VALID) {
3893 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3894 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3895 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3896 "2719 Invalid response length: "
3897 "tgt x%x lun x%llx cmnd x%x rsplen "
3898 "x%x\n", cmnd->device->id,
3899 cmnd->device->lun, cmnd->cmnd[0],
3901 host_status = DID_ERROR;
3904 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3905 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3906 "2757 Protocol failure detected during "
3907 "processing of FCP I/O op: "
3908 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3910 cmnd->device->lun, cmnd->cmnd[0],
3912 host_status = DID_ERROR;
/* Copy sense data (clamped to the midlayer buffer) past any response-info
 * bytes that precede it in the FCP_RSP payload.
 */
3917 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3918 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3919 if (snslen > SCSI_SENSE_BUFFERSIZE)
3920 snslen = SCSI_SENSE_BUFFERSIZE;
3922 if (resp_info & RSP_LEN_VALID)
3923 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3924 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3926 lp = (uint32_t *)cmnd->sense_buffer;
3928 /* special handling for under run conditions */
3929 if (!scsi_status && (resp_info & RESID_UNDER)) {
3930 /* don't log under runs if fcp set... */
3931 if (vport->cfg_log_verbose & LOG_FCP)
3932 logit = LOG_FCP_ERROR;
3933 /* unless operator says so */
3934 if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3935 logit = LOG_FCP_UNDER;
3938 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3939 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3940 "Data: x%x x%x x%x x%x x%x\n",
3941 cmnd->cmnd[0], scsi_status,
3942 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3943 be32_to_cpu(fcprsp->rspResId),
3944 be32_to_cpu(fcprsp->rspSnsLen),
3945 be32_to_cpu(fcprsp->rspRspLen),
/* Default to zero residual; fcpDl is the total transfer length taken from
 * the wire-order FCP_CMND.
 */
3948 scsi_set_resid(cmnd, 0);
3949 fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3950 if (resp_info & RESID_UNDER) {
3951 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3953 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3954 "9025 FCP Underrun, expected %d, "
3955 "residual %d Data: x%x x%x x%x\n",
3957 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3961 * If there is an under run, check if under run reported by
3962 * storage array is same as the under run reported by HBA.
3963 * If this is not same, there is a dropped frame.
3965 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3966 lpfc_printf_vlog(vport, KERN_WARNING,
3967 LOG_FCP | LOG_FCP_ERROR,
3968 "9026 FCP Read Check Error "
3969 "and Underrun Data: x%x x%x x%x x%x\n",
3971 scsi_get_resid(cmnd), fcpi_parm,
3973 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3974 host_status = DID_ERROR;
3977 * The cmnd->underflow is the minimum number of bytes that must
3978 * be transferred for this command. Provided a sense condition
3979 * is not present, make sure the actual amount transferred is at
3980 * least the underflow value or fail.
3982 if (!(resp_info & SNS_LEN_VALID) &&
3983 (scsi_status == SAM_STAT_GOOD) &&
3984 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3985 < cmnd->underflow)) {
3986 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3987 "9027 FCP command x%x residual "
3988 "underrun converted to error "
3989 "Data: x%x x%x x%x\n",
3990 cmnd->cmnd[0], scsi_bufflen(cmnd),
3991 scsi_get_resid(cmnd), cmnd->underflow);
3992 host_status = DID_ERROR;
3994 } else if (resp_info & RESID_OVER) {
3995 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3996 "9028 FCP command x%x residual overrun error. "
3997 "Data: x%x x%x\n", cmnd->cmnd[0],
3998 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3999 host_status = DID_ERROR;
4002 * Check SLI validation that all the transfer was actually done
4003 * (fcpi_parm should be zero). Apply check only to reads.
4005 } else if (fcpi_parm) {
4006 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
4007 "9029 FCP %s Check Error Data: "
4008 "x%x x%x x%x x%x x%x\n",
4009 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
4011 fcpDl, be32_to_cpu(fcprsp->rspResId),
4012 fcpi_parm, cmnd->cmnd[0], scsi_status);
4014 /* There is some issue with the LPe12000 that causes it
4015 * to miscalculate the fcpi_parm and falsely trip this
4016 * recovery logic. Detect this case and don't error when true.
4018 if (fcpi_parm > fcpDl)
4021 switch (scsi_status) {
4023 case SAM_STAT_CHECK_CONDITION:
4024 /* Fabric dropped a data frame. Fail any successful
4025 * command in which we detected dropped frames.
4026 * A status of good or some check conditions could
4027 * be considered a successful command.
4029 host_status = DID_ERROR;
4032 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
/* Compose the midlayer result: host byte in bits 16+, SCSI status byte in
 * the low byte, then post any fast-path error event for this completion.
 */
4036 cmnd->result = host_status << 16 | scsi_status;
4037 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
4041 * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
4042 * @phba: The hba for which this call is being executed.
4043 * @pwqeIn: The command WQE for the scsi cmnd.
4044 * @wcqe: Pointer to driver response CQE object.
4046 * This routine assigns scsi command result by looking into response WQE
4047 * status field appropriately. This routine handles QUEUE FULL condition as
4048 * well by ramping down device queue depth.
4051 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
4052 struct lpfc_wcqe_complete *wcqe)
4054 struct lpfc_io_buf *lpfc_cmd =
4055 (struct lpfc_io_buf *)pwqeIn->context1;
4056 struct lpfc_vport *vport = pwqeIn->vport;
4057 struct lpfc_rport_data *rdata;
4058 struct lpfc_nodelist *ndlp;
4059 struct scsi_cmnd *cmd;
4060 unsigned long flags;
4061 struct lpfc_fast_path_event *fast_path_evt;
4062 struct Scsi_Host *shost;
4063 u32 logit = LOG_FCP;
4065 unsigned long iflags = 0;
/* NOTE(review): `idx` and `status` are used below but their declarations
 * fall in elided lines; the guard that triggers the log below presumably
 * tests lpfc_cmd for NULL - confirm against the full source.
 */
4068 /* Sanity check on return of outstanding command */
4070 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4071 "9032 Null lpfc_cmd pointer. No "
4072 "release, skip completion\n");
4076 rdata = lpfc_cmd->rdata;
4077 ndlp = rdata->pnode;
4079 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
4080 /* TOREMOVE - currently this flag is checked during
4081 * the release of lpfc_iocbq. Remove once we move
4082 * to lpfc_wqe_job construct.
4084 * This needs to be done outside buf_lock
4086 spin_lock_irqsave(&phba->hbalock, iflags);
4087 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY;
4088 spin_unlock_irqrestore(&phba->hbalock, iflags);
4091 /* Guard against abort handler being called at same time */
4092 spin_lock(&lpfc_cmd->buf_lock);
4094 /* Sanity check on return of outstanding command */
4095 cmd = lpfc_cmd->pCmd;
4097 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4098 "9042 I/O completion: Not an active IO\n");
4099 spin_unlock(&lpfc_cmd->buf_lock);
4100 lpfc_release_scsi_buf(phba, lpfc_cmd);
/* Per-hardware-queue completion accounting */
4103 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4104 if (phba->sli4_hba.hdwq)
4105 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4107 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4108 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4109 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4111 shost = cmd->device->host;
/* Extract completion status and low-order error parameter from the CQE */
4113 status = bf_get(lpfc_wcqe_c_status, wcqe);
4114 lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
4115 lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
4117 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4118 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
4119 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4120 if (phba->cfg_fcp_wait_abts_rsp)
4124 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4125 if (lpfc_cmd->prot_data_type) {
4126 struct scsi_dif_tuple *src = NULL;
4128 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4130 * Used to restore any changes to protection
4131 * data for error injection.
4133 switch (lpfc_cmd->prot_data_type) {
4134 case LPFC_INJERR_REFTAG:
4136 lpfc_cmd->prot_data;
4138 case LPFC_INJERR_APPTAG:
4140 (uint16_t)lpfc_cmd->prot_data;
4142 case LPFC_INJERR_GUARD:
4144 (uint16_t)lpfc_cmd->prot_data;
4150 lpfc_cmd->prot_data = 0;
4151 lpfc_cmd->prot_data_type = 0;
4152 lpfc_cmd->prot_data_segment = NULL;
/* Normalize driver-private status codes and pick a log verbosity before
 * reporting the failed completion.
 */
4155 if (unlikely(lpfc_cmd->status)) {
4156 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4157 (lpfc_cmd->result & IOERR_DRVR_MASK))
4158 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4159 else if (lpfc_cmd->status >= IOSTAT_CNT)
4160 lpfc_cmd->status = IOSTAT_DEFAULT;
4161 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4162 !lpfc_cmd->fcp_rsp->rspStatus3 &&
4163 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4164 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4167 logit = LOG_FCP | LOG_FCP_UNDER;
4168 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4169 "9034 FCP cmd x%x failed <%d/%lld> "
4170 "status: x%x result: x%x "
4171 "sid: x%x did: x%x oxid: x%x "
4172 "Data: x%x x%x x%x\n",
4174 cmd->device ? cmd->device->id : 0xffff,
4175 cmd->device ? cmd->device->lun : 0xffff,
4176 lpfc_cmd->status, lpfc_cmd->result,
4178 (ndlp) ? ndlp->nlp_DID : 0,
4179 lpfc_cmd->cur_iocbq.sli4_xritag,
4180 wcqe->parameter, wcqe->total_data_placed,
4181 lpfc_cmd->cur_iocbq.iotag);
/* Map HBA completion status to a SCSI midlayer result code */
4184 switch (lpfc_cmd->status) {
4185 case IOSTAT_SUCCESS:
4186 cmd->result = DID_OK << 16;
4188 case IOSTAT_FCP_RSP_ERROR:
4189 lpfc_handle_fcp_err(vport, lpfc_cmd,
4190 pwqeIn->wqe.fcp_iread.total_xfer_len -
4191 wcqe->total_data_placed);
4193 case IOSTAT_NPORT_BSY:
4194 case IOSTAT_FABRIC_BSY:
4195 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4196 fast_path_evt = lpfc_alloc_fast_evt(phba);
4199 fast_path_evt->un.fabric_evt.event_type =
4200 FC_REG_FABRIC_EVENT;
4201 fast_path_evt->un.fabric_evt.subcategory =
4202 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4203 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4205 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4206 &ndlp->nlp_portname,
4207 sizeof(struct lpfc_name));
4208 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4209 &ndlp->nlp_nodename,
4210 sizeof(struct lpfc_name));
4212 fast_path_evt->vport = vport;
4213 fast_path_evt->work_evt.evt =
4214 LPFC_EVT_FASTPATH_MGMT_EVT;
4215 spin_lock_irqsave(&phba->hbalock, flags);
4216 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4218 spin_unlock_irqrestore(&phba->hbalock, flags);
4219 lpfc_worker_wake_up(phba);
4220 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4221 "9035 Fabric/Node busy FCP cmd x%x failed"
4223 "status: x%x result: x%x "
4224 "sid: x%x did: x%x oxid: x%x "
4225 "Data: x%x x%x x%x\n",
4227 cmd->device ? cmd->device->id : 0xffff,
4228 cmd->device ? cmd->device->lun : 0xffff,
4229 lpfc_cmd->status, lpfc_cmd->result,
4231 (ndlp) ? ndlp->nlp_DID : 0,
4232 lpfc_cmd->cur_iocbq.sli4_xritag,
4234 wcqe->total_data_placed,
4235 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4237 case IOSTAT_REMOTE_STOP:
4239 /* This I/O was aborted by the target, we don't
4240 * know the rxid and because we did not send the
4241 * ABTS we cannot generate and RRQ.
4243 lpfc_set_rrq_active(phba, ndlp,
4244 lpfc_cmd->cur_iocbq.sli4_lxritag,
4248 case IOSTAT_LOCAL_REJECT:
4249 if (lpfc_cmd->result & IOERR_DRVR_MASK)
4250 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4251 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4253 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4254 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4256 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4257 cmd->result = DID_NO_CONNECT << 16;
/* Transient resource/RPI errors: ask the midlayer to requeue the IO */
4260 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4261 lpfc_cmd->result == IOERR_NO_RESOURCES ||
4262 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4263 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4264 cmd->result = DID_REQUEUE << 16;
4267 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4268 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4269 status == CQE_STATUS_DI_ERROR) {
4270 if (scsi_get_prot_op(cmd) !=
4273 * This is a response for a BG enabled
4274 * cmd. Parse BG error
4276 lpfc_sli4_parse_bg_err(phba, lpfc_cmd,
4280 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4281 "9040 non-zero BGSTAT on unprotected cmd\n");
4283 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4284 "9036 Local Reject FCP cmd x%x failed"
4286 "status: x%x result: x%x "
4287 "sid: x%x did: x%x oxid: x%x "
4288 "Data: x%x x%x x%x\n",
4290 cmd->device ? cmd->device->id : 0xffff,
4291 cmd->device ? cmd->device->lun : 0xffff,
4292 lpfc_cmd->status, lpfc_cmd->result,
4294 (ndlp) ? ndlp->nlp_DID : 0,
4295 lpfc_cmd->cur_iocbq.sli4_xritag,
4297 wcqe->total_data_placed,
4298 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4301 if (lpfc_cmd->status >= IOSTAT_CNT)
4302 lpfc_cmd->status = IOSTAT_DEFAULT;
4303 cmd->result = DID_ERROR << 16;
4304 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
4305 "9037 FCP Completion Error: xri %x "
4306 "status x%x result x%x [x%x] "
4308 lpfc_cmd->cur_iocbq.sli4_xritag,
4309 lpfc_cmd->status, lpfc_cmd->result,
4311 wcqe->total_data_placed);
4313 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4314 u32 *lp = (u32 *)cmd->sense_buffer;
4316 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4317 "9039 Iodone <%d/%llu> cmd x%px, error "
4318 "x%x SNS x%x x%x Data: x%x x%x\n",
4319 cmd->device->id, cmd->device->lun, cmd,
4320 cmd->result, *lp, *(lp + 3), cmd->retries,
4321 scsi_get_resid(cmd));
4324 lpfc_update_stats(vport, lpfc_cmd);
/* Ramp down the target queue depth if commands on this node are completing
 * slower than the configured cfg_max_scsicmpl_time threshold.
 */
4326 if (vport->cfg_max_scsicmpl_time &&
4327 time_after(jiffies, lpfc_cmd->start_time +
4328 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4329 spin_lock_irqsave(shost->host_lock, flags);
4331 if (ndlp->cmd_qdepth >
4332 atomic_read(&ndlp->cmd_pending) &&
4333 (atomic_read(&ndlp->cmd_pending) >
4334 LPFC_MIN_TGT_QDEPTH) &&
4335 (cmd->cmnd[0] == READ_10 ||
4336 cmd->cmnd[0] == WRITE_10))
4338 atomic_read(&ndlp->cmd_pending);
4340 ndlp->last_change_time = jiffies;
4342 spin_unlock_irqrestore(shost->host_lock, flags);
4344 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4346 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4347 if (lpfc_cmd->ts_cmd_start) {
4348 lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
4349 lpfc_cmd->ts_data_io = ktime_get_ns();
4350 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4351 lpfc_io_ktime(phba, lpfc_cmd);
/* Clear pCmd under buf_lock so a racing abort sees the IO as done */
4356 lpfc_cmd->pCmd = NULL;
4357 spin_unlock(&lpfc_cmd->buf_lock);
4359 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4360 cmd->scsi_done(cmd);
4363 * If there is an abort thread waiting for command completion
4364 * wake up the thread.
4366 spin_lock(&lpfc_cmd->buf_lock);
4367 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
4368 if (lpfc_cmd->waitq)
4369 wake_up(lpfc_cmd->waitq);
4371 spin_unlock(&lpfc_cmd->buf_lock);
4372 lpfc_release_scsi_buf(phba, lpfc_cmd);
4376 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
4377 * @phba: The Hba for which this call is being executed.
4378 * @pIocbIn: The command IOCBQ for the scsi cmnd.
4379 * @pIocbOut: The response IOCBQ for the scsi cmnd.
4381 * This routine assigns scsi command result by looking into response IOCB
4382 * status field appropriately. This routine handles QUEUE FULL condition as
4383 * well by ramping down device queue depth.
4386 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4387 struct lpfc_iocbq *pIocbOut)
4389 struct lpfc_io_buf *lpfc_cmd =
4390 (struct lpfc_io_buf *) pIocbIn->context1;
4391 struct lpfc_vport *vport = pIocbIn->vport;
4392 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4393 struct lpfc_nodelist *pnode = rdata->pnode;
4394 struct scsi_cmnd *cmd;
4395 unsigned long flags;
4396 struct lpfc_fast_path_event *fast_path_evt;
4397 struct Scsi_Host *shost;
4399 uint32_t logit = LOG_FCP;
/* NOTE(review): `idx` is used below but declared in an elided line -
 * confirm against the full source.
 */
4401 /* Guard against abort handler being called at same time */
4402 spin_lock(&lpfc_cmd->buf_lock);
4404 /* Sanity check on return of outstanding command */
4405 cmd = lpfc_cmd->pCmd;
4406 if (!cmd || !phba) {
4407 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4408 "2621 IO completion: Not an active IO\n");
4409 spin_unlock(&lpfc_cmd->buf_lock);
/* Per-hardware-queue completion accounting */
4413 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4414 if (phba->sli4_hba.hdwq)
4415 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4417 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4418 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4419 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4421 shost = cmd->device->host;
/* Copy the raw IOCB completion status/parameter into the IO buffer */
4423 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
4424 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
4425 /* pick up SLI4 exchange busy status from HBA */
4426 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4427 if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
4428 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4430 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4431 if (lpfc_cmd->prot_data_type) {
4432 struct scsi_dif_tuple *src = NULL;
4434 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4436 * Used to restore any changes to protection
4437 * data for error injection.
4439 switch (lpfc_cmd->prot_data_type) {
4440 case LPFC_INJERR_REFTAG:
4442 lpfc_cmd->prot_data;
4444 case LPFC_INJERR_APPTAG:
4446 (uint16_t)lpfc_cmd->prot_data;
4448 case LPFC_INJERR_GUARD:
4450 (uint16_t)lpfc_cmd->prot_data;
4456 lpfc_cmd->prot_data = 0;
4457 lpfc_cmd->prot_data_type = 0;
4458 lpfc_cmd->prot_data_segment = NULL;
/* Normalize driver-private status codes and pick a log verbosity before
 * reporting the failed completion.
 */
4462 if (unlikely(lpfc_cmd->status)) {
4463 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4464 (lpfc_cmd->result & IOERR_DRVR_MASK))
4465 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4466 else if (lpfc_cmd->status >= IOSTAT_CNT)
4467 lpfc_cmd->status = IOSTAT_DEFAULT;
4468 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4469 !lpfc_cmd->fcp_rsp->rspStatus3 &&
4470 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4471 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4474 logit = LOG_FCP | LOG_FCP_UNDER;
4475 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4476 "9030 FCP cmd x%x failed <%d/%lld> "
4477 "status: x%x result: x%x "
4478 "sid: x%x did: x%x oxid: x%x "
4481 cmd->device ? cmd->device->id : 0xffff,
4482 cmd->device ? cmd->device->lun : 0xffff,
4483 lpfc_cmd->status, lpfc_cmd->result,
4485 (pnode) ? pnode->nlp_DID : 0,
4486 phba->sli_rev == LPFC_SLI_REV4 ?
4487 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4488 pIocbOut->iocb.ulpContext,
4489 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
/* Map HBA completion status to a SCSI midlayer result code */
4491 switch (lpfc_cmd->status) {
4492 case IOSTAT_FCP_RSP_ERROR:
4493 /* Call FCP RSP handler to determine result */
4494 lpfc_handle_fcp_err(vport, lpfc_cmd,
4495 pIocbOut->iocb.un.fcpi.fcpi_parm);
4497 case IOSTAT_NPORT_BSY:
4498 case IOSTAT_FABRIC_BSY:
4499 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4500 fast_path_evt = lpfc_alloc_fast_evt(phba);
4503 fast_path_evt->un.fabric_evt.event_type =
4504 FC_REG_FABRIC_EVENT;
4505 fast_path_evt->un.fabric_evt.subcategory =
4506 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4507 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4509 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4510 &pnode->nlp_portname,
4511 sizeof(struct lpfc_name));
4512 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4513 &pnode->nlp_nodename,
4514 sizeof(struct lpfc_name));
4516 fast_path_evt->vport = vport;
4517 fast_path_evt->work_evt.evt =
4518 LPFC_EVT_FASTPATH_MGMT_EVT;
4519 spin_lock_irqsave(&phba->hbalock, flags);
4520 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4522 spin_unlock_irqrestore(&phba->hbalock, flags);
4523 lpfc_worker_wake_up(phba);
4525 case IOSTAT_LOCAL_REJECT:
4526 case IOSTAT_REMOTE_STOP:
4527 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4529 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4530 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4532 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4533 cmd->result = DID_NO_CONNECT << 16;
/* Transient resource/RPI errors: ask the midlayer to requeue the IO */
4536 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4537 lpfc_cmd->result == IOERR_NO_RESOURCES ||
4538 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4539 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4540 cmd->result = DID_REQUEUE << 16;
4543 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4544 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4545 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4546 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4548 * This is a response for a BG enabled
4549 * cmd. Parse BG error
4551 lpfc_parse_bg_err(phba, lpfc_cmd,
4555 lpfc_printf_vlog(vport, KERN_WARNING,
4557 "9031 non-zero BGSTAT "
4558 "on unprotected cmd\n");
4561 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4562 && (phba->sli_rev == LPFC_SLI_REV4)
4564 /* This IO was aborted by the target, we don't
4565 * know the rxid and because we did not send the
4566 * ABTS we cannot generate and RRQ.
4568 lpfc_set_rrq_active(phba, pnode,
4569 lpfc_cmd->cur_iocbq.sli4_lxritag,
4574 cmd->result = DID_ERROR << 16;
/* With no error status: fail the IO if the node is not mapped, else OK */
4578 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4579 cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
4582 cmd->result = DID_OK << 16;
4584 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4585 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4587 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4588 "0710 Iodone <%d/%llu> cmd x%px, error "
4589 "x%x SNS x%x x%x Data: x%x x%x\n",
4590 cmd->device->id, cmd->device->lun, cmd,
4591 cmd->result, *lp, *(lp + 3), cmd->retries,
4592 scsi_get_resid(cmd));
4595 lpfc_update_stats(vport, lpfc_cmd);
/* Ramp down the target queue depth if commands on this node are completing
 * slower than the configured cfg_max_scsicmpl_time threshold.
 */
4596 if (vport->cfg_max_scsicmpl_time &&
4597 time_after(jiffies, lpfc_cmd->start_time +
4598 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4599 spin_lock_irqsave(shost->host_lock, flags);
4601 if (pnode->cmd_qdepth >
4602 atomic_read(&pnode->cmd_pending) &&
4603 (atomic_read(&pnode->cmd_pending) >
4604 LPFC_MIN_TGT_QDEPTH) &&
4605 ((cmd->cmnd[0] == READ_10) ||
4606 (cmd->cmnd[0] == WRITE_10)))
4608 atomic_read(&pnode->cmd_pending);
4610 pnode->last_change_time = jiffies;
4612 spin_unlock_irqrestore(shost->host_lock, flags);
4614 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
/* Clear pCmd under buf_lock so a racing abort sees the IO as done */
4616 lpfc_cmd->pCmd = NULL;
4617 spin_unlock(&lpfc_cmd->buf_lock);
4619 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4620 if (lpfc_cmd->ts_cmd_start) {
4621 lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
4622 lpfc_cmd->ts_data_io = ktime_get_ns();
4623 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4624 lpfc_io_ktime(phba, lpfc_cmd);
4628 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4629 cmd->scsi_done(cmd);
4632 * If there is an abort thread waiting for command completion
4633 * wake up the thread.
4635 spin_lock(&lpfc_cmd->buf_lock);
4636 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
4637 if (lpfc_cmd->waitq)
4638 wake_up(lpfc_cmd->waitq);
4639 spin_unlock(&lpfc_cmd->buf_lock);
4641 lpfc_release_scsi_buf(phba, lpfc_cmd);
4645 * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
4646 * @vport: Pointer to vport object.
4647 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4648 * @tmo: timeout value for the IO
4650 * Based on the data-direction of the command, initialize IOCB
4651 * in the I/O buffer. Fill in the IOCB fields which are independent
4652 * of the scsi buffer
4654 * RETURNS 0 - SUCCESS,
/* SLI-3: fill in the IOCB for a SCSI I/O based on its data direction.
 * NOTE(review): this excerpt is missing some original lines (the tmo
 * parameter line, several braces and else keywords); the comments below
 * describe only the statements visible here.
 */
4656 static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
4657 struct lpfc_io_buf *lpfc_cmd,
/* Aliases into the command's IOCB and FCP_CMND areas. */
4660 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4661 struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
4662 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4663 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4664 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4665 int datadir = scsi_cmnd->sc_data_direction;
/* Default: no first-burst (XFER_RDY) credit. */
4668 piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4671 * There are three possibilities here - use scatter-gather segment, use
4672 * the single mapping, or neither. Start the lpfc command prep by
4673 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
/* Data-bearing command: choose IWRITE vs IREAD opcode. */
4676 if (scsi_sg_count(scsi_cmnd)) {
4677 if (datadir == DMA_TO_DEVICE) {
4678 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4679 iocb_cmd->ulpPU = PARM_READ_CHECK;
/* First burst: cap the initial XFER_RDY length at the configured size. */
4680 if (vport->cfg_first_burst_size &&
4681 (pnode->nlp_flag & NLP_FIRSTBURST) {
4684 fcpdl = scsi_bufflen(scsi_cmnd);
4685 xrdy_len = min(fcpdl,
4686 vport->cfg_first_burst_size);
4687 piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
4689 fcp_cmnd->fcpCntl3 = WRITE_DATA;
/* Read path (else branch of the direction test in the full source). */
4691 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4692 iocb_cmd->ulpPU = PARM_READ_CHECK;
4693 fcp_cmnd->fcpCntl3 = READ_DATA;
/* No data phase: plain ICMND, no parameter/PU/data-direction bits. */
4696 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4697 iocb_cmd->un.fcpi.fcpi_parm = 0;
4698 iocb_cmd->ulpPU = 0;
4699 fcp_cmnd->fcpCntl3 = 0;
4703 * Finish initializing those IOCB fields that are independent
4704 * of the scsi_cmnd request_buffer
/* Route the IOCB to the remote node via its RPI; flag FCP-2 recovery. */
4706 piocbq->iocb.ulpContext = pnode->nlp_rpi;
4707 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4708 piocbq->iocb.ulpFCP2Rcvy = 1;
4710 piocbq->iocb.ulpFCP2Rcvy = 0;
4712 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4713 piocbq->context1 = lpfc_cmd;
/* Keep a caller-installed completion if one is already set. */
4714 if (!piocbq->iocb_cmpl)
4715 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4716 piocbq->iocb.ulpTimeout = tmo;
4717 piocbq->vport = vport;
4722 * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
4723 * @vport: Pointer to vport object.
4724 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4725 * @tmo: timeout value for the IO
4727 * Based on the data-direction of the command copy WQE template
4728 * to I/O buffer WQE. Fill in the WQE fields which are independent
4729 * of the scsi buffer
4731 * RETURNS 0 - SUCCESS,
/* SLI-4: copy the direction-specific WQE template into the I/O buffer's
 * WQE and fill in the fields independent of the scsi buffer.
 * Fixes in this revision:
 *  - removed a duplicated "pwqeq->vport = vport;" assignment (it was
 *    executed twice back to back with no effect the second time);
 *  - corrected the stale "64 bytes" comment: the memset clears the full
 *    128-byte WQE (sizeof(union lpfc_wqe128)).
 * NOTE(review): some original lines (braces, else keywords, return) are
 * missing from this excerpt.
 */
4733 static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
4734 struct lpfc_io_buf *lpfc_cmd,
4737 struct lpfc_hba *phba = vport->phba;
4738 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4739 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4740 struct lpfc_sli4_hdw_queue *hdwq = NULL;
4741 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4742 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4743 union lpfc_wqe128 *wqe = &pwqeq->wqe;
4744 u16 idx = lpfc_cmd->hdwq_no;
4745 int datadir = scsi_cmnd->sc_data_direction;
/* Hardware queue this I/O is bound to; used for per-queue statistics. */
4747 hdwq = &phba->sli4_hba.hdwq[idx];
4749 /* Clear the entire 128-byte WQE before applying the template */
4750 memset(wqe, 0, sizeof(union lpfc_wqe128));
4753 * There are three possibilities here - use scatter-gather segment, use
4754 * the single mapping, or neither.
4756 if (scsi_sg_count(scsi_cmnd)) {
4757 if (datadir == DMA_TO_DEVICE) {
4758 /* From the iwrite template, initialize words 7 - 11 */
4759 memcpy(&wqe->words[7],
4760 &lpfc_iwrite_cmd_template.words[7],
4761 sizeof(uint32_t) * 5);
4763 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4765 hdwq->scsi_cstat.output_requests++;
4767 /* From the iread template, initialize words 7 - 11 */
4768 memcpy(&wqe->words[7],
4769 &lpfc_iread_cmd_template.words[7],
4770 sizeof(uint32_t) * 5);
/* Word 7: per-I/O timeout for the read. */
4773 bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);
4775 fcp_cmnd->fcpCntl3 = READ_DATA;
4777 hdwq->scsi_cstat.input_requests++;
4780 /* From the icmnd template, initialize words 4 - 11 */
4781 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4782 sizeof(uint32_t) * 8);
4785 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);
4787 fcp_cmnd->fcpCntl3 = 0;
4789 hdwq->scsi_cstat.control_requests++;
4793 * Finish initializing those WQE fields that are independent
4794 * of the request_buffer
/* Payload length covers the FCP_CMND plus FCP_RSP regions. */
4798 bf_set(payload_offset_len, &wqe->fcp_icmd,
4799 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
/* Address the remote node by its SLI-4 RPI and bind our XRI. */
4802 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
4803 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
4804 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
/* FCP-2 target: enable error-recovery processing. */
4807 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4808 bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
4810 bf_set(wqe_class, &wqe->generic.wqe_com,
4811 (pnode->nlp_fcp_info & 0x0f));
/* Tags used to match a later abort/completion to this request. */
4814 wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
4817 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
4819 pwqeq->vport = vport;
4821 pwqeq->context1 = lpfc_cmd;
4822 pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
4823 pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
4829 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
4830 * @vport: The virtual port for which this call is being executed.
4831 * @lpfc_cmd: The scsi command which needs to send.
4832 * @pnode: Pointer to lpfc_nodelist.
4834 * This routine initializes fcp_cmnd and iocb data structure from scsi command
4835 * to transfer for device with SLI3 interface spec.
/* Build the FCP_CMND information unit from the midlayer scsi command,
 * then dispatch to the SLI-rev-specific buffer prep via the jump table.
 */
4838 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4839 struct lpfc_nodelist *pnode)
4841 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4842 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
/* Reset response sense length from any prior use of this buffer. */
4848 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4849 /* clear task management bits */
4850 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
/* Encode the LUN into the FCP_CMND in SAM format. */
4852 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4853 &lpfc_cmd->fcp_cmnd->fcp_lun);
/* Copy the CDB and zero-pad the remainder of the fixed-size CDB area. */
4855 ptr = &fcp_cmnd->fcpCdb[0];
4856 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4857 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4858 ptr += scsi_cmnd->cmd_len;
4859 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4862 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
/* SLI-3 vs SLI-4 specific init (prep_cmnd_buf_s3/_s4) via jump table. */
4864 lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
4870 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
4871 * @vport: The virtual port for which this call is being executed.
4872 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4873 * @lun: Logical unit number.
4874 * @task_mgmt_cmd: SCSI task management command.
4876 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4877 * for device with SLI-3 interface spec.
/* Build an FCP task-management IU (+IOCB) for @task_mgmt_cmd targeted at
 * @lun on the node behind lpfc_cmd->rdata.
 * NOTE(review): failure-return lines are missing from this excerpt.
 */
4884 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4885 struct lpfc_io_buf *lpfc_cmd,
4887 uint8_t task_mgmt_cmd)
4889 struct lpfc_iocbq *piocbq;
4891 struct fcp_cmnd *fcp_cmnd;
4892 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4893 struct lpfc_nodelist *ndlp = rdata->pnode;
/* Node must exist and be fully mapped before issuing TM commands. */
4895 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4898 piocbq = &(lpfc_cmd->cur_iocbq);
4899 piocbq->vport = vport;
4901 piocb = &piocbq->iocb;
4903 fcp_cmnd = lpfc_cmd->fcp_cmnd;
4904 /* Clear out any old data in the FCP command area */
4905 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4906 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
/* fcpCntl2 carries the task-management flags in the FCP_CMND IU. */
4907 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
/* SLI-3 without BlockGuard embeds the FCP_CMND inline in the IOCB. */
4908 if (vport->phba->sli_rev == 3 &&
4909 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4910 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4911 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4912 piocb->ulpContext = ndlp->nlp_rpi;
/* SLI-4 uses the translated (hardware) RPI as the context tag. */
4913 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4915 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4917 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4918 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4920 piocb->un.fcpi.fcpi_parm = 0;
4922 /* ulpTimeout is only one byte */
4923 if (lpfc_cmd->timeout > 0xff) {
4925 * Do not timeout the command at the firmware level.
4926 * The driver will provide the timeout mechanism.
4928 piocb->ulpTimeout = 0;
4930 piocb->ulpTimeout = lpfc_cmd->timeout;
/* SLI-4: terminate the response SGL chain for this buffer. */
4932 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4933 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4939 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4940 * @phba: The hba struct for which this call is being executed.
4941 * @dev_grp: The HBA PCI-Device group number.
4943 * This routine sets up the SCSI interface API function jump table in @phba
4945 * Returns: 0 - success, -ENODEV - failure.
/* Populate the per-HBA SCSI API jump table with the SLI-3 (DEV_LP) or
 * SLI-4 (DEV_OC) implementations; unknown groups log an error.
 * NOTE(review): the switch statement line and default-case return are
 * missing from this excerpt.
 */
4948 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
/* Unprep (DMA unmap) is shared between both SLI revisions. */
4951 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4954 case LPFC_PCI_DEV_LP:
4955 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4956 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4957 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4958 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4959 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
4961 case LPFC_PCI_DEV_OC:
4962 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4963 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4964 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4965 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4966 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
/* Invalid PCI device group: log and (in the full source) fail -ENODEV. */
4969 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4970 "1418 Invalid HBA PCI-device group: 0x%x\n",
/* Common entries independent of SLI revision. */
4974 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4975 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4980 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
4981 * @phba: The Hba for which this call is being executed.
4982 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4983 * @rspiocbq: Pointer to lpfc_iocbq data structure.
4985 * This routine is IOCB completion routine for device reset and target reset
4986 * routine. This routine release scsi buffer associated with lpfc_cmd.
/* Default completion for task-management IOCBs: just release the scsi
 * buffer attached as context1; the response itself is not inspected.
 */
4989 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4990 struct lpfc_iocbq *cmdiocbq,
4991 struct lpfc_iocbq *rspiocbq)
4993 struct lpfc_io_buf *lpfc_cmd =
4994 (struct lpfc_io_buf *) cmdiocbq->context1;
4996 lpfc_release_scsi_buf(phba, lpfc_cmd);
5001 * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
5002 * if issuing a pci_bus_reset is possibly unsafe
5003 * @phba: lpfc_hba pointer.
5006 * Walks the bus_list to ensure only PCI devices with Emulex
5007 * vendor id, device ids that support hot reset, and only one occurrence
5011 * -EBADSLT, detected invalid device
/* Walk the devices on our PCI bus and decide whether a bus reset is safe:
 * every device must be an Emulex Lancer-family HBA and at most one
 * function-0 device may be present.
 * NOTE(review): the "continue"/"return -EBADSLT" and final return lines
 * are missing from this excerpt.
 */
5015 lpfc_check_pci_resettable(struct lpfc_hba *phba)
5017 const struct pci_dev *pdev = phba->pcidev;
5018 struct pci_dev *ptr = NULL;
5021 /* Walk the list of devices on the pci_dev's bus */
5022 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
5023 /* Check for Emulex Vendor ID */
5024 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
5025 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5026 "8346 Non-Emulex vendor found: "
5027 "0x%04x\n", ptr->vendor);
5031 /* Check for valid Emulex Device ID */
5032 switch (ptr->device) {
5033 case PCI_DEVICE_ID_LANCER_FC:
5034 case PCI_DEVICE_ID_LANCER_G6_FC:
5035 case PCI_DEVICE_ID_LANCER_G7_FC:
/* Any other Emulex device is not hot-reset capable. */
5038 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5039 "8347 Incapable PCI reset device: "
5040 "0x%04x\n", ptr->device);
5044 /* Check for only one function 0 ID to ensure only one HBA on
5047 if (ptr->devfn == 0) {
5048 if (++counter > 1) {
5049 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5050 "8348 More than one device on "
5051 "secondary bus found\n");
5061 * lpfc_info - Info entry point of scsi_host_template data structure
5062 * @host: The scsi host for which this call is being executed.
5064 * This routine provides module information about hba.
5067 * Pointer to char - Success.
/* scsi_host_template .info entry point: compose a one-line adapter
 * description (model, PCI location, port, link speed, resettability)
 * into a static buffer.  Each piece is built in tmp and appended with
 * strlcat, bailing out (in the full source) once the buffer is full.
 */
5070 lpfc_info(struct Scsi_Host *host)
5072 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
5073 struct lpfc_hba *phba = vport->phba;
/* Static: returned pointer must outlive this call (scsi_host contract). */
5075 static char lpfcinfobuf[384];
5076 char tmp[384] = {0};
5078 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
5079 if (phba && phba->pcidev){
5080 /* Model Description */
/* NOTE(review): ModelDesc is used as the format string here; safe only
 * because it is driver-generated, never user input. */
5081 scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
5082 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5083 sizeof(lpfcinfobuf))
/* PCI bus/device/irq location. */
5087 scnprintf(tmp, sizeof(tmp),
5088 " on PCI bus %02x device %02x irq %d",
5089 phba->pcidev->bus->number, phba->pcidev->devfn,
5091 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5092 sizeof(lpfcinfobuf))
/* Optional port designation string. */
5096 if (phba->Port[0]) {
5097 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
5098 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5099 sizeof(lpfcinfobuf))
/* Logical link speed, only when non-zero. */
5104 link_speed = lpfc_sli_port_speed_get(phba);
5105 if (link_speed != 0) {
5106 scnprintf(tmp, sizeof(tmp),
5107 " Logical Link Speed: %d Mbps", link_speed);
5108 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5109 sizeof(lpfcinfobuf))
5113 /* PCI resettable */
5114 if (!lpfc_check_pci_resettable(phba)) {
5115 scnprintf(tmp, sizeof(tmp), " PCI resettable");
5116 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
5125 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
5126 * @phba: The Hba for which this call is being executed.
5128 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
5129 * The default value of cfg_poll_tmo is 10 milliseconds.
/* Re-arm the FCP ring poll timer for cfg_poll_tmo ms, but only while the
 * SLI-3 FCP ring still has commands awaiting completion.
 */
5131 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
5133 unsigned long poll_tmo_expires =
5134 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
5136 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
5137 mod_timer(&phba->fcp_poll_timer,
5142 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
5143 * @phba: The Hba for which this call is being executed.
5145 * This routine starts the fcp_poll_timer of @phba.
/* Kick off FCP ring polling by (re)arming the poll timer. */
5147 void lpfc_poll_start_timer(struct lpfc_hba * phba)
5149 lpfc_poll_rearm_timer(phba);
5153 * lpfc_poll_timeout - Restart polling timer
5154 * @t: Timer construct where lpfc_hba data structure pointer is obtained.
5156 * This routine restarts fcp_poll timer, when FCP ring polling is enable
5157 * and FCP Ring interrupt is disable.
/* Poll-timer callback: service completed fast-ring events and, when ring
 * interrupts are disabled, re-arm the timer for the next poll cycle.
 */
5159 void lpfc_poll_timeout(struct timer_list *t)
5161 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
5163 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5164 lpfc_sli_handle_fast_ring_event(phba,
5165 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5167 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5168 lpfc_poll_rearm_timer(phba);
5173 * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
5174 * @vport: The virtual port for which this call is being executed.
5175 * @hash: calculated hash value
5176 * @buf: uuid associated with the VE
5177 * Return the VMID entry associated with the UUID
5178 * Make sure to acquire the appropriate lock before invoking this routine.
/* Look up the VMID entry whose 16-byte host_vmid matches @buf in the
 * vport hash table bucket for @hash.  Caller must hold vmid_lock.
 */
5180 struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
5183 struct lpfc_vmid *vmp;
5185 hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
5186 if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
5193 * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
5194 * @vport: The virtual port for which this call is being executed.
5195 @hash: calculated hash value
5196 * @vmp: Pointer to a VMID entry representing a VM sending I/O
5198 * This routine will insert the newly acquired VMID entity in the hash table.
5199 * Make sure to acquire the appropriate lock before invoking this routine.
/* Insert @vmp into the vport VMID hash table under @hash.
 * Caller must hold vmid_lock for writing.
 */
5202 lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
5203 struct lpfc_vmid *vmp)
5205 hash_add(vport->hash_table, &vmp->hnode, hash);
5209 * lpfc_vmid_hash_fn - create a hash value of the UUID
5210 * @vmid: uuid associated with the VE
5211 * @len: length of the VMID string
5212 * Returns the calculated hash value
/* Hash a UUID string of @len bytes into the VMID table index space.
 * Uppercase letters are folded (visible at the A-Z test) before mixing.
 * NOTE(review): the loop header and the lowercase-conversion line are
 * missing from this excerpt.
 */
5214 int lpfc_vmid_hash_fn(const char *vmid, int len)
5223 if (c >= 'A' && c <= 'Z')
/* Multiplicative mix of the character's shifted forms. */
5226 hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
5227 (c >> LPFC_VMID_HASH_SHIFT)) * 19;
5230 return hash & LPFC_VMID_HASH_MASK;
5234 * lpfc_vmid_update_entry - update the vmid entry in the hash table
5235 * @vport: The virtual port for which this call is being executed.
5236 * @cmd: address of scsi cmd descriptor
5237 * @vmp: Pointer to a VMID entry representing a VM sending I/O
/* Fill the I/O tag from an existing VMID entry (CS_CTL priority tag or
 * app id, depending on the tagging mode), bump the per-direction I/O
 * counter, and refresh the entry's per-CPU last-access timestamp.
 */
5240 static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
5241 *cmd, struct lpfc_vmid *vmp,
5242 union lpfc_vmid_io_tag *tag)
5246 if (vport->vmid_priority_tagging)
5247 tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
5249 tag->app_id = vmp->un.app_id;
/* NOTE(review): counter-increment lines for read/write are missing here. */
5251 if (cmd->sc_data_direction == DMA_TO_DEVICE)
5256 /* update the last access timestamp in the table */
5257 lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
/* Assign a CS_CTL value to @vmid: a physical port allocates a fresh one;
 * an NPIV vport first tries to reuse the value the physical port already
 * mapped for the same UUID, falling back to a fresh allocation.
 */
5261 static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
5262 struct lpfc_vmid *vmid)
5265 struct lpfc_vmid *pvmid;
5267 if (vport->port_type == LPFC_PHYSICAL_PORT) {
5268 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
/* Vport path: look up the UUID in the physical port's table. */
5270 hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
5272 lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
5275 vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
5277 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
5282 * lpfc_vmid_get_appid - get the VMID associated with the UUID
5283 * @vport: The virtual port for which this call is being executed.
5284 * @uuid: UUID associated with the VE
5285 * @cmd: address of scsi_cmd descriptor
5287 * Returns status of the function
/* Resolve @uuid to a VMID I/O tag.  Fast path: read-locked hash lookup of
 * an already-registered entry.  Slow path: drop the read lock, retake as
 * writer, re-check (another context may have raced in), then allocate a
 * free slot, register it with the switch (UVEM or RAPP_IDENT), and start
 * the inactivity timer on first use.
 * NOTE(review): several return statements and brace lines are missing
 * from this excerpt; the lock-drop/retake sequence is order-sensitive.
 */
5289 static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
5290 scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag)
5292 struct lpfc_vmid *vmp = NULL;
5293 int hash, len, rc, i;
5295 /* check if QFPA is complete */
5296 if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag &
5297 LPFC_VMID_QFPA_CMPL)) {
/* Defer: ask the worker thread to issue QFPA first. */
5298 vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5302 /* search if the UUID has already been mapped to the VMID */
5304 hash = lpfc_vmid_hash_fn(uuid, len);
5306 /* search for the VMID in the table */
5307 read_lock(&vport->vmid_lock);
5308 vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
5310 /* if found, check if its already registered */
5311 if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
5312 read_unlock(&vport->vmid_lock);
5313 lpfc_vmid_update_entry(vport, cmd, vmp, tag);
5315 } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
5316 vmp->flag & LPFC_VMID_DE_REGISTER)) {
5317 /* else if register or dereg request has already been sent */
5318 /* Hence VMID tag will not be added for this I/O */
5319 read_unlock(&vport->vmid_lock);
5322 /* The VMID was not found in the hashtable. At this point, */
5323 /* drop the read lock first before proceeding further */
5324 read_unlock(&vport->vmid_lock);
5325 /* start the process to obtain one as per the */
5326 /* type of the VMID indicated */
5327 write_lock(&vport->vmid_lock);
5328 vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
5330 /* while the read lock was released, in case the entry was */
5331 /* added by other context or is in process of being added */
5332 if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
5333 lpfc_vmid_update_entry(vport, cmd, vmp, tag);
5334 write_unlock(&vport->vmid_lock);
5336 } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
5337 write_unlock(&vport->vmid_lock);
5341 /* else search and allocate a free slot in the hash table */
5342 if (vport->cur_vmid_cnt < vport->max_vmid) {
5343 for (i = 0; i < vport->max_vmid; i++) {
5344 vmp = vport->vmid + i;
5345 if (vmp->flag == LPFC_VMID_SLOT_FREE)
5348 if (i == vport->max_vmid)
5355 write_unlock(&vport->vmid_lock);
5359 /* Add the vmid and register */
5360 lpfc_put_vmid_in_hashtable(vport, hash, vmp);
5361 vmp->vmid_len = len;
5362 memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
5365 vmp->flag = LPFC_VMID_SLOT_USED;
/* Inactivity GC only when a timeout is configured. */
5367 vmp->delete_inactive =
5368 vport->vmid_inactivity_timeout ? 1 : 0;
5370 /* if type priority tag, get next available VMID */
5371 if (lpfc_vmid_is_type_priority_tag(vport))
5372 lpfc_vmid_assign_cs_ctl(vport, vmp);
5374 /* allocate the per cpu variable for holding */
5375 /* the last access time stamp only if VMID is enabled */
5376 if (!vmp->last_io_time)
5377 vmp->last_io_time = __alloc_percpu(sizeof(u64),
/* Allocation failed: undo the hash insert and release the slot. */
5380 if (!vmp->last_io_time) {
5381 hash_del(&vmp->hnode);
5382 vmp->flag = LPFC_VMID_SLOT_FREE;
5383 write_unlock(&vport->vmid_lock);
5387 write_unlock(&vport->vmid_lock);
5389 /* complete transaction with switch */
5390 if (lpfc_vmid_is_type_priority_tag(vport))
5391 rc = lpfc_vmid_uvem(vport, vmp, true);
5393 rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
/* NOTE(review): presumably rc==0 takes this branch — confirm upstream. */
5395 write_lock(&vport->vmid_lock);
5396 vport->cur_vmid_cnt++;
5397 vmp->flag |= LPFC_VMID_REQ_REGISTER;
5398 write_unlock(&vport->vmid_lock);
/* Switch registration failed: roll back the table entry. */
5400 write_lock(&vport->vmid_lock);
5401 hash_del(&vmp->hnode);
5402 vmp->flag = LPFC_VMID_SLOT_FREE;
5403 free_percpu(vmp->last_io_time);
5404 write_unlock(&vport->vmid_lock);
5408 /* finally, enable the idle timer once */
5409 if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
5410 mod_timer(&vport->phba->inactive_vmid_poll,
5412 msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
5413 vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
5420 * lpfc_is_command_vm_io - get the UUID from blk cgroup
5421 * @cmd: Pointer to scsi_cmnd data structure
5422 * Returns UUID if present, otherwise NULL
/* Return the FC app-id UUID from the command's blk-cgroup, if the request
 * carries a bio; otherwise the (uninitialized-here) uuid default is used.
 */
5424 static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
5429 if (cmd->request->bio)
5430 uuid = blkcg_get_fc_appid(cmd->request->bio);
5436 * lpfc_queuecommand - scsi_host_template queuecommand entry point
5437 * @shost: kernel scsi host pointer.
5438 * @cmnd: Pointer to scsi_cmnd data structure.
5440 * Driver registers this routine to scsi midlayer to submit a @cmd to process.
5441 * This routine prepares an IOCB from scsi command and provides to firmware.
5442 * The @done callback is invoked after driver finished processing the command.
5446 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
/* scsi_host_template .queuecommand: validate the rport/node, allocate an
 * lpfc I/O buffer, build the FCP command (with optional T10-PI and VMID
 * handling), and issue it to the adapter.  Error paths unwind via the
 * out_* labels near the end of the function.
 * NOTE(review): numerous lines (returns, braces, some else keywords) are
 * missing from this excerpt; label fall-through order may differ from
 * what is visible.
 */
5449 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
5451 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5452 struct lpfc_hba *phba = vport->phba;
5453 struct lpfc_rport_data *rdata;
5454 struct lpfc_nodelist *ndlp;
5455 struct lpfc_io_buf *lpfc_cmd;
5456 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
/* Latency instrumentation start stamp (debugfs builds only). */
5459 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5460 uint64_t start = 0L;
5463 start = ktime_get_ns();
5466 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5468 /* sanity check on references */
5469 if (unlikely(!rdata) || unlikely(!rport))
5470 goto out_fail_command;
5472 err = fc_remote_port_chkready(rport);
5475 goto out_fail_command;
5477 ndlp = rdata->pnode;
/* Reject protected I/O if BlockGuard was never enabled on this HBA. */
5479 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
5480 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
5482 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5483 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
5484 " op:%02x str=%s without registering for"
5485 " BlockGuard - Rejecting command\n",
5486 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
5487 dif_op_str[scsi_get_prot_op(cmnd)]);
5488 goto out_fail_command;
5492 * Catch race where our node has transitioned, but the
5493 * transport is still transitioning.
/* Per-target queue-depth gate: log and (in full source) return busy. */
5497 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
5498 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
5499 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5500 "3377 Target Queue Full, scsi Id:%d "
5501 "Qdepth:%d Pending command:%d"
5502 " WWNN:%02x:%02x:%02x:%02x:"
5503 "%02x:%02x:%02x:%02x, "
5504 " WWPN:%02x:%02x:%02x:%02x:"
5505 "%02x:%02x:%02x:%02x",
5506 ndlp->nlp_sid, ndlp->cmd_qdepth,
5507 atomic_read(&ndlp->cmd_pending),
5508 ndlp->nlp_nodename.u.wwn[0],
5509 ndlp->nlp_nodename.u.wwn[1],
5510 ndlp->nlp_nodename.u.wwn[2],
5511 ndlp->nlp_nodename.u.wwn[3],
5512 ndlp->nlp_nodename.u.wwn[4],
5513 ndlp->nlp_nodename.u.wwn[5],
5514 ndlp->nlp_nodename.u.wwn[6],
5515 ndlp->nlp_nodename.u.wwn[7],
5516 ndlp->nlp_portname.u.wwn[0],
5517 ndlp->nlp_portname.u.wwn[1],
5518 ndlp->nlp_portname.u.wwn[2],
5519 ndlp->nlp_portname.u.wwn[3],
5520 ndlp->nlp_portname.u.wwn[4],
5521 ndlp->nlp_portname.u.wwn[5],
5522 ndlp->nlp_portname.u.wwn[6],
5523 ndlp->nlp_portname.u.wwn[7]);
/* Allocate a driver I/O buffer; on exhaustion ramp down queue depth. */
5528 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
5529 if (lpfc_cmd == NULL) {
5530 lpfc_rampdown_queue_depth(phba);
5532 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5533 "0707 driver's buffer pool is empty, "
5539 * Store the midlayer's command structure for the completion phase
5540 * and complete the command initialization.
5542 lpfc_cmd->pCmd = cmnd;
5543 lpfc_cmd->rdata = rdata;
5544 lpfc_cmd->ndlp = ndlp;
5545 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
5546 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
5548 err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
5550 goto out_host_busy_release_buf;
/* DMA-map the data buffer: BlockGuard path for protected ops. */
5552 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
5553 if (vport->phba->cfg_enable_bg) {
5554 lpfc_printf_vlog(vport,
5555 KERN_INFO, LOG_SCSI_CMD,
5556 "9033 BLKGRD: rcvd %s cmd:x%x "
5557 "reftag x%x cnt %u pt %x\n",
5558 dif_op_str[scsi_get_prot_op(cmnd)],
5560 t10_pi_ref_tag(cmnd->request),
5561 blk_rq_sectors(cmnd->request),
5562 (cmnd->cmnd[1]>>5));
5564 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
5566 if (vport->phba->cfg_enable_bg) {
5567 lpfc_printf_vlog(vport,
5568 KERN_INFO, LOG_SCSI_CMD,
5569 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
5570 "x%x reftag x%x cnt %u pt %x\n",
5572 t10_pi_ref_tag(cmnd->request),
5573 blk_rq_sectors(cmnd->request),
5574 (cmnd->cmnd[1]>>5));
5576 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
/* Mapping errors: DID_ERROR for hard failure, host-busy otherwise. */
5579 if (unlikely(err)) {
5581 cmnd->result = DID_ERROR << 16;
5582 goto out_fail_command_release_buf;
5584 goto out_host_busy_free_buf;
5588 /* check the necessary and sufficient condition to support VMID */
5589 if (lpfc_is_vmid_enabled(phba) &&
5590 (ndlp->vmid_support ||
5591 phba->pport->vmid_priority_tagging ==
5592 LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
5593 /* is the I/O generated by a VM, get the associated virtual */
5595 uuid = lpfc_is_command_vm_io(cmnd);
5598 err = lpfc_vmid_get_appid(vport, uuid, cmnd,
5599 (union lpfc_vmid_io_tag *)
5600 &lpfc_cmd->cur_iocbq.vmid_tag);
5602 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_VMID;
5606 atomic_inc(&ndlp->cmd_pending);
5607 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5608 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
5609 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
5611 /* Issue I/O to adapter */
5612 err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING,
5613 &lpfc_cmd->cur_iocbq,
5615 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5617 lpfc_cmd->ts_cmd_start = start;
5618 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
5619 lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
5621 lpfc_cmd->ts_cmd_start = 0;
/* Issue failed: log full context and unwind via out_host_busy_free_buf. */
5625 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5626 "3376 FCP could not issue IOCB err %x "
5627 "FCP cmd x%x <%d/%llu> "
5628 "sid: x%x did: x%x oxid: x%x "
5629 "Data: x%x x%x x%x x%x\n",
5631 cmnd->device ? cmnd->device->id : 0xffff,
5632 cmnd->device ? cmnd->device->lun : (u64)-1,
5633 vport->fc_myDID, ndlp->nlp_DID,
5634 phba->sli_rev == LPFC_SLI_REV4 ?
5635 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
5636 phba->sli_rev == LPFC_SLI_REV4 ?
5637 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
5638 lpfc_cmd->cur_iocbq.iocb.ulpContext,
5639 lpfc_cmd->cur_iocbq.iotag,
5640 phba->sli_rev == LPFC_SLI_REV4 ?
5642 &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
5643 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
5645 (cmnd->request->timeout / 1000));
5647 goto out_host_busy_free_buf;
/* Polling mode: reap completions inline instead of via interrupt. */
5650 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5651 lpfc_sli_handle_fast_ring_event(phba,
5652 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5654 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5655 lpfc_poll_rearm_timer(phba);
5658 if (phba->cfg_xri_rebalancing)
5659 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
/* Error unwind: undo DMA mapping and the per-queue stat increment. */
5663 out_host_busy_free_buf:
5664 idx = lpfc_cmd->hdwq_no;
5665 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
5666 if (phba->sli4_hba.hdwq) {
5667 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
5669 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
5672 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
5675 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
5678 out_host_busy_release_buf:
5679 lpfc_release_scsi_buf(phba, lpfc_cmd);
5681 return SCSI_MLQUEUE_HOST_BUSY;
5684 return SCSI_MLQUEUE_TARGET_BUSY;
5686 out_fail_command_release_buf:
5687 lpfc_release_scsi_buf(phba, lpfc_cmd);
/* Fail the command back to the midlayer immediately. */
5690 cmnd->scsi_done(cmnd);
5695 * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport
5696 * @vport: The virtual port for which this call is being executed.
/* Tear down a vport's VMID state: stop the inactivity timer (physical
 * port only), free QFPA and priority-range allocations, empty the hash
 * table, and reset the active-VMID count.
 */
5698 void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
5701 struct lpfc_vmid *cur;
5703 if (vport->port_type == LPFC_PHYSICAL_PORT)
5704 del_timer_sync(&vport->phba->inactive_vmid_poll);
/* kfree(NULL) is a no-op, so these are safe even if never allocated. */
5706 kfree(vport->qfpa_res);
5707 kfree(vport->vmid_priority.vmid_range);
5710 if (!hash_empty(vport->hash_table))
5711 hash_for_each(vport->hash_table, bucket, cur, hnode)
5712 hash_del(&cur->hnode);
5714 vport->qfpa_res = NULL;
5715 vport->vmid_priority.vmid_range = NULL;
5717 vport->cur_vmid_cnt = 0;
5721 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
5722 * @cmnd: Pointer to scsi_cmnd data structure.
5724 * This routine aborts @cmnd pending in base driver.
/* scsi_host_template .eh_abort_handler: locate the driver I/O for @cmnd,
 * guard against completion/flush races under hbalock + buf_lock (+ the
 * SLI-4 ring lock), issue an abort to the adapter, then wait up to
 * 2 * devloss_tmo for the aborted I/O to complete.
 * Lock order here is hbalock -> buf_lock -> ring_lock; do not reorder.
 * NOTE(review): several returns/labels/else lines are missing from this
 * excerpt.
 */
5731 lpfc_abort_handler(struct scsi_cmnd *cmnd)
5733 struct Scsi_Host *shost = cmnd->device->host;
5734 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5735 struct lpfc_hba *phba = vport->phba;
5736 struct lpfc_iocbq *iocb;
5737 struct lpfc_io_buf *lpfc_cmd;
5738 int ret = SUCCESS, status = 0;
5739 struct lpfc_sli_ring *pring_s4 = NULL;
5740 struct lpfc_sli_ring *pring = NULL;
5742 unsigned long flags;
/* Wait queue the completion path signals when the abort finishes. */
5743 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
5745 status = fc_block_scsi_eh(cmnd);
5746 if (status != 0 && status != SUCCESS)
5749 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
5753 spin_lock_irqsave(&phba->hbalock, flags);
5754 /* driver queued commands are in process of being flushed */
5755 if (phba->hba_flag & HBA_IOQ_FLUSH) {
5756 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5757 "3168 SCSI Layer abort requested I/O has been "
5758 "flushed by LLD.\n");
5763 /* Guard against IO completion being called at same time */
5764 spin_lock(&lpfc_cmd->buf_lock);
/* pCmd cleared => the I/O already completed; nothing to abort. */
5766 if (!lpfc_cmd->pCmd) {
5767 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5768 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
5769 "x%x ID %d LUN %llu\n",
5770 SUCCESS, cmnd->device->id, cmnd->device->lun);
5771 goto out_unlock_buf;
5774 iocb = &lpfc_cmd->cur_iocbq;
5775 if (phba->sli_rev == LPFC_SLI_REV4) {
5776 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
5779 goto out_unlock_buf;
5781 spin_lock(&pring_s4->ring_lock);
5783 /* the command is in process of being cancelled */
5784 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
5785 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5786 "3169 SCSI Layer abort requested I/O has been "
5787 "cancelled by LLD.\n");
5789 goto out_unlock_ring;
5792 * If pCmd field of the corresponding lpfc_io_buf structure
5793 * points to a different SCSI command, then the driver has
5794 * already completed this command, but the midlayer did not
5795 * see the completion before the eh fired. Just return SUCCESS.
5797 if (lpfc_cmd->pCmd != cmnd) {
5798 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5799 "3170 SCSI Layer abort requested I/O has been "
5800 "completed by LLD.\n");
5801 goto out_unlock_ring;
5804 BUG_ON(iocb->context1 != lpfc_cmd);
5806 /* abort issued in recovery is still in progress */
5807 if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
5808 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5809 "3389 SCSI Layer I/O Abort Request is pending\n");
5810 if (phba->sli_rev == LPFC_SLI_REV4)
5811 spin_unlock(&pring_s4->ring_lock);
5812 spin_unlock(&lpfc_cmd->buf_lock);
5813 spin_unlock_irqrestore(&phba->hbalock, flags);
/* Register our wait queue before issuing the abort, then issue it. */
5817 lpfc_cmd->waitq = &waitq;
5818 if (phba->sli_rev == LPFC_SLI_REV4) {
5819 spin_unlock(&pring_s4->ring_lock);
5820 ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
5821 lpfc_sli4_abort_fcp_cmpl);
5823 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
5824 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
5825 lpfc_sli_abort_fcp_cmpl);
5828 /* Make sure HBA is alive */
5829 lpfc_issue_hb_tmo(phba);
5831 if (ret_val != IOCB_SUCCESS) {
5832 /* Indicate the IO is not being aborted by the driver. */
5833 lpfc_cmd->waitq = NULL;
5834 spin_unlock(&lpfc_cmd->buf_lock);
5835 spin_unlock_irqrestore(&phba->hbalock, flags);
5840 /* no longer need the lock after this point */
5841 spin_unlock(&lpfc_cmd->buf_lock);
5842 spin_unlock_irqrestore(&phba->hbalock, flags);
5844 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5845 lpfc_sli_handle_fast_ring_event(phba,
5846 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5850 * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait
5851 * for abort to complete.
5853 wait_event_timeout(waitq,
5854 (lpfc_cmd->pCmd != cmnd),
5855 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
5857 spin_lock(&lpfc_cmd->buf_lock);
/* Still ours after the timeout: the abort never completed. */
5859 if (lpfc_cmd->pCmd == cmnd) {
5861 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5862 "0748 abort handler timed out waiting "
5863 "for aborting I/O (xri:x%x) to complete: "
5864 "ret %#x, ID %d, LUN %llu\n",
5865 iocb->sli4_xritag, ret,
5866 cmnd->device->id, cmnd->device->lun);
5869 lpfc_cmd->waitq = NULL;
5871 spin_unlock(&lpfc_cmd->buf_lock);
/* Error-path unlock labels (ring -> buf -> hbalock, reverse of acquire). */
5875 if (phba->sli_rev == LPFC_SLI_REV4)
5876 spin_unlock(&pring_s4->ring_lock);
5878 spin_unlock(&lpfc_cmd->buf_lock);
5880 spin_unlock_irqrestore(&phba->hbalock, flags);
5882 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5883 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
5884 "LUN %llu\n", ret, cmnd->device->id,
/*
 * lpfc_taskmgmt_name - map an FCP task management command code to a
 * printable name; used only for log messages.
 * NOTE(review): this excerpt appears to be missing several lines
 * (some case labels and the default/closing lines) - confirm against
 * the full source.
 */
5890 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
5892 switch (task_mgmt_cmd) {
5893 case FCP_ABORT_TASK_SET:
5894 return "ABORT_TASK_SET";
5895 case FCP_CLEAR_TASK_SET:
5896 return "FCP_CLEAR_TASK_SET";
5898 return "FCP_BUS_RESET";
5900 return "FCP_LUN_RESET";
5901 case FCP_TARGET_RESET:
5902 return "FCP_TARGET_RESET";
5904 return "FCP_CLEAR_ACA";
5905 case FCP_TERMINATE_TASK:
5906 return "FCP_TERMINATE_TASK";
5914 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
5915 * @vport: The virtual port for which this call is being executed.
5916 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
5918 * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeeded
5925 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
5927 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
5930 uint8_t rsp_info_code;
/* No FCP_RSP payload at all - log it (the early-return handling for
 * this case is in lines elided from this excerpt). */
5935 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5936 "0703 fcp_rsp is missing\n");
/* Pull the TM status fields out of the wire-format response;
 * rspRspLen is big-endian on the wire, hence be32_to_cpu(). */
5938 rsp_info = fcprsp->rspStatus2;
5939 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
5940 rsp_info_code = fcprsp->rspInfo3;
5943 lpfc_printf_vlog(vport, KERN_INFO,
5945 "0706 fcp_rsp valid 0x%x,"
5946 " rsp len=%d code 0x%x\n",
5948 rsp_len, rsp_info_code);
5950 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
5951 * field specifies the number of valid bytes of FCP_RSP_INFO.
5952 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
5954 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
5955 ((rsp_len == 8) || (rsp_len == 4))) {
/* Translate the FCP_RSP_INFO code into a log message; the per-case
 * return-value assignments are in lines elided from this excerpt. */
5956 switch (rsp_info_code) {
5957 case RSP_NO_FAILURE:
5958 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5959 "0715 Task Mgmt No Failure\n");
5962 case RSP_TM_NOT_SUPPORTED: /* TM rejected */
5963 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5964 "0716 Task Mgmt Target "
5967 case RSP_TM_NOT_COMPLETED: /* TM failed */
5968 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5969 "0717 Task Mgmt Target "
5972 case RSP_TM_INVALID_LU: /* TM to invalid LU! */
5973 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5974 "0718 Task Mgmt to invalid "
5985 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
5986 * @vport: The virtual port for which this call is being executed.
5987 * @cmnd: Pointer to scsi_cmnd data structure.
5988 * @tgt_id: Target ID of remote device.
5989 * @lun_id: Lun number for the TMF
5990 * @task_mgmt_cmd: type of TMF to send
5992 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
6000 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
6001 unsigned int tgt_id, uint64_t lun_id,
6002 uint8_t task_mgmt_cmd)
6004 struct lpfc_hba *phba = vport->phba;
6005 struct lpfc_io_buf *lpfc_cmd;
6006 struct lpfc_iocbq *iocbq;
6007 struct lpfc_iocbq *iocbqrsp;
6008 struct lpfc_rport_data *rdata;
6009 struct lpfc_nodelist *pnode;
/* The remote port must still have a node; otherwise there is no rpi
 * to address the TMF to (failure return is in elided lines). */
6013 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
6014 if (!rdata || !rdata->pnode)
6016 pnode = rdata->pnode;
/* Borrow a driver I/O buffer to carry the TMF request. */
6018 lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
6019 if (lpfc_cmd == NULL)
6021 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
6022 lpfc_cmd->rdata = rdata;
6023 lpfc_cmd->pCmd = cmnd;
6024 lpfc_cmd->ndlp = pnode;
/* Build the TMF IU; on failure the buffer goes back to the pool. */
6026 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
6029 lpfc_release_scsi_buf(phba, lpfc_cmd);
6033 iocbq = &lpfc_cmd->cur_iocbq;
6034 iocbqrsp = lpfc_sli_get_iocbq(phba);
6035 if (iocbqrsp == NULL) {
6036 lpfc_release_scsi_buf(phba, lpfc_cmd);
6039 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
6041 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6042 "0702 Issue %s to TGT %d LUN %llu "
6043 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
6044 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
6045 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
/* Issue synchronously and wait up to the TMF timeout for the
 * response iocb. */
6048 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
6049 iocbq, iocbqrsp, lpfc_cmd->timeout);
6050 if ((status != IOCB_SUCCESS) ||
6051 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
6052 if (status != IOCB_SUCCESS ||
6053 iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
6054 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6055 "0727 TMF %s to TGT %d LUN %llu "
6056 "failed (%d, %d) iocb_flag x%x\n",
6057 lpfc_taskmgmt_name(task_mgmt_cmd),
6059 iocbqrsp->iocb.ulpStatus,
6060 iocbqrsp->iocb.un.ulpWord[4],
6062 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
6063 if (status == IOCB_SUCCESS) {
6064 if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
6065 /* Something in the FCP_RSP was invalid.
6066 * Check conditions */
6067 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
6070 } else if (status == IOCB_TIMEDOUT) {
6071 ret = TIMEOUT_ERROR;
6078 lpfc_sli_release_iocbq(phba, iocbqrsp);
/* On timeout the io buffer is deliberately NOT released -
 * presumably the adapter may still own/complete it; confirm against
 * the full source. */
6080 if (ret != TIMEOUT_ERROR)
6081 lpfc_release_scsi_buf(phba, lpfc_cmd);
6087 * lpfc_chk_tgt_mapped -
6088 * @vport: The virtual port to check on
6089 * @cmnd: Pointer to scsi_cmnd data structure.
6091 * This routine delays until the scsi target (aka rport) for the
6092 * command exists (is present and logged in) or we declare it non-existent.
6099 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
6101 struct lpfc_rport_data *rdata;
6102 struct lpfc_nodelist *pnode;
6103 unsigned long later;
6105 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
6107 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6108 "0797 Tgt Map rport failure: rdata x%px\n", rdata);
6111 pnode = rdata->pnode;
6113 * If target is not in a MAPPED state, delay until
6114 * target is rediscovered or devloss timeout expires.
/* Poll for up to 2 * devloss_tmo, sleeping 500ms per iteration and
 * re-fetching rdata/pnode each pass (rediscovery can replace them). */
6116 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
6117 while (time_after(later, jiffies)) {
6120 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
6122 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
6123 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
6126 pnode = rdata->pnode;
/* Timed out without reaching MAPPED (SUCCESS/FAILED returns are in
 * lines elided from this excerpt). */
6128 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
6134 * lpfc_reset_flush_io_context -
6135 * @vport: The virtual port (scsi_host) for the flush context
6136 * @tgt_id: If aborting by Target context - specifies the target id
6137 * @lun_id: If aborting by Lun context - specifies the lun id
6138 * @context: specifies the context level to flush at.
6140 * After a reset condition via TMF, we need to flush orphaned i/o
6141 * contexts from the adapter. This routine aborts any contexts
6142 * outstanding, then waits for their completions. The wait is
6143 * bounded by devloss_tmo though.
6150 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
6151 uint64_t lun_id, lpfc_ctx_cmd context)
6153 struct lpfc_hba *phba = vport->phba;
6154 unsigned long later;
/* Snapshot how many iocbs match the context, then request aborts
 * for all of them. */
6157 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6159 lpfc_sli_abort_taskmgmt(vport,
6160 &phba->sli.sli3_ring[LPFC_FCP_RING],
6161 tgt_id, lun_id, context);
/* Poll the outstanding count down to zero, bounded by
 * 2 * devloss_tmo, sleeping 20ms per pass. */
6162 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
6163 while (time_after(later, jiffies) && cnt) {
6164 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
6165 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
/* cnt != 0 here means the flush did not drain in time. */
6168 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6169 "0724 I/O flush failure for context %s : cnt x%x\n",
6170 ((context == LPFC_CTX_LUN) ? "LUN" :
6171 ((context == LPFC_CTX_TGT) ? "TGT" :
6172 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
6180 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
6181 * @cmnd: Pointer to scsi_cmnd data structure.
6183 * This routine does a device reset by sending a LUN_RESET task management
6191 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
6193 struct Scsi_Host *shost = cmnd->device->host;
6194 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6195 struct lpfc_rport_data *rdata;
6196 struct lpfc_nodelist *pnode;
6197 unsigned tgt_id = cmnd->device->id;
6198 uint64_t lun_id = cmnd->device->lun;
6199 struct lpfc_scsi_event_header scsi_event;
6201 u32 logit = LOG_FCP;
/* Bail out early if the remote port has no node to address. */
6203 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
6204 if (!rdata || !rdata->pnode) {
6205 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6206 "0798 Device Reset rdata failure: rdata x%px\n",
6210 pnode = rdata->pnode;
/* Let the FC transport block EH while the rport is transitioning. */
6211 status = fc_block_scsi_eh(cmnd);
6212 if (status != 0 && status != SUCCESS)
6215 status = lpfc_chk_tgt_mapped(vport, cmnd);
6216 if (status == FAILED) {
6217 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6218 "0721 Device Reset rport failure: rdata x%px\n", rdata);
/* Post a LUN-reset vendor event for management applications. */
6222 scsi_event.event_type = FC_REG_SCSI_EVENT;
6223 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
6224 scsi_event.lun = lun_id;
6225 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6226 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6228 fc_host_post_vendor_event(shost, fc_get_event_number(),
6229 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
/* Fire the LUN_RESET TMF (the TMF code argument is in an elided
 * line of this excerpt). */
6231 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
6233 if (status != SUCCESS)
6234 logit = LOG_TRACE_EVENT;
6236 lpfc_printf_vlog(vport, KERN_ERR, logit,
6237 "0713 SCSI layer issued Device Reset (%d, %llu) "
6238 "return x%x\n", tgt_id, lun_id, status);
6241 * We have to clean up i/o as : they may be orphaned by the TMF;
6242 * or if the TMF failed, they may be in an indeterminate state.
6244 * We will report success if all the i/o aborts successfully.
6246 if (status == SUCCESS)
6247 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6254 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
6255 * @cmnd: Pointer to scsi_cmnd data structure.
6257 * This routine does a target reset by sending a TARGET_RESET task management
6265 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
6267 struct Scsi_Host *shost = cmnd->device->host;
6268 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6269 struct lpfc_rport_data *rdata;
6270 struct lpfc_nodelist *pnode;
6271 unsigned tgt_id = cmnd->device->id;
6272 uint64_t lun_id = cmnd->device->lun;
6273 struct lpfc_scsi_event_header scsi_event;
6275 u32 logit = LOG_FCP;
6276 unsigned long flags;
6277 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
6279 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
6280 if (!rdata || !rdata->pnode) {
6281 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6282 "0799 Target Reset rdata failure: rdata x%px\n",
6286 pnode = rdata->pnode;
6287 status = fc_block_scsi_eh(cmnd);
6288 if (status != 0 && status != SUCCESS)
/* Target never reached MAPPED: strip the node's ADISC/FCP2 state
 * under the node lock, flush any stragglers, and short-circuit with
 * FAST_IO_FAIL instead of sending a TMF to a dead target. */
6291 status = lpfc_chk_tgt_mapped(vport, cmnd);
6292 if (status == FAILED) {
6293 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6294 "0722 Target Reset rport failure: rdata x%px\n", rdata);
6296 spin_lock_irqsave(&pnode->lock, flags);
6297 pnode->nlp_flag &= ~NLP_NPR_ADISC;
6298 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
6299 spin_unlock_irqrestore(&pnode->lock, flags);
6301 lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6303 return FAST_IO_FAIL;
/* Post a target-reset vendor event for management applications. */
6306 scsi_event.event_type = FC_REG_SCSI_EVENT;
6307 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
6309 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6310 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6312 fc_host_post_vendor_event(shost, fc_get_event_number(),
6313 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6315 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
6317 if (status != SUCCESS)
6318 logit = LOG_TRACE_EVENT;
/* TMF failed and no LOGO recovery is already in flight: issue a
 * LOGO (via lpfc_unreg_rpi) and wait - bounded by devloss_tmo - for
 * its completion to clear NLP_WAIT_FOR_LOGO. All flag updates are
 * made under the node lock; note the lock is dropped across
 * lpfc_unreg_rpi() and the wait, then retaken. */
6319 spin_lock_irqsave(&pnode->lock, flags);
6320 if (status != SUCCESS &&
6321 (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)) &&
6322 !pnode->logo_waitq) {
6323 pnode->logo_waitq = &waitq;
6324 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
6325 pnode->nlp_flag |= NLP_ISSUE_LOGO;
6326 pnode->upcall_flags |= NLP_WAIT_FOR_LOGO;
6327 spin_unlock_irqrestore(&pnode->lock, flags);
6328 lpfc_unreg_rpi(vport, pnode);
6329 wait_event_timeout(waitq,
6330 (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)),
6331 msecs_to_jiffies(vport->cfg_devloss_tmo *
/* Flag still set after the wait => the LOGO timed out. */
6334 if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) {
6335 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6336 "0725 SCSI layer TGTRST failed & LOGO TMO "
6337 " (%d, %llu) return x%x\n", tgt_id,
6339 spin_lock_irqsave(&pnode->lock, flags);
6340 pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
6342 spin_lock_irqsave(&pnode->lock, flags);
6344 pnode->logo_waitq = NULL;
6345 spin_unlock_irqrestore(&pnode->lock, flags);
6349 spin_unlock_irqrestore(&pnode->lock, flags);
6352 lpfc_printf_vlog(vport, KERN_ERR, logit,
6353 "0723 SCSI layer issued Target Reset (%d, %llu) "
6354 "return x%x\n", tgt_id, lun_id, status);
6357 * We have to clean up i/o as : they may be orphaned by the TMF;
6358 * or if the TMF failed, they may be in an indeterminate state.
6360 * We will report success if all the i/o aborts successfully.
6362 if (status == SUCCESS)
6363 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6369 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
6370 * @cmnd: Pointer to scsi_cmnd data structure.
6372 * This routine does target reset to all targets on @cmnd->device->host.
6373 * This emulates Parallel SCSI Bus Reset Semantics.
6380 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
6382 struct Scsi_Host *shost = cmnd->device->host;
6383 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6384 struct lpfc_nodelist *ndlp = NULL;
6385 struct lpfc_scsi_event_header scsi_event;
6387 int ret = SUCCESS, status, i;
6388 u32 logit = LOG_FCP;
/* Post a single bus-reset vendor event before touching targets. */
6390 scsi_event.event_type = FC_REG_SCSI_EVENT;
6391 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
6393 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
6394 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
6396 fc_host_post_vendor_event(shost, fc_get_event_number(),
6397 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6399 status = fc_block_scsi_eh(cmnd);
6400 if (status != 0 && status != SUCCESS)
6404 * Since the driver manages a single bus device, reset all
6405 * targets known to the driver. Should any target reset
6406 * fail, this routine returns failure to the midlayer.
6408 for (i = 0; i < LPFC_MAX_TARGET; i++) {
6409 /* Search for mapped node by target ID */
6411 spin_lock_irq(shost->host_lock);
6412 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
/* Optionally skip FCP-2 (e.g. tape-class) devices when the
 * cfg_fcp2_no_tgt_reset tunable is set. */
6414 if (vport->phba->cfg_fcp2_no_tgt_reset &&
6415 (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
6417 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
6418 ndlp->nlp_sid == i &&
6420 ndlp->nlp_type & NLP_FCP_TARGET) {
6425 spin_unlock_irq(shost->host_lock);
/* Issue TARGET_RESET to this target id (lun 0 as placeholder). */
6429 status = lpfc_send_taskmgmt(vport, cmnd,
6430 i, 0, FCP_TARGET_RESET);
6432 if (status != SUCCESS) {
6433 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6434 "0700 Bus Reset on target %d failed\n",
6440 * We have to clean up i/o as : they may be orphaned by the TMFs
6441 * above; or if any of the TMFs failed, they may be in an
6442 * indeterminate state.
6443 * We will report success if all the i/o aborts successfully.
6446 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
6447 if (status != SUCCESS)
6450 logit = LOG_TRACE_EVENT;
6452 lpfc_printf_vlog(vport, KERN_ERR, logit,
6453 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
6458 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
6459 * @cmnd: Pointer to scsi_cmnd data structure.
6461 * This routine does host reset to the adaptor port. It brings the HBA
6462 * offline, performs a board restart, and then brings the board back online.
6463 * The lpfc_offline calls lpfc_sli_hba_down which will abort and local
6464 * reject all outstanding SCSI commands to the host and error returned
6465 * back to SCSI mid-level. As this will be SCSI mid-level's last resort
6466 * of error handling, it will only return error if resetting of the adapter
6467 * is not successful; in all other cases, will return success.
6474 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
6476 struct Scsi_Host *shost = cmnd->device->host;
6477 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6478 struct lpfc_hba *phba = vport->phba;
6479 int rc, ret = SUCCESS;
6481 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
6482 "3172 SCSI layer issued Host Reset Data:\n");
/* Sequence: prep for offline, restart the board, bring it back
 * online, then re-enable management I/O. Error checks between these
 * steps are in lines elided from this excerpt. */
6484 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6486 rc = lpfc_sli_brdrestart(phba);
6490 rc = lpfc_online(phba);
6494 lpfc_unblock_mgmt_io(phba);
/* Failure path: log and still unblock management I/O. */
6498 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6499 "3323 Failed host reset\n");
6500 lpfc_unblock_mgmt_io(phba);
6505 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
6506 * @sdev: Pointer to scsi_device.
6508 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
6509 * globally available list of scsi buffers. This routine also makes sure scsi
6510 * buffer is not allocated more than HBA limit conveyed to midlayer. This list
6511 * of scsi buffer exists for the lifetime of the driver.
6518 lpfc_slave_alloc(struct scsi_device *sdev)
6520 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6521 struct lpfc_hba *phba = vport->phba;
6522 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
6524 uint32_t num_to_alloc = 0;
6525 int num_allocated = 0;
6527 struct lpfc_device_data *device_data;
6528 unsigned long flags;
6529 struct lpfc_name target_wwpn;
6531 if (!rport || fc_remote_port_chkready(rport))
/* OAS (cfg_fof) enabled: look up - or create - the per-lun device
 * data under devicelock, and hang it off sdev->hostdata. Note the
 * lock is dropped across lpfc_create_device_data() and retaken. */
6534 if (phba->cfg_fof) {
6537 * Check to see if the device data structure for the lun
6538 * exists. If not, create one.
6541 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
6542 spin_lock_irqsave(&phba->devicelock, flags);
6543 device_data = __lpfc_get_device_data(phba,
6545 &vport->fc_portname,
6549 spin_unlock_irqrestore(&phba->devicelock, flags);
6550 device_data = lpfc_create_device_data(phba,
6551 &vport->fc_portname,
6554 phba->cfg_XLanePriority,
6558 spin_lock_irqsave(&phba->devicelock, flags);
6559 list_add_tail(&device_data->listentry, &phba->luns);
6561 device_data->rport_data = rport->dd_data;
6562 device_data->available = true;
6563 spin_unlock_irqrestore(&phba->devicelock, flags);
6564 sdev->hostdata = device_data;
/* Non-OAS: hostdata points directly at the rport private data. */
6566 sdev->hostdata = rport->dd_data;
6568 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
6570 /* For SLI4, all IO buffers are pre-allocated */
6571 if (phba->sli_rev == LPFC_SLI_REV4)
6574 /* This code path is now ONLY for SLI3 adapters */
6577 * Populate the cmds_per_lun count scsi_bufs into this host's globally
6578 * available list of scsi buffers. Don't allocate more than the
6579 * HBA limit conveyed to the midlayer via the host structure. The
6580 * formula accounts for the lun_queue_depth + error handlers + 1
6581 * extra. This list of scsi bufs exists for the lifetime of the driver.
6583 total = phba->total_scsi_bufs;
6584 num_to_alloc = vport->cfg_lun_queue_depth + 2;
6586 /* If allocated buffers are enough do nothing */
6587 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
6590 /* Allow some exchanges to be available always to complete discovery */
6591 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6592 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6593 "0704 At limitation of %d preallocated "
6594 "command buffers\n", total);
6596 /* Allow some exchanges to be available always to complete discovery */
6597 } else if (total + num_to_alloc >
6598 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6599 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6600 "0705 Allocation request of %d "
6601 "command buffers will exceed max of %d. "
6602 "Reducing allocation request to %d.\n",
6603 num_to_alloc, phba->cfg_hba_queue_depth,
6604 (phba->cfg_hba_queue_depth - total));
6605 num_to_alloc = phba->cfg_hba_queue_depth - total;
/* Partial allocation is tolerated; only log the shortfall. */
6607 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
6608 if (num_to_alloc != num_allocated) {
6609 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6610 "0708 Allocation request of %d "
6611 "command buffers did not succeed. "
6612 "Allocated %d buffers.\n",
6613 num_to_alloc, num_allocated);
6615 if (num_allocated > 0)
6616 phba->total_scsi_bufs += num_allocated;
6621 * lpfc_slave_configure - scsi_host_template slave_configure entry point
6622 * @sdev: Pointer to scsi_device.
6624 * This routine configures following items
6625 * - Tag command queuing support for @sdev if supported.
6626 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
6632 lpfc_slave_configure(struct scsi_device *sdev)
6634 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6635 struct lpfc_hba *phba = vport->phba;
/* Apply the configured per-lun queue depth to this device. */
6637 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
/* Ring-polling mode: service the FCP ring once now and re-arm the
 * poll timer if ring interrupts are disabled. */
6639 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
6640 lpfc_sli_handle_fast_ring_event(phba,
6641 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
6642 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
6643 lpfc_poll_rearm_timer(phba);
6650 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
6651 * @sdev: Pointer to scsi_device.
6653 * This routine sets @sdev hostdata field to null.
6656 lpfc_slave_destroy(struct scsi_device *sdev)
6658 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6659 struct lpfc_hba *phba = vport->phba;
6660 unsigned long flags;
6661 struct lpfc_device_data *device_data = sdev->hostdata;
6663 atomic_dec(&phba->sdev_cnt);
/* OAS (cfg_fof) case: mark the per-lun data unavailable; free it
 * unless OAS is still enabled on the lun. Done under devicelock. */
6664 if ((phba->cfg_fof) && (device_data)) {
6665 spin_lock_irqsave(&phba->devicelock, flags);
6666 device_data->available = false;
6667 if (!device_data->oas_enabled)
6668 lpfc_delete_device_data(phba, device_data);
6669 spin_unlock_irqrestore(&phba->devicelock, flags);
6671 sdev->hostdata = NULL;
6676 * lpfc_create_device_data - creates and initializes device data structure for OAS
6677 * @phba: Pointer to host bus adapter structure.
6678 * @vport_wwpn: Pointer to vport's wwpn information
6679 * @target_wwpn: Pointer to target's wwpn information
6680 * @lun: Lun on target
6682 * @atomic_create: Flag to indicate if memory should be allocated using the
6683 * GFP_ATOMIC flag or not.
6685 * This routine creates a device data structure which will contain identifying
6686 * information for the device (host wwpn, target wwpn, lun), state of OAS,
6687 * whether or not the corresponding lun is available by the system,
6688 * and pointer to the rport data.
6692 * Pointer to lpfc_device_data - Success
6694 struct lpfc_device_data*
6695 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6696 struct lpfc_name *target_wwpn, uint64_t lun,
6697 uint32_t pri, bool atomic_create)
6700 struct lpfc_device_data *lun_info;
/* Defensive argument validation (NULL-return path is in lines
 * elided from this excerpt). */
6703 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6707 /* Attempt to create the device data to contain lun info */
/* atomic_create selects GFP_ATOMIC so callers in atomic context
 * (e.g. holding a spinlock) can still allocate. */
6710 memory_flags = GFP_ATOMIC;
6712 memory_flags = GFP_KERNEL;
6713 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
6716 INIT_LIST_HEAD(&lun_info->listentry);
6717 lun_info->rport_data = NULL;
6718 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
6719 sizeof(struct lpfc_name));
6720 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
6721 sizeof(struct lpfc_name));
6722 lun_info->device_id.lun = lun;
/* New entries start disabled/unavailable; the caller flips these. */
6723 lun_info->oas_enabled = false;
6724 lun_info->priority = pri;
6725 lun_info->available = false;
6730 * lpfc_delete_device_data - frees a device data structure for OAS
6731 * @phba: Pointer to host bus adapter structure.
6732 * @lun_info: Pointer to device data structure to free.
6734 * This routine frees the previously allocated device data structure passed.
6738 lpfc_delete_device_data(struct lpfc_hba *phba,
6739 struct lpfc_device_data *lun_info)
6742 if (unlikely(!phba) || !lun_info ||
/* Unlink from phba->luns (if linked) before returning the entry to
 * the mempool. */
6746 if (!list_empty(&lun_info->listentry))
6747 list_del(&lun_info->listentry);
6748 mempool_free(lun_info, phba->device_data_mem_pool);
6753 * __lpfc_get_device_data - returns the device data for the specified lun
6754 * @phba: Pointer to host bus adapter structure.
6755 * @list: Point to list to search.
6756 * @vport_wwpn: Pointer to vport's wwpn information
6757 * @target_wwpn: Pointer to target's wwpn information
6758 * @lun: Lun on target
6760 * This routine searches the list passed for the specified lun's device data.
6761 * This function does not hold locks, it is the responsibility of the caller
6762 * to ensure the proper lock is held before calling the function.
6766 * Pointer to lpfc_device_data - Success
6768 struct lpfc_device_data*
6769 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
6770 struct lpfc_name *vport_wwpn,
6771 struct lpfc_name *target_wwpn, uint64_t lun)
6774 struct lpfc_device_data *lun_info;
6776 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
6780 /* Check to see if the lun is already enabled for OAS. */
/* Linear scan: a match requires vport wwpn, target wwpn AND lun to
 * all be equal. */
6782 list_for_each_entry(lun_info, list, listentry) {
6783 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6784 sizeof(struct lpfc_name)) == 0) &&
6785 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6786 sizeof(struct lpfc_name)) == 0) &&
6787 (lun_info->device_id.lun == lun))
6795 * lpfc_find_next_oas_lun - searches for the next oas lun
6796 * @phba: Pointer to host bus adapter structure.
6797 * @vport_wwpn: Pointer to vport's wwpn information
6798 * @target_wwpn: Pointer to target's wwpn information
6799 * @starting_lun: Pointer to the lun to start searching for
6800 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
6801 * @found_target_wwpn: Pointer to the found lun's target wwpn information
6802 * @found_lun: Pointer to the found lun.
6803 * @found_lun_status: Pointer to status of the found lun.
6804 * @found_lun_pri: Pointer to priority of the found lun.
6806 * This routine searches the luns list for the specified lun
6807 * or the first lun for the vport/target. If the vport wwpn contains
6808 * a zero value then a specific vport is not specified. In this case
6809 * any vport which contains the lun will be considered a match. If the
6810 * target wwpn contains a zero value then a specific target is not specified.
6811 * In this case any target which contains the lun will be considered a
6812 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
6813 * are returned. The function will also return the next lun if available.
6814 * If the next lun is not found, starting_lun parameter will be set to
6822 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6823 struct lpfc_name *target_wwpn, uint64_t *starting_lun,
6824 struct lpfc_name *found_vport_wwpn,
6825 struct lpfc_name *found_target_wwpn,
6826 uint64_t *found_lun,
6827 uint32_t *found_lun_status,
6828 uint32_t *found_lun_pri)
6831 unsigned long flags;
6832 struct lpfc_device_data *lun_info;
6833 struct lpfc_device_id *device_id;
/* Reject NULL output pointers and a starting lun that already hit
 * the end-of-list sentinel. */
6837 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6838 !starting_lun || !found_vport_wwpn ||
6839 !found_target_wwpn || !found_lun || !found_lun_status ||
6840 (*starting_lun == NO_MORE_OAS_LUN) ||
/* Default both outputs to "no more luns" until a match is found. */
6844 lun = *starting_lun;
6845 *found_lun = NO_MORE_OAS_LUN;
6846 *starting_lun = NO_MORE_OAS_LUN;
6848 /* Search for lun or the lun closet in value */
6850 spin_lock_irqsave(&phba->devicelock, flags);
6851 list_for_each_entry(lun_info, &phba->luns, listentry) {
/* Zero vport/target wwpn acts as a wildcard; only OAS-enabled
 * entries are considered. */
6852 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
6853 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6854 sizeof(struct lpfc_name)) == 0)) &&
6855 ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
6856 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6857 sizeof(struct lpfc_name)) == 0)) &&
6858 (lun_info->oas_enabled)) {
6859 device_id = &lun_info->device_id;
/* First branch: this entry IS the requested lun (or the first one,
 * when searching from FIND_FIRST_OAS_LUN) - fill in the found_*
 * outputs. */
6861 ((lun == FIND_FIRST_OAS_LUN) ||
6862 (device_id->lun == lun))) {
6863 *found_lun = device_id->lun;
6864 memcpy(found_vport_wwpn,
6865 &device_id->vport_wwpn,
6866 sizeof(struct lpfc_name));
6867 memcpy(found_target_wwpn,
6868 &device_id->target_wwpn,
6869 sizeof(struct lpfc_name));
6870 if (lun_info->available)
6872 OAS_LUN_STATUS_EXISTS;
6874 *found_lun_status = 0;
6875 *found_lun_pri = lun_info->priority;
6876 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
6877 memset(vport_wwpn, 0x0,
6878 sizeof(struct lpfc_name));
6879 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
6880 memset(target_wwpn, 0x0,
6881 sizeof(struct lpfc_name));
/* Second branch (condition in elided lines): report this entry as
 * the NEXT lun via starting_lun and the in/out wwpn buffers. */
6884 *starting_lun = device_id->lun;
6885 memcpy(vport_wwpn, &device_id->vport_wwpn,
6886 sizeof(struct lpfc_name));
6887 memcpy(target_wwpn, &device_id->target_wwpn,
6888 sizeof(struct lpfc_name));
6893 spin_unlock_irqrestore(&phba->devicelock, flags);
6898 * lpfc_enable_oas_lun - enables a lun for OAS operations
6899 * @phba: Pointer to host bus adapter structure.
6900 * @vport_wwpn: Pointer to vport's wwpn information
6901 * @target_wwpn: Pointer to target's wwpn information
6905 * This routine enables a lun for oas operations. The routines does so by
6906 * doing the following :
6908 * 1) Checks to see if the device data for the lun has been created.
6909 * 2) If found, sets the OAS enabled flag if not set and returns.
6910 * 3) Otherwise, creates a device data structure.
6911 * 4) If successfully created, indicates the device data is for an OAS lun,
6912 * indicates the lun is not available and add to the list of luns.
6919 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6920 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6923 struct lpfc_device_data *lun_info;
6924 unsigned long flags;
6926 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
/* All list lookups/insertions below are serialized by devicelock. */
6930 spin_lock_irqsave(&phba->devicelock, flags);
6932 /* Check to see if the device data for the lun has been created */
6933 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
/* Entry already exists: just (re)enable OAS and update priority. */
6936 if (!lun_info->oas_enabled)
6937 lun_info->oas_enabled = true;
6938 lun_info->priority = pri;
6939 spin_unlock_irqrestore(&phba->devicelock, flags);
6943 /* Create an lun info structure and add to list of luns */
/* NOTE(review): creation occurs while devicelock is held here, so
 * the atomic_create argument (in an elided line) presumably must be
 * true - confirm against the full source. */
6944 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
6947 lun_info->oas_enabled = true;
6948 lun_info->priority = pri;
6949 lun_info->available = false;
6950 list_add_tail(&lun_info->listentry, &phba->luns);
6951 spin_unlock_irqrestore(&phba->devicelock, flags);
/* Creation failed: drop the lock (false return is in elided lines). */
6954 spin_unlock_irqrestore(&phba->devicelock, flags);
6959 * lpfc_disable_oas_lun - disables a lun for OAS operations
6960 * @phba: Pointer to host bus adapter structure.
6961 * @vport_wwpn: Pointer to vport's wwpn information
6962 * @target_wwpn: Pointer to target's wwpn information
6966 * This routine disables a lun for oas operations. The routines does so by
6967 * doing the following :
6969 * 1) Checks to see if the device data for the lun is created.
6970 * 2) If present, clears the flag indicating this lun is for OAS.
6971 * 3) If the lun is not available by the system, the device data is
6979 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6980 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6983 struct lpfc_device_data *lun_info;
6984 unsigned long flags;
6986 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
/* Lookup and state changes are serialized by devicelock. */
6990 spin_lock_irqsave(&phba->devicelock, flags);
6992 /* Check to see if the lun is available. */
6993 lun_info = __lpfc_get_device_data(phba,
6994 &phba->luns, vport_wwpn,
/* Found: clear OAS; if the lun is no longer available to the system
 * the entry can be freed immediately. */
6997 lun_info->oas_enabled = false;
6998 lun_info->priority = pri;
6999 if (!lun_info->available)
7000 lpfc_delete_device_data(phba, lun_info);
7001 spin_unlock_irqrestore(&phba->devicelock, flags);
/* Not found: drop the lock (false return is in elided lines). */
7005 spin_unlock_irqrestore(&phba->devicelock, flags);
/*
 * lpfc_no_command - queuecommand stub for the NVMe-only host template;
 * always pushes the command back to the midlayer.
 */
7010 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
7012 return SCSI_MLQUEUE_HOST_BUSY;
/*
 * lpfc_no_handler - error-handler stub for the NVMe-only host template
 * (its return statement is in lines elided from this excerpt).
 */
7016 lpfc_no_handler(struct scsi_cmnd *cmnd)
/*
 * lpfc_no_slave - slave_alloc/slave_configure stub for the NVMe-only
 * host template (its return statement is in lines elided from this
 * excerpt).
 */
7022 lpfc_no_slave(struct scsi_device *sdev)
/*
 * SCSI host template used when the port is configured for NVMe only:
 * every SCSI entry point is a stub that refuses or no-ops the request.
 */
7027 struct scsi_host_template lpfc_template_nvme = {
7028 .module = THIS_MODULE,
7029 .name = LPFC_DRIVER_NAME,
7030 .proc_name = LPFC_DRIVER_NAME,
7032 .queuecommand = lpfc_no_command,
7033 .eh_abort_handler = lpfc_no_handler,
7034 .eh_device_reset_handler = lpfc_no_handler,
7035 .eh_target_reset_handler = lpfc_no_handler,
7036 .eh_bus_reset_handler = lpfc_no_handler,
7037 .eh_host_reset_handler = lpfc_no_handler,
7038 .slave_alloc = lpfc_no_slave,
7039 .slave_configure = lpfc_no_slave,
7040 .scan_finished = lpfc_scan_finished,
7044 .shost_attrs = lpfc_hba_attrs,
7045 .max_sectors = 0xFFFFFFFF,
7046 .vendor_id = LPFC_NL_VENDOR_ID,
7047 .track_queue_depth = 0,
7050 struct scsi_host_template lpfc_template = {
7051 .module = THIS_MODULE,
7052 .name = LPFC_DRIVER_NAME,
7053 .proc_name = LPFC_DRIVER_NAME,
7055 .queuecommand = lpfc_queuecommand,
7056 .eh_timed_out = fc_eh_timed_out,
7057 .eh_should_retry_cmd = fc_eh_should_retry_cmd,
7058 .eh_abort_handler = lpfc_abort_handler,
7059 .eh_device_reset_handler = lpfc_device_reset_handler,
7060 .eh_target_reset_handler = lpfc_target_reset_handler,
7061 .eh_bus_reset_handler = lpfc_bus_reset_handler,
7062 .eh_host_reset_handler = lpfc_host_reset_handler,
7063 .slave_alloc = lpfc_slave_alloc,
7064 .slave_configure = lpfc_slave_configure,
7065 .slave_destroy = lpfc_slave_destroy,
7066 .scan_finished = lpfc_scan_finished,
7068 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
7069 .cmd_per_lun = LPFC_CMD_PER_LUN,
7070 .shost_attrs = lpfc_hba_attrs,
7071 .max_sectors = 0xFFFFFFFF,
7072 .vendor_id = LPFC_NL_VENDOR_ID,
7073 .change_queue_depth = scsi_change_queue_depth,
7074 .track_queue_depth = 1,