2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
18 #include <linux/mempool.h>
19 #include <linux/errno.h>
20 #include <linux/init.h>
21 #include <linux/workqueue.h>
22 #include <linux/pci.h>
23 #include <linux/scatterlist.h>
24 #include <linux/skbuff.h>
25 #include <linux/spinlock.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/delay.h>
29 #include <linux/gfp.h>
30 #include <scsi/scsi.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_tcq.h>
35 #include <scsi/fc/fc_els.h>
36 #include <scsi/fc/fc_fcoe.h>
37 #include <scsi/libfc.h>
38 #include <scsi/fc_frame.h>
42 const char *fnic_state_str[] = {
43 [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
44 [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
45 [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
46 [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
49 static const char *fnic_ioreq_state_str[] = {
50 [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
51 [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
52 [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
53 [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
54 [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
57 static const char *fcpio_status_str[] = {
58 [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
59 [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
60 [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
61 [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
62 [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
63 [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
64 [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
65 [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
66 [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
67 [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
68 [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
69 [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
70 [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
71 [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
72 [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
73 [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
74 [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
75 [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
76 [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
79 const char *fnic_state_to_str(unsigned int state)
81 if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
84 return fnic_state_str[state];
87 static const char *fnic_ioreq_state_to_str(unsigned int state)
89 if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
90 !fnic_ioreq_state_str[state])
93 return fnic_ioreq_state_str[state];
96 static const char *fnic_fcpio_status_to_str(unsigned int status)
98 if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
101 return fcpio_status_str[status];
104 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
106 static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
107 struct scsi_cmnd *sc)
109 u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
111 return &fnic->io_req_lock[hash];
114 static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
117 return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
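/*
 * Worked example (illustrative, not upstream text): both lock helpers above
 * fold the block-layer tag into a small, power-of-two array of spinlocks, so
 * every IO whose tag shares the same low bits also shares a lock.  Assuming
 * FNIC_IO_LOCKS is 64 (mask 0x3f):
 *
 *	tag   3 -> io_req_lock[3]
 *	tag  67 -> io_req_lock[3]	(67 & 0x3f == 3)
 *	tag 131 -> io_req_lock[3]	(131 & 0x3f == 3)
 */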
121 * Unmap the data buffer and sense buffer for an io_req,
122 * also unmap and free the device-private scatter/gather list.
124 static void fnic_release_ioreq_buf(struct fnic *fnic,
125 struct fnic_io_req *io_req,
126 struct scsi_cmnd *sc)
128 if (io_req->sgl_list_pa)
129 pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
130 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
135 mempool_free(io_req->sgl_list_alloc,
136 fnic->io_sgl_pool[io_req->sgl_type]);
137 if (io_req->sense_buf_pa)
138 pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
139 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
142 /* Free up Copy Wq descriptors. Called with copy_wq lock held */
143 static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
145 /* if no Ack received from firmware, then nothing to clean */
146 if (!fnic->fw_ack_recd[0])
150 * Update desc_available count based on number of freed descriptors
151 * Account for wraparound
153 if (wq->to_clean_index <= fnic->fw_ack_index[0])
154 wq->ring.desc_avail += (fnic->fw_ack_index[0]
155 - wq->to_clean_index + 1);
157 wq->ring.desc_avail += (wq->ring.desc_count
159 + fnic->fw_ack_index[0] + 1);
162 * just bump clean index to ack_index+1 accounting for wraparound
163 * this will essentially free up all descriptors between
164 * to_clean_index and fw_ack_index, both inclusive
167 (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
169 /* we have processed the acks received so far */
170 fnic->fw_ack_recd[0] = 0;
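/*
 * Worked example of the accounting above (illustrative only), assuming
 * desc_count == 64:
 *
 *	no wrap:  to_clean_index == 5,  fw_ack_index == 9
 *		  desc_avail += 9 - 5 + 1 == 5	(descriptors 5..9)
 *	wrapped:  to_clean_index == 60, fw_ack_index == 3
 *		  desc_avail += 64 - 60 + 3 + 1 == 8	(descriptors 60..63, 0..3)
 *
 * In both cases to_clean_index then becomes (fw_ack_index + 1) % 64.
 */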
176 * __fnic_set_state_flags
177 * Sets/Clears bits in fnic's state_flags
180 __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
181 unsigned long clearbits)
183 struct Scsi_Host *host = fnic->lport->host;
184 int sh_locked = spin_is_locked(host->host_lock);
185 unsigned long flags = 0;
188 spin_lock_irqsave(host->host_lock, flags);
191 fnic->state_flags &= ~st_flags;
193 fnic->state_flags |= st_flags;
196 spin_unlock_irqrestore(host->host_lock, flags);
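/*
 * For reference, the fnic_set_state_flags()/fnic_clear_state_flags() calls
 * used elsewhere in this file are thin wrappers around this helper.  A
 * simplified sketch of those wrappers (see fnic.h for the real definitions):
 *
 *	#define fnic_set_state_flags(fnicp, st_flags)	\
 *		__fnic_set_state_flags(fnicp, st_flags, 0)
 *	#define fnic_clear_state_flags(fnicp, st_flags)	\
 *		__fnic_set_state_flags(fnicp, st_flags, 1)
 */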
203 * fnic_fw_reset_handler
204 * Routine to send reset msg to fw
206 int fnic_fw_reset_handler(struct fnic *fnic)
208 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
212 /* indicate fwreset to io path */
213 fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
215 skb_queue_purge(&fnic->frame_queue);
216 skb_queue_purge(&fnic->tx_queue);
218 /* wait for io cmpl */
219 while (atomic_read(&fnic->in_flight))
220 schedule_timeout(msecs_to_jiffies(1));
222 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
224 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
225 free_wq_copy_descs(fnic, wq);
227 if (!vnic_wq_copy_desc_avail(wq))
230 fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
231 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
232 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
233 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
234 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
236 &fnic->fnic_stats.fw_stats.active_fw_reqs));
239 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
242 atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
243 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
244 "Issued fw reset\n");
246 fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
247 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
248 "Failed to issue fw reset\n");
256 * fnic_flogi_reg_handler
257 * Routine to send flogi register msg to fw
259 int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
261 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
262 enum fcpio_flogi_reg_format_type format;
263 struct fc_lport *lp = fnic->lport;
268 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
270 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
271 free_wq_copy_descs(fnic, wq);
273 if (!vnic_wq_copy_desc_avail(wq)) {
275 goto flogi_reg_ioreq_end;
278 if (fnic->ctlr.map_dest) {
279 memset(gw_mac, 0xff, ETH_ALEN);
280 format = FCPIO_FLOGI_REG_DEF_DEST;
282 memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
283 format = FCPIO_FLOGI_REG_GW_DEST;
286 if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
287 fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
290 lp->r_a_tov, lp->e_d_tov);
291 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
292 "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
293 fc_id, fnic->data_src_addr, gw_mac);
295 fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
296 format, fc_id, gw_mac);
297 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
298 "FLOGI reg issued fcid %x map %d dest %pM\n",
299 fc_id, fnic->ctlr.map_dest, gw_mac);
302 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
303 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
304 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
305 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
306 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
309 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
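/*
 * Illustrative summary of the registration formats chosen above (not
 * upstream text):
 *
 *	map_dest set:			gw_mac = ff:ff:ff:ff:ff:ff,
 *					FCPIO_FLOGI_REG_DEF_DEST via flogi_reg
 *	map_dest clear, non-FIP:	gw_mac = ctlr.dest_addr,
 *					FCPIO_FLOGI_REG_GW_DEST via flogi_reg
 *	FIP capable and !map_dest:	fip_reg descriptor carrying the
 *					FIP-learned gateway MAC plus r_a_tov/e_d_tov
 */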
314 * fnic_queue_wq_copy_desc
315 * Routine to enqueue a wq copy desc
317 static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
318 struct vnic_wq_copy *wq,
319 struct fnic_io_req *io_req,
320 struct scsi_cmnd *sc,
323 struct scatterlist *sg;
324 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
325 struct fc_rport_libfc_priv *rp = rport->dd_data;
326 struct host_sg_desc *desc;
327 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
329 unsigned long intr_flags;
332 struct scsi_lun fc_lun;
336 /* For each SGE, create a device desc entry */
337 desc = io_req->sgl_list;
338 for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
339 desc->addr = cpu_to_le64(sg_dma_address(sg));
340 desc->len = cpu_to_le32(sg_dma_len(sg));
345 io_req->sgl_list_pa = pci_map_single
348 sizeof(io_req->sgl_list[0]) * sg_count,
351 r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
353 printk(KERN_ERR "PCI mapping failed with error %d\n", r);
354 return SCSI_MLQUEUE_HOST_BUSY;
358 io_req->sense_buf_pa = pci_map_single(fnic->pdev,
360 SCSI_SENSE_BUFFERSIZE,
363 r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
365 pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
366 sizeof(io_req->sgl_list[0]) * sg_count,
368 printk(KERN_ERR "PCI mapping failed with error %d\n", r);
369 return SCSI_MLQUEUE_HOST_BUSY;
372 int_to_scsilun(sc->device->lun, &fc_lun);
374 /* Enqueue the descriptor in the Copy WQ */
375 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
377 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
378 free_wq_copy_descs(fnic, wq);
380 if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
381 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
382 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
383 "fnic_queue_wq_copy_desc failure - no descriptors\n");
384 atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
385 return SCSI_MLQUEUE_HOST_BUSY;
389 if (sc->sc_data_direction == DMA_FROM_DEVICE)
390 flags = FCPIO_ICMND_RDDATA;
391 else if (sc->sc_data_direction == DMA_TO_DEVICE)
392 flags = FCPIO_ICMND_WRDATA;
395 if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
396 (rp->flags & FC_RP_FLAGS_RETRY))
397 exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
399 fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
400 0, exch_flags, io_req->sgl_cnt,
401 SCSI_SENSE_BUFFERSIZE,
403 io_req->sense_buf_pa,
404 0, /* scsi cmd ref, always 0 */
405 FCPIO_ICMND_PTA_SIMPLE,
406 /* scsi pri and tag */
407 flags, /* command flags */
408 sc->cmnd, sc->cmd_len,
410 fc_lun.scsi_lun, io_req->port_id,
411 rport->maxframe_size, rp->r_a_tov,
414 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
415 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
416 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
417 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
418 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
420 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
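/*
 * Not part of the upstream driver: a minimal sketch of the sense-buffer
 * mapping above expressed with the generic DMA API (pci_map_single() is a
 * thin wrapper around it).  Assumes <linux/dma-mapping.h> and the same
 * struct pci_dev in fnic->pdev; the function name is made up purely for
 * illustration.
 */
static inline int fnic_map_sense_buf_sketch(struct fnic *fnic,
					    struct scsi_cmnd *sc,
					    dma_addr_t *pa)
{
	/* the device writes sense data back to the host, hence DMA_FROM_DEVICE */
	*pa = dma_map_single(&fnic->pdev->dev, sc->sense_buffer,
			     SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, *pa))
		return SCSI_MLQUEUE_HOST_BUSY;	/* let the midlayer retry */
	return 0;
}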
426 * Routine to send a scsi cdb
427 * Called with host_lock held and interrupts disabled.
429 static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
431 struct fc_lport *lp = shost_priv(sc->device->host);
432 struct fc_rport *rport;
433 struct fnic_io_req *io_req = NULL;
434 struct fnic *fnic = lport_priv(lp);
435 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
436 struct vnic_wq_copy *wq;
440 unsigned long flags = 0;
442 spinlock_t *io_lock = NULL;
443 int io_lock_acquired = 0;
444 struct fc_rport_libfc_priv *rp;
446 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
447 return SCSI_MLQUEUE_HOST_BUSY;
449 rport = starget_to_rport(scsi_target(sc->device));
451 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
452 "returning DID_NO_CONNECT for IO as rport is NULL\n");
453 sc->result = DID_NO_CONNECT << 16;
458 ret = fc_remote_port_chkready(rport);
460 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
461 "rport is not ready\n");
462 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
469 if (!rp || rp->rp_state != RPORT_ST_READY) {
470 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
471 "returning DID_NO_CONNECT for IO as rport is removed\n");
472 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
473 sc->result = DID_NO_CONNECT<<16;
478 if (lp->state != LPORT_ST_READY || !(lp->link_up))
479 return SCSI_MLQUEUE_HOST_BUSY;
481 atomic_inc(&fnic->in_flight);
484 * Release host lock, use driver resource specific locks from here.
485 * Don't re-enable interrupts in case they were disabled prior to the
486 * caller disabling them.
488 spin_unlock(lp->host->host_lock);
489 CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
490 CMD_FLAGS(sc) = FNIC_NO_FLAGS;
492 /* Get a new io_req for this SCSI IO */
493 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
495 atomic64_inc(&fnic_stats->io_stats.alloc_failures);
496 ret = SCSI_MLQUEUE_HOST_BUSY;
499 memset(io_req, 0, sizeof(*io_req));
501 /* Map the data buffer */
502 sg_count = scsi_dma_map(sc);
504 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
505 sc->request->tag, sc, 0, sc->cmnd[0],
506 sg_count, CMD_STATE(sc));
507 mempool_free(io_req, fnic->io_req_pool);
511 /* Determine the type of scatter/gather list we need */
512 io_req->sgl_cnt = sg_count;
513 io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
514 if (sg_count > FNIC_DFLT_SG_DESC_CNT)
515 io_req->sgl_type = FNIC_SGL_CACHE_MAX;
519 mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
521 if (!io_req->sgl_list) {
522 atomic64_inc(&fnic_stats->io_stats.alloc_failures);
523 ret = SCSI_MLQUEUE_HOST_BUSY;
525 mempool_free(io_req, fnic->io_req_pool);
529 /* Cache sgl list allocated address before alignment */
530 io_req->sgl_list_alloc = io_req->sgl_list;
531 ptr = (unsigned long) io_req->sgl_list;
532 if (ptr % FNIC_SG_DESC_ALIGN) {
533 io_req->sgl_list = (struct host_sg_desc *)
534 (((unsigned long) ptr
535 + FNIC_SG_DESC_ALIGN - 1)
536 & ~(FNIC_SG_DESC_ALIGN - 1));
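/*
 * Worked example of the alignment above (illustrative only), assuming
 * FNIC_SG_DESC_ALIGN == 16: an allocation returned at 0x1238 is rounded up
 * to (0x1238 + 15) & ~15 == 0x1240, while an already aligned 0x1240 is left
 * unchanged.  sgl_list_alloc keeps the original pointer for mempool_free().
 */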
541 /* Will acquire lock before marking the IO initialized. */
544 io_lock = fnic_io_lock_hash(fnic, sc);
545 spin_lock_irqsave(io_lock, flags);
547 /* initialize rest of io_req */
548 io_lock_acquired = 1;
549 io_req->port_id = rport->port_id;
550 io_req->start_time = jiffies;
551 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
552 CMD_SP(sc) = (char *)io_req;
553 CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
554 sc->scsi_done = done;
556 /* create copy wq desc and enqueue it */
557 wq = &fnic->wq_copy[0];
558 ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
561 * In case another thread cancelled the request,
562 * refetch the pointer under the lock.
564 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
565 sc->request->tag, sc, 0, 0, 0,
566 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
567 io_req = (struct fnic_io_req *)CMD_SP(sc);
569 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
570 spin_unlock_irqrestore(io_lock, flags);
572 fnic_release_ioreq_buf(fnic, io_req, sc);
573 mempool_free(io_req, fnic->io_req_pool);
575 atomic_dec(&fnic->in_flight);
576 /* acquire host lock before returning to SCSI */
577 spin_lock(lp->host->host_lock);
580 atomic64_inc(&fnic_stats->io_stats.active_ios);
581 atomic64_inc(&fnic_stats->io_stats.num_ios);
582 if (atomic64_read(&fnic_stats->io_stats.active_ios) >
583 atomic64_read(&fnic_stats->io_stats.max_active_ios))
584 atomic64_set(&fnic_stats->io_stats.max_active_ios,
585 atomic64_read(&fnic_stats->io_stats.active_ios));
587 /* REVISIT: Use per IO lock in the final code */
588 CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
591 cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
592 (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
593 (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
596 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
597 sc->request->tag, sc, io_req,
599 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
601 /* we hold the io lock only if we actually issued the IO */
602 if (io_lock_acquired)
603 spin_unlock_irqrestore(io_lock, flags);
605 atomic_dec(&fnic->in_flight);
606 /* acquire host lock before returning to SCSI */
607 spin_lock(lp->host->host_lock);
611 DEF_SCSI_QCMD(fnic_queuecommand)
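/*
 * DEF_SCSI_QCMD() (a SCSI midlayer macro) generates the locked entry point
 * that the midlayer actually calls, which is why fnic_queuecommand_lck()
 * above is documented as running with the host lock held.  A simplified
 * sketch of roughly what it expands to (details vary by kernel version):
 *
 *	int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 *	{
 *		unsigned long irq_flags;
 *		int rc;
 *
 *		spin_lock_irqsave(shost->host_lock, irq_flags);
 *		rc = fnic_queuecommand_lck(cmd, cmd->scsi_done);
 *		spin_unlock_irqrestore(shost->host_lock, irq_flags);
 *		return rc;
 *	}
 */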
614 * fnic_fcpio_fw_reset_cmpl_handler
615 * Routine to handle fw reset completion
617 static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
618 struct fcpio_fw_req *desc)
622 struct fcpio_tag tag;
625 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
627 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
629 atomic64_inc(&reset_stats->fw_reset_completions);
631 /* Clean up all outstanding io requests */
632 fnic_cleanup_io(fnic, SCSI_NO_TAG);
634 atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
635 atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
637 spin_lock_irqsave(&fnic->fnic_lock, flags);
639 /* fnic should be in FC_TRANS_ETH_MODE */
640 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
641 /* Check status of reset completion */
643 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
644 "reset cmpl success\n");
645 /* Ready to send flogi out */
646 fnic->state = FNIC_IN_ETH_MODE;
648 FNIC_SCSI_DBG(KERN_DEBUG,
650 "fnic fw_reset : failed %s\n",
651 fnic_fcpio_status_to_str(hdr_status));
654 * Unable to change to eth mode, cannot send out flogi
655 * Change state to fc mode, so that subsequent Flogi
656 * requests from libFC will cause more attempts to
657 * reset the firmware. Free the cached flogi
659 fnic->state = FNIC_IN_FC_MODE;
660 atomic64_inc(&reset_stats->fw_reset_failures);
664 FNIC_SCSI_DBG(KERN_DEBUG,
666 "Unexpected state %s while processing"
667 " reset cmpl\n", fnic_state_to_str(fnic->state));
668 atomic64_inc(&reset_stats->fw_reset_failures);
672 /* Thread removing device blocks till firmware reset is complete */
673 if (fnic->remove_wait)
674 complete(fnic->remove_wait);
677 * If fnic is being removed, or fw reset failed
678 * free the flogi frame. Else, send it out
680 if (fnic->remove_wait || ret) {
681 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
682 skb_queue_purge(&fnic->tx_queue);
683 goto reset_cmpl_handler_end;
686 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
690 reset_cmpl_handler_end:
691 fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
697 * fnic_fcpio_flogi_reg_cmpl_handler
698 * Routine to handle flogi register completion
700 static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
701 struct fcpio_fw_req *desc)
705 struct fcpio_tag tag;
709 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
711 /* Update fnic state based on status of flogi reg completion */
712 spin_lock_irqsave(&fnic->fnic_lock, flags);
714 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
716 /* Check flogi registration completion status */
718 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
719 "flog reg succeeded\n");
720 fnic->state = FNIC_IN_FC_MODE;
722 FNIC_SCSI_DBG(KERN_DEBUG,
724 "fnic flogi reg :failed %s\n",
725 fnic_fcpio_status_to_str(hdr_status));
726 fnic->state = FNIC_IN_ETH_MODE;
730 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
731 "Unexpected fnic state %s while"
732 " processing flogi reg completion\n",
733 fnic_state_to_str(fnic->state));
738 if (fnic->stop_rx_link_events) {
739 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
740 goto reg_cmpl_handler_end;
742 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
745 queue_work(fnic_event_queue, &fnic->frame_work);
747 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
750 reg_cmpl_handler_end:
754 static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
757 if (wq->to_clean_index <= wq->to_use_index) {
758 /* out of range, stale request_out index */
759 if (request_out < wq->to_clean_index ||
760 request_out >= wq->to_use_index)
763 /* out of range, stale request_out index */
764 if (request_out < wq->to_clean_index &&
765 request_out >= wq->to_use_index)
768 /* request_out index is in range */
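/*
 * Worked example (illustrative only): the copy WQ is a ring, so a valid ack
 * must lie in the half-open window [to_clean_index, to_use_index).
 *
 *	no wrap:  to_clean == 10, to_use == 20
 *		  request_out 15 is in range; 25 (or 20 itself) is stale
 *	wrapped:  to_clean == 60, to_use == 4	(ring size 64)
 *		  request_out 62 or 2 is in range; 10 is stale
 */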
774 * Mark that an ack was received and store the ack index. If there are multiple
775 * acks received before Tx thread cleans it up, the latest value will be
776 * used which is correct behavior. This state should be in the copy Wq
777 * instead of in the fnic
779 static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
780 unsigned int cq_index,
781 struct fcpio_fw_req *desc)
783 struct vnic_wq_copy *wq;
784 u16 request_out = desc->u.ack.request_out;
786 u64 *ox_id_tag = (u64 *)(void *)desc;
788 /* mark the ack state */
789 wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
790 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
792 fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
793 if (is_ack_index_in_range(wq, request_out)) {
794 fnic->fw_ack_index[0] = request_out;
795 fnic->fw_ack_recd[0] = 1;
798 &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
800 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
801 FNIC_TRACE(fnic_fcpio_ack_handler,
802 fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
803 ox_id_tag[4], ox_id_tag[5]);
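/*
 * Illustrative note on the cq_index arithmetic above: the completion queues
 * are laid out as raw WQ CQs, then RQ CQs, then copy WQ CQs.  With, for
 * example, raw_wq_count == 1 and rq_count == 1, cq_index 2 maps to
 * wq_copy[2 - 1 - 1] == wq_copy[0].
 */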
807 * fnic_fcpio_icmnd_cmpl_handler
808 * Routine to handle icmnd completions
810 static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
811 struct fcpio_fw_req *desc)
815 struct fcpio_tag tag;
818 struct fcpio_icmnd_cmpl *icmnd_cmpl;
819 struct fnic_io_req *io_req;
820 struct scsi_cmnd *sc;
821 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
825 unsigned long start_time;
827 /* Decode the cmpl description to get the io_req id */
828 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
829 fcpio_tag_id_dec(&tag, &id);
830 icmnd_cmpl = &desc->u.icmnd_cmpl;
832 if (id >= fnic->fnic_max_tag_id) {
833 shost_printk(KERN_ERR, fnic->lport->host,
834 "Tag out of range tag %x hdr status = %s\n",
835 id, fnic_fcpio_status_to_str(hdr_status));
839 sc = scsi_host_find_tag(fnic->lport->host, id);
842 atomic64_inc(&fnic_stats->io_stats.sc_null);
843 shost_printk(KERN_ERR, fnic->lport->host,
844 "icmnd_cmpl sc is null - "
845 "hdr status = %s tag = 0x%x desc = 0x%p\n",
846 fnic_fcpio_status_to_str(hdr_status), id, desc);
847 FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
848 fnic->lport->host->host_no, id,
849 ((u64)icmnd_cmpl->_resvd0[1] << 16 |
850 (u64)icmnd_cmpl->_resvd0[0]),
851 ((u64)hdr_status << 16 |
852 (u64)icmnd_cmpl->scsi_status << 8 |
853 (u64)icmnd_cmpl->flags), desc,
854 (u64)icmnd_cmpl->residual, 0);
858 io_lock = fnic_io_lock_hash(fnic, sc);
859 spin_lock_irqsave(io_lock, flags);
860 io_req = (struct fnic_io_req *)CMD_SP(sc);
861 WARN_ON_ONCE(!io_req);
863 atomic64_inc(&fnic_stats->io_stats.ioreq_null);
864 CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
865 spin_unlock_irqrestore(io_lock, flags);
866 shost_printk(KERN_ERR, fnic->lport->host,
867 "icmnd_cmpl io_req is null - "
868 "hdr status = %s tag = 0x%x sc 0x%p\n",
869 fnic_fcpio_status_to_str(hdr_status), id, sc);
872 start_time = io_req->start_time;
874 /* firmware completed the io */
875 io_req->io_completed = 1;
878 * if SCSI-ML has already issued abort on this command,
879 * set completion of the IO. The abts path will clean it up
881 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
884 * set the FNIC_IO_DONE so that this doesn't get
885 * flagged as 'out of order' if it was not aborted
887 CMD_FLAGS(sc) |= FNIC_IO_DONE;
888 CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
889 spin_unlock_irqrestore(io_lock, flags);
890 if (FCPIO_ABORTED == hdr_status)
891 CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
893 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
894 "icmnd_cmpl abts pending "
895 "hdr status = %s tag = 0x%x sc = 0x%p"
896 "scsi_status = %x residual = %d\n",
897 fnic_fcpio_status_to_str(hdr_status),
899 icmnd_cmpl->scsi_status,
900 icmnd_cmpl->residual);
904 /* Mark the IO as complete */
905 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
907 icmnd_cmpl = &desc->u.icmnd_cmpl;
909 switch (hdr_status) {
911 sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
912 xfer_len = scsi_bufflen(sc);
913 scsi_set_resid(sc, icmnd_cmpl->residual);
915 if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
916 xfer_len -= icmnd_cmpl->residual;
918 if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
919 atomic64_inc(&fnic_stats->misc_stats.check_condition);
921 if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
922 atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
925 case FCPIO_TIMEOUT: /* request was timed out */
926 atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
927 sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
930 case FCPIO_ABORTED: /* request was aborted */
931 atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
932 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
935 case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
936 atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
937 scsi_set_resid(sc, icmnd_cmpl->residual);
938 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
941 case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
942 atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
943 sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
946 case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
947 atomic64_inc(&fnic_stats->io_stats.io_not_found);
948 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
951 case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
952 atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
953 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
956 case FCPIO_FW_ERR: /* request was terminated due to fw error */
957 atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
958 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
961 case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
962 atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
963 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
966 case FCPIO_INVALID_HEADER: /* header contains invalid data */
967 case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
968 case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
970 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
974 /* Break link with the SCSI command */
976 CMD_FLAGS(sc) |= FNIC_IO_DONE;
978 spin_unlock_irqrestore(io_lock, flags);
980 if (hdr_status != FCPIO_SUCCESS) {
981 atomic64_inc(&fnic_stats->io_stats.io_failures);
982 shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
983 fnic_fcpio_status_to_str(hdr_status));
986 fnic_release_ioreq_buf(fnic, io_req, sc);
988 mempool_free(io_req, fnic->io_req_pool);
990 cmd_trace = ((u64)hdr_status << 56) |
991 (u64)icmnd_cmpl->scsi_status << 48 |
992 (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
993 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
994 (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
996 FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
997 sc->device->host->host_no, id, sc,
998 ((u64)icmnd_cmpl->_resvd0[1] << 56 |
999 (u64)icmnd_cmpl->_resvd0[0] << 48 |
1000 jiffies_to_msecs(jiffies - start_time)),
1002 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1004 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
1005 fnic->lport->host_stats.fcp_input_requests++;
1006 fnic->fcp_input_bytes += xfer_len;
1007 } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
1008 fnic->lport->host_stats.fcp_output_requests++;
1009 fnic->fcp_output_bytes += xfer_len;
1011 fnic->lport->host_stats.fcp_control_requests++;
1013 atomic64_dec(&fnic_stats->io_stats.active_ios);
1014 if (atomic64_read(&fnic->io_cmpl_skip))
1015 atomic64_dec(&fnic->io_cmpl_skip);
1017 atomic64_inc(&fnic_stats->io_stats.io_completions);
1019 /* Call SCSI completion function to complete the IO */
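/*
 * Worked example for the FCPIO_SUCCESS residual handling above (illustrative
 * only): for a 4096-byte read that the target completed with a 512-byte
 * underrun, icmnd_cmpl->residual == 512 and FCPIO_ICMND_CMPL_RESID_UNDER is
 * set, so scsi_set_resid() records 512 and xfer_len becomes
 * 4096 - 512 == 3584 bytes actually transferred.
 */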
1024 /* fnic_fcpio_itmf_cmpl_handler
1025 * Routine to handle itmf completions
1027 static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
1028 struct fcpio_fw_req *desc)
1032 struct fcpio_tag tag;
1034 struct scsi_cmnd *sc;
1035 struct fnic_io_req *io_req;
1036 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1037 struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
1038 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1039 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1040 unsigned long flags;
1041 spinlock_t *io_lock;
1042 unsigned long start_time;
1044 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
1045 fcpio_tag_id_dec(&tag, &id);
1047 if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
1048 shost_printk(KERN_ERR, fnic->lport->host,
1049 "Tag out of range tag %x hdr status = %s\n",
1050 id, fnic_fcpio_status_to_str(hdr_status));
1054 sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
1057 atomic64_inc(&fnic_stats->io_stats.sc_null);
1058 shost_printk(KERN_ERR, fnic->lport->host,
1059 "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
1060 fnic_fcpio_status_to_str(hdr_status), id);
1063 io_lock = fnic_io_lock_hash(fnic, sc);
1064 spin_lock_irqsave(io_lock, flags);
1065 io_req = (struct fnic_io_req *)CMD_SP(sc);
1066 WARN_ON_ONCE(!io_req);
1068 atomic64_inc(&fnic_stats->io_stats.ioreq_null);
1069 spin_unlock_irqrestore(io_lock, flags);
1070 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
1071 shost_printk(KERN_ERR, fnic->lport->host,
1072 "itmf_cmpl io_req is null - "
1073 "hdr status = %s tag = 0x%x sc 0x%p\n",
1074 fnic_fcpio_status_to_str(hdr_status), id, sc);
1077 start_time = io_req->start_time;
1079 if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
1080 /* Abort and terminate completion of device reset req */
1081 /* REVISIT : Add asserts about various flags */
1082 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1083 "dev reset abts cmpl recd. id %x status %s\n",
1084 id, fnic_fcpio_status_to_str(hdr_status));
1085 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
1086 CMD_ABTS_STATUS(sc) = hdr_status;
1087 CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
1088 if (io_req->abts_done)
1089 complete(io_req->abts_done);
1090 spin_unlock_irqrestore(io_lock, flags);
1091 } else if (id & FNIC_TAG_ABORT) {
1092 /* Completion of abort cmd */
1093 switch (hdr_status) {
1097 if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
1098 atomic64_inc(&abts_stats->abort_fw_timeouts);
1101 &term_stats->terminate_fw_timeouts);
1103 case FCPIO_ITMF_REJECTED:
1104 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1105 "abort reject recd. id %d\n",
1106 (int)(id & FNIC_TAG_MASK));
1108 case FCPIO_IO_NOT_FOUND:
1109 if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
1110 atomic64_inc(&abts_stats->abort_io_not_found);
1113 &term_stats->terminate_io_not_found);
1116 if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
1117 atomic64_inc(&abts_stats->abort_failures);
1120 &term_stats->terminate_failures);
1123 if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
1124 /* This is a late completion. Ignore it */
1125 spin_unlock_irqrestore(io_lock, flags);
1129 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
1130 CMD_ABTS_STATUS(sc) = hdr_status;
1132 /* If the status is IO not found consider it as success */
1133 if (hdr_status == FCPIO_IO_NOT_FOUND)
1134 CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;
1136 if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
1137 atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
1139 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1140 "abts cmpl recd. id %d status %s\n",
1141 (int)(id & FNIC_TAG_MASK),
1142 fnic_fcpio_status_to_str(hdr_status));
1145 * If scsi_eh thread is blocked waiting for abts to complete,
1146 * signal completion to it; the IO will be cleaned up in that thread.
1147 * Otherwise clean it up in this context.
1149 if (io_req->abts_done) {
1150 complete(io_req->abts_done);
1151 spin_unlock_irqrestore(io_lock, flags);
1153 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1154 "abts cmpl, completing IO\n");
1156 sc->result = (DID_ERROR << 16);
1158 spin_unlock_irqrestore(io_lock, flags);
1160 fnic_release_ioreq_buf(fnic, io_req, sc);
1161 mempool_free(io_req, fnic->io_req_pool);
1162 if (sc->scsi_done) {
1163 FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1164 sc->device->host->host_no, id,
1166 jiffies_to_msecs(jiffies - start_time),
1168 (((u64)hdr_status << 40) |
1169 (u64)sc->cmnd[0] << 32 |
1170 (u64)sc->cmnd[2] << 24 |
1171 (u64)sc->cmnd[3] << 16 |
1172 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1173 (((u64)CMD_FLAGS(sc) << 32) |
1176 atomic64_dec(&fnic_stats->io_stats.active_ios);
1177 if (atomic64_read(&fnic->io_cmpl_skip))
1178 atomic64_dec(&fnic->io_cmpl_skip);
1180 atomic64_inc(&fnic_stats->io_stats.io_completions);
1184 } else if (id & FNIC_TAG_DEV_RST) {
1185 /* Completion of device reset */
1186 CMD_LR_STATUS(sc) = hdr_status;
1187 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1188 spin_unlock_irqrestore(io_lock, flags);
1189 CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
1190 FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1191 sc->device->host->host_no, id, sc,
1192 jiffies_to_msecs(jiffies - start_time),
1194 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1195 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1196 "Terminate pending "
1197 "dev reset cmpl recd. id %d status %s\n",
1198 (int)(id & FNIC_TAG_MASK),
1199 fnic_fcpio_status_to_str(hdr_status));
1202 if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
1203 /* Need to wait for terminate completion */
1204 spin_unlock_irqrestore(io_lock, flags);
1205 FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1206 sc->device->host->host_no, id, sc,
1207 jiffies_to_msecs(jiffies - start_time),
1209 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1210 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1211 "dev reset cmpl recd after time out. "
1212 "id %d status %s\n",
1213 (int)(id & FNIC_TAG_MASK),
1214 fnic_fcpio_status_to_str(hdr_status));
1217 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
1218 CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
1219 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1220 "dev reset cmpl recd. id %d status %s\n",
1221 (int)(id & FNIC_TAG_MASK),
1222 fnic_fcpio_status_to_str(hdr_status));
1223 if (io_req->dr_done)
1224 complete(io_req->dr_done);
1225 spin_unlock_irqrestore(io_lock, flags);
1228 shost_printk(KERN_ERR, fnic->lport->host,
1229 "Unexpected itmf io state %s tag %x\n",
1230 fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
1231 spin_unlock_irqrestore(io_lock, flags);
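/*
 * Illustrative note (not upstream text) on the tag encoding used in the
 * handler above: the low bits of the ITMF tag carry the original SCSI tag
 * and are recovered with "id & FNIC_TAG_MASK", while FNIC_TAG_ABORT and
 * FNIC_TAG_DEV_RST are flag bits outside that mask.  For example, an abort
 * issued for a device reset is queued as "tag | FNIC_TAG_DEV_RST" and then
 * ORed with FNIC_TAG_ABORT in fnic_queue_abort_io_req(), which is why the
 * first branch above checks for both bits being set.
 */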
1237 * fnic_fcpio_cmpl_handler
1238 * Routine to service the cq for wq_copy
1240 static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
1241 unsigned int cq_index,
1242 struct fcpio_fw_req *desc)
1244 struct fnic *fnic = vnic_dev_priv(vdev);
1246 switch (desc->hdr.type) {
1247 case FCPIO_ICMND_CMPL: /* fw completed a command */
1248 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1249 case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1250 case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1251 case FCPIO_RESET_CMPL: /* fw completed reset */
1252 atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1258 switch (desc->hdr.type) {
1259 case FCPIO_ACK: /* fw copied copy wq desc to its queue */
1260 fnic_fcpio_ack_handler(fnic, cq_index, desc);
1263 case FCPIO_ICMND_CMPL: /* fw completed a command */
1264 fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
1267 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1268 fnic_fcpio_itmf_cmpl_handler(fnic, desc);
1271 case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1272 case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1273 fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
1276 case FCPIO_RESET_CMPL: /* fw completed reset */
1277 fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
1281 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1282 "firmware completion type %d\n",
1291 * fnic_wq_copy_cmpl_handler
1292 * Routine to process wq copy
1294 int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
1296 unsigned int wq_work_done = 0;
1297 unsigned int i, cq_index;
1298 unsigned int cur_work_done;
1300 for (i = 0; i < fnic->wq_copy_count; i++) {
1301 cq_index = i + fnic->raw_wq_count + fnic->rq_count;
1302 cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
1303 fnic_fcpio_cmpl_handler,
1305 wq_work_done += cur_work_done;
1307 return wq_work_done;
1310 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
1313 struct fnic_io_req *io_req;
1314 unsigned long flags = 0;
1315 struct scsi_cmnd *sc;
1316 spinlock_t *io_lock;
1317 unsigned long start_time = 0;
1318 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1320 for (i = 0; i < fnic->fnic_max_tag_id; i++) {
1321 if (i == exclude_id)
1324 io_lock = fnic_io_lock_tag(fnic, i);
1325 spin_lock_irqsave(io_lock, flags);
1326 sc = scsi_host_find_tag(fnic->lport->host, i);
1328 spin_unlock_irqrestore(io_lock, flags);
1332 io_req = (struct fnic_io_req *)CMD_SP(sc);
1333 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1334 !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
1336 * We will be here only when FW completes reset
1337 * without sending completions for outstanding ios.
1339 CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
1340 if (io_req && io_req->dr_done)
1341 complete(io_req->dr_done);
1342 else if (io_req && io_req->abts_done)
1343 complete(io_req->abts_done);
1344 spin_unlock_irqrestore(io_lock, flags);
1346 } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1347 spin_unlock_irqrestore(io_lock, flags);
1351 spin_unlock_irqrestore(io_lock, flags);
1352 goto cleanup_scsi_cmd;
1357 spin_unlock_irqrestore(io_lock, flags);
1360 * If there is a scsi_cmnd associated with this io_req, then
1361 * free the corresponding state
1363 start_time = io_req->start_time;
1364 fnic_release_ioreq_buf(fnic, io_req, sc);
1365 mempool_free(io_req, fnic->io_req_pool);
1368 sc->result = DID_TRANSPORT_DISRUPTED << 16;
1369 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1370 "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
1371 __func__, (jiffies - start_time));
1373 if (atomic64_read(&fnic->io_cmpl_skip))
1374 atomic64_dec(&fnic->io_cmpl_skip);
1376 atomic64_inc(&fnic_stats->io_stats.io_completions);
1378 /* Complete the command to SCSI */
1379 if (sc->scsi_done) {
1380 FNIC_TRACE(fnic_cleanup_io,
1381 sc->device->host->host_no, i, sc,
1382 jiffies_to_msecs(jiffies - start_time),
1383 0, ((u64)sc->cmnd[0] << 32 |
1384 (u64)sc->cmnd[2] << 24 |
1385 (u64)sc->cmnd[3] << 16 |
1386 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1387 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1394 void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
1395 struct fcpio_host_req *desc)
1398 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1399 struct fnic_io_req *io_req;
1400 struct scsi_cmnd *sc;
1401 unsigned long flags;
1402 spinlock_t *io_lock;
1403 unsigned long start_time = 0;
1405 /* get the tag reference */
1406 fcpio_tag_id_dec(&desc->hdr.tag, &id);
1407 id &= FNIC_TAG_MASK;
1409 if (id >= fnic->fnic_max_tag_id)
1412 sc = scsi_host_find_tag(fnic->lport->host, id);
1416 io_lock = fnic_io_lock_hash(fnic, sc);
1417 spin_lock_irqsave(io_lock, flags);
1419 /* Get the IO context which this desc refers to */
1420 io_req = (struct fnic_io_req *)CMD_SP(sc);
1422 /* fnic interrupts are turned off by now */
1425 spin_unlock_irqrestore(io_lock, flags);
1426 goto wq_copy_cleanup_scsi_cmd;
1431 spin_unlock_irqrestore(io_lock, flags);
1433 start_time = io_req->start_time;
1434 fnic_release_ioreq_buf(fnic, io_req, sc);
1435 mempool_free(io_req, fnic->io_req_pool);
1437 wq_copy_cleanup_scsi_cmd:
1438 sc->result = DID_NO_CONNECT << 16;
1439 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
1440 " DID_NO_CONNECT\n");
1442 if (sc->scsi_done) {
1443 FNIC_TRACE(fnic_wq_copy_cleanup_handler,
1444 sc->device->host->host_no, id, sc,
1445 jiffies_to_msecs(jiffies - start_time),
1446 0, ((u64)sc->cmnd[0] << 32 |
1447 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1448 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1449 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1455 static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
1456 u32 task_req, u8 *fc_lun,
1457 struct fnic_io_req *io_req)
1459 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1460 struct Scsi_Host *host = fnic->lport->host;
1461 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1462 unsigned long flags;
1464 spin_lock_irqsave(host->host_lock, flags);
1465 if (unlikely(fnic_chk_state_flags_locked(fnic,
1466 FNIC_FLAGS_IO_BLOCKED))) {
1467 spin_unlock_irqrestore(host->host_lock, flags);
1470 atomic_inc(&fnic->in_flight);
1471 spin_unlock_irqrestore(host->host_lock, flags);
1473 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
1475 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1476 free_wq_copy_descs(fnic, wq);
1478 if (!vnic_wq_copy_desc_avail(wq)) {
1479 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1480 atomic_dec(&fnic->in_flight);
1481 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1482 "fnic_queue_abort_io_req: failure: no descriptors\n");
1483 atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
1486 fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
1487 0, task_req, tag, fc_lun, io_req->port_id,
1488 fnic->config.ra_tov, fnic->config.ed_tov);
1490 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1491 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
1492 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
1493 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
1494 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
1496 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1497 atomic_dec(&fnic->in_flight);
1502 static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
1507 struct fnic_io_req *io_req;
1508 spinlock_t *io_lock;
1509 unsigned long flags;
1510 struct scsi_cmnd *sc;
1511 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
1512 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1513 struct scsi_lun fc_lun;
1514 enum fnic_ioreq_state old_ioreq_state;
1516 FNIC_SCSI_DBG(KERN_DEBUG,
1518 "fnic_rport_exch_reset called portid 0x%06x\n",
1521 if (fnic->in_remove)
1524 for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
1526 io_lock = fnic_io_lock_tag(fnic, tag);
1527 spin_lock_irqsave(io_lock, flags);
1528 sc = scsi_host_find_tag(fnic->lport->host, tag);
1530 spin_unlock_irqrestore(io_lock, flags);
1534 io_req = (struct fnic_io_req *)CMD_SP(sc);
1536 if (!io_req || io_req->port_id != port_id) {
1537 spin_unlock_irqrestore(io_lock, flags);
1541 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1542 (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
1543 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1544 "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
1546 spin_unlock_irqrestore(io_lock, flags);
1551 * Found IO that is still pending with firmware and
1552 * belongs to rport that went away
1554 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1555 spin_unlock_irqrestore(io_lock, flags);
1558 if (io_req->abts_done) {
1559 shost_printk(KERN_ERR, fnic->lport->host,
1560 "fnic_rport_exch_reset: io_req->abts_done is set "
1562 fnic_ioreq_state_to_str(CMD_STATE(sc)));
1565 if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
1566 shost_printk(KERN_ERR, fnic->lport->host,
1568 "IO not yet issued %p tag 0x%x flags "
1570 sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
1572 old_ioreq_state = CMD_STATE(sc);
1573 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1574 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1575 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1576 atomic64_inc(&reset_stats->device_reset_terminates);
1577 abt_tag = (tag | FNIC_TAG_DEV_RST);
1578 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1579 "fnic_rport_exch_reset dev rst sc 0x%p\n",
1583 BUG_ON(io_req->abts_done);
1585 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1586 "fnic_rport_reset_exch: Issuing abts\n");
1588 spin_unlock_irqrestore(io_lock, flags);
1590 /* Now queue the abort command to firmware */
1591 int_to_scsilun(sc->device->lun, &fc_lun);
1593 if (fnic_queue_abort_io_req(fnic, abt_tag,
1594 FCPIO_ITMF_ABT_TASK_TERM,
1595 fc_lun.scsi_lun, io_req)) {
1597 * Revert the cmd state back to old state, if
1598 * it hasn't changed in between. This cmd will get
1599 * aborted later by scsi_eh, or cleaned up during
1602 spin_lock_irqsave(io_lock, flags);
1603 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1604 CMD_STATE(sc) = old_ioreq_state;
1605 spin_unlock_irqrestore(io_lock, flags);
1607 spin_lock_irqsave(io_lock, flags);
1608 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
1609 CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
1611 CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
1612 spin_unlock_irqrestore(io_lock, flags);
1613 atomic64_inc(&term_stats->terminates);
1617 if (term_cnt > atomic64_read(&term_stats->max_terminates))
1618 atomic64_set(&term_stats->max_terminates, term_cnt);
1622 void fnic_terminate_rport_io(struct fc_rport *rport)
1627 struct fnic_io_req *io_req;
1628 spinlock_t *io_lock;
1629 unsigned long flags;
1630 struct scsi_cmnd *sc;
1631 struct scsi_lun fc_lun;
1632 struct fc_rport_libfc_priv *rdata;
1633 struct fc_lport *lport;
1635 struct fc_rport *cmd_rport;
1636 struct reset_stats *reset_stats;
1637 struct terminate_stats *term_stats;
1638 enum fnic_ioreq_state old_ioreq_state;
1641 printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
1644 rdata = rport->dd_data;
1647 printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
1650 lport = rdata->local_port;
1653 printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
1656 fnic = lport_priv(lport);
1657 FNIC_SCSI_DBG(KERN_DEBUG,
1658 fnic->lport->host, "fnic_terminate_rport_io called"
1659 " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
1660 rport->port_name, rport->node_name, rport,
1663 if (fnic->in_remove)
1666 reset_stats = &fnic->fnic_stats.reset_stats;
1667 term_stats = &fnic->fnic_stats.term_stats;
1669 for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
1671 io_lock = fnic_io_lock_tag(fnic, tag);
1672 spin_lock_irqsave(io_lock, flags);
1673 sc = scsi_host_find_tag(fnic->lport->host, tag);
1675 spin_unlock_irqrestore(io_lock, flags);
1679 cmd_rport = starget_to_rport(scsi_target(sc->device));
1680 if (rport != cmd_rport) {
1681 spin_unlock_irqrestore(io_lock, flags);
1685 io_req = (struct fnic_io_req *)CMD_SP(sc);
1687 if (!io_req || rport != cmd_rport) {
1688 spin_unlock_irqrestore(io_lock, flags);
1692 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1693 (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
1694 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1695 "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
1697 spin_unlock_irqrestore(io_lock, flags);
1701 * Found IO that is still pending with firmware and
1702 * belongs to rport that went away
1704 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1705 spin_unlock_irqrestore(io_lock, flags);
1708 if (io_req->abts_done) {
1709 shost_printk(KERN_ERR, fnic->lport->host,
1710 "fnic_terminate_rport_io: io_req->abts_done is set "
1712 fnic_ioreq_state_to_str(CMD_STATE(sc)));
1714 if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
1715 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1716 "fnic_terminate_rport_io "
1717 "IO not yet issued %p tag 0x%x flags "
1719 sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
1721 old_ioreq_state = CMD_STATE(sc);
1722 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1723 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1724 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1725 atomic64_inc(&reset_stats->device_reset_terminates);
1726 abt_tag = (tag | FNIC_TAG_DEV_RST);
1727 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1728 "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
1731 BUG_ON(io_req->abts_done);
1733 FNIC_SCSI_DBG(KERN_DEBUG,
1735 "fnic_terminate_rport_io: Issuing abts\n");
1737 spin_unlock_irqrestore(io_lock, flags);
1739 /* Now queue the abort command to firmware */
1740 int_to_scsilun(sc->device->lun, &fc_lun);
1742 if (fnic_queue_abort_io_req(fnic, abt_tag,
1743 FCPIO_ITMF_ABT_TASK_TERM,
1744 fc_lun.scsi_lun, io_req)) {
1746 * Revert the cmd state back to old state, if
1747 * it hasn't changed in between. This cmd will get
1748 * aborted later by scsi_eh, or cleaned up during
1751 spin_lock_irqsave(io_lock, flags);
1752 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1753 CMD_STATE(sc) = old_ioreq_state;
1754 spin_unlock_irqrestore(io_lock, flags);
1756 spin_lock_irqsave(io_lock, flags);
1757 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
1758 CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
1760 CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
1761 spin_unlock_irqrestore(io_lock, flags);
1762 atomic64_inc(&term_stats->terminates);
1766 if (term_cnt > atomic64_read(&term_stats->max_terminates))
1767 atomic64_set(&term_stats->max_terminates, term_cnt);
1772 * This function is exported to SCSI for sending abort cmnds.
1773 * A SCSI IO is represented by an io_req in the driver.
1774 * The io_req is linked to the SCSI cmd, and thus to the ULP's IO.
1776 int fnic_abort_cmd(struct scsi_cmnd *sc)
1778 struct fc_lport *lp;
1780 struct fnic_io_req *io_req = NULL;
1781 struct fc_rport *rport;
1782 spinlock_t *io_lock;
1783 unsigned long flags;
1784 unsigned long start_time = 0;
1787 struct scsi_lun fc_lun;
1788 struct fnic_stats *fnic_stats;
1789 struct abort_stats *abts_stats;
1790 struct terminate_stats *term_stats;
1791 enum fnic_ioreq_state old_ioreq_state;
1793 DECLARE_COMPLETION_ONSTACK(tm_done);
1795 /* Wait for rport to unblock */
1796 fc_block_scsi_eh(sc);
1798 /* Get local-port, check ready and link up */
1799 lp = shost_priv(sc->device->host);
1801 fnic = lport_priv(lp);
1802 fnic_stats = &fnic->fnic_stats;
1803 abts_stats = &fnic->fnic_stats.abts_stats;
1804 term_stats = &fnic->fnic_stats.term_stats;
1806 rport = starget_to_rport(scsi_target(sc->device));
1807 tag = sc->request->tag;
1808 FNIC_SCSI_DBG(KERN_DEBUG,
1810 "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
1811 rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
1813 CMD_FLAGS(sc) = FNIC_NO_FLAGS;
1815 if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
1817 goto fnic_abort_cmd_end;
1821 * Avoid a race between SCSI issuing the abort and the device
1822 * completing the command.
1824 * If the command is already completed by the fw cmpl code,
1825 * we just return SUCCESS from here. This means that the abort
1826 * succeeded. In the SCSI ML, since the command has already timed
1827 * out, the completion won't actually complete the command and it
1828 * will be considered an aborted command.
1830 * The CMD_SP will not be cleared except while holding io_req_lock.
1832 io_lock = fnic_io_lock_hash(fnic, sc);
1833 spin_lock_irqsave(io_lock, flags);
1834 io_req = (struct fnic_io_req *)CMD_SP(sc);
1836 spin_unlock_irqrestore(io_lock, flags);
1837 goto fnic_abort_cmd_end;
1840 io_req->abts_done = &tm_done;
1842 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1843 spin_unlock_irqrestore(io_lock, flags);
1847 * Command is still pending, need to abort it
1848 * If the firmware completes the command after this point,
1849 * the completion won't be done till mid-layer, since abort
1850 * has already started.
1852 old_ioreq_state = CMD_STATE(sc);
1853 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1854 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1856 spin_unlock_irqrestore(io_lock, flags);
1859 * Check readiness of the remote port. If the path to remote
1860 * port is up, then send abts to the remote port to terminate
1861 * the IO. Else, just locally terminate the IO in the firmware
1863 if (fc_remote_port_chkready(rport) == 0)
1864 task_req = FCPIO_ITMF_ABT_TASK;
1866 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
1867 task_req = FCPIO_ITMF_ABT_TASK_TERM;
1870 /* Now queue the abort command to firmware */
1871 int_to_scsilun(sc->device->lun, &fc_lun);
1873 if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
1874 fc_lun.scsi_lun, io_req)) {
1875 spin_lock_irqsave(io_lock, flags);
1876 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1877 CMD_STATE(sc) = old_ioreq_state;
1878 io_req = (struct fnic_io_req *)CMD_SP(sc);
1880 io_req->abts_done = NULL;
1881 spin_unlock_irqrestore(io_lock, flags);
1883 goto fnic_abort_cmd_end;
1885 if (task_req == FCPIO_ITMF_ABT_TASK) {
1886 CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
1887 atomic64_inc(&fnic_stats->abts_stats.aborts);
1889 CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
1890 atomic64_inc(&fnic_stats->term_stats.terminates);
1894 * We queued an abort IO, wait for its completion.
1895 * Once the firmware completes the abort command, it will
1896 * wake up this thread.
1899 wait_for_completion_timeout(&tm_done,
1901 (2 * fnic->config.ra_tov +
1902 fnic->config.ed_tov));
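/*
 * Illustrative arithmetic only: with, say, ra_tov == 10000 ms and
 * ed_tov == 2000 ms from the vNIC config, the wait above is bounded by
 * 2 * 10000 + 2000 == 22000 ms before we give up on the firmware abort.
 */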
1904 /* Check the abort status */
1905 spin_lock_irqsave(io_lock, flags);
1907 io_req = (struct fnic_io_req *)CMD_SP(sc);
1909 atomic64_inc(&fnic_stats->io_stats.ioreq_null);
1910 spin_unlock_irqrestore(io_lock, flags);
1911 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
1913 goto fnic_abort_cmd_end;
1915 io_req->abts_done = NULL;
1917 /* fw did not complete abort, timed out */
1918 if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
1919 spin_unlock_irqrestore(io_lock, flags);
1920 if (task_req == FCPIO_ITMF_ABT_TASK) {
1921 atomic64_inc(&abts_stats->abort_drv_timeouts);
1923 atomic64_inc(&term_stats->terminate_drv_timeouts);
1925 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
1927 goto fnic_abort_cmd_end;
1930 /* IO out of order */
1932 if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
1933 spin_unlock_irqrestore(io_lock, flags);
1934 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1935 "Issuing Host reset due to out of order IO\n");
1937 if (fnic_host_reset(sc) == FAILED) {
1938 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1939 "fnic_host_reset failed.\n");
1942 goto fnic_abort_cmd_end;
1945 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
1947 start_time = io_req->start_time;
1949 * firmware completed the abort, check the status,
1950 * free the io_req if successful. If abort fails,
1951 * Device reset will clean the I/O.
1953 if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
1957 spin_unlock_irqrestore(io_lock, flags);
1958 goto fnic_abort_cmd_end;
1961 spin_unlock_irqrestore(io_lock, flags);
1963 fnic_release_ioreq_buf(fnic, io_req, sc);
1964 mempool_free(io_req, fnic->io_req_pool);
1966 if (sc->scsi_done) {
1967 /* Call SCSI completion function to complete the IO */
1968 sc->result = (DID_ABORT << 16);
1970 atomic64_dec(&fnic_stats->io_stats.active_ios);
1971 if (atomic64_read(&fnic->io_cmpl_skip))
1972 atomic64_dec(&fnic->io_cmpl_skip);
1974 atomic64_inc(&fnic_stats->io_stats.io_completions);
1978 FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
1979 sc->request->tag, sc,
1980 jiffies_to_msecs(jiffies - start_time),
1981 0, ((u64)sc->cmnd[0] << 32 |
1982 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1983 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1984 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1986 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1987 "Returning from abort cmd type %x %s\n", task_req,
1989 "SUCCESS" : "FAILED");
1993 static inline int fnic_queue_dr_io_req(struct fnic *fnic,
1994 struct scsi_cmnd *sc,
1995 struct fnic_io_req *io_req)
1997 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1998 struct Scsi_Host *host = fnic->lport->host;
1999 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
2000 struct scsi_lun fc_lun;
2002 unsigned long intr_flags;
2004 spin_lock_irqsave(host->host_lock, intr_flags);
2005 if (unlikely(fnic_chk_state_flags_locked(fnic,
2006 FNIC_FLAGS_IO_BLOCKED))) {
2007 spin_unlock_irqrestore(host->host_lock, intr_flags);
2010 atomic_inc(&fnic->in_flight);
2011 spin_unlock_irqrestore(host->host_lock, intr_flags);
2013 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
2015 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
2016 free_wq_copy_descs(fnic, wq);
2018 if (!vnic_wq_copy_desc_avail(wq)) {
2019 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2020 "queue_dr_io_req failure - no descriptors\n");
2021 atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
2026 /* fill in the lun info */
2027 int_to_scsilun(sc->device->lun, &fc_lun);
2029 fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
2030 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
2031 fc_lun.scsi_lun, io_req->port_id,
2032 fnic->config.ra_tov, fnic->config.ed_tov);
2034 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
2035 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
2036 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
2037 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
2038 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
2041 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
2042 atomic_dec(&fnic->in_flight);
2048	 * Clean up any pending aborts on the LUN.
2049	 * For each outstanding IO on this LUN whose abort the firmware has not
2050	 * completed, issue a local abort and wait for it to complete. Return 0 if
2051	 * all commands are successfully aborted, 1 otherwise.
2053 static int fnic_clean_pending_aborts(struct fnic *fnic,
2054 struct scsi_cmnd *lr_sc,
2059 struct fnic_io_req *io_req;
2060 spinlock_t *io_lock;
2061 unsigned long flags;
2063 struct scsi_cmnd *sc;
2064 struct scsi_lun fc_lun;
2065 struct scsi_device *lun_dev = lr_sc->device;
2066 DECLARE_COMPLETION_ONSTACK(tm_done);
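/*
 * A single on-stack completion is reused for every tag in the walk
 * below; it is hooked up as io_req->abts_done so each terminate
 * completion can wake this thread.
 */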
2067 enum fnic_ioreq_state old_ioreq_state;
2069 for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
2070 io_lock = fnic_io_lock_tag(fnic, tag);
2071 spin_lock_irqsave(io_lock, flags);
2072 sc = scsi_host_find_tag(fnic->lport->host, tag);
2074	 * Ignore this LUN reset cmd if it was issued using a new SC,
2075	 * and any cmds that do not belong to this LUN.
2077 if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) {
2078 spin_unlock_irqrestore(io_lock, flags);
2082 io_req = (struct fnic_io_req *)CMD_SP(sc);
2084 if (!io_req || sc->device != lun_dev) {
2085 spin_unlock_irqrestore(io_lock, flags);
2090 * Found IO that is still pending with firmware and
2091 * belongs to the LUN that we are resetting
2093 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2094 "Found IO in %s on lun\n",
2095 fnic_ioreq_state_to_str(CMD_STATE(sc)));
2097 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
2098 spin_unlock_irqrestore(io_lock, flags);
2101 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
2102 (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
2103 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2104 "%s dev rst not pending sc 0x%p\n", __func__,
2106 spin_unlock_irqrestore(io_lock, flags);
2110 if (io_req->abts_done)
2111 shost_printk(KERN_ERR, fnic->lport->host,
2112 "%s: io_req->abts_done is set state is %s\n",
2113 __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
2114 old_ioreq_state = CMD_STATE(sc);
2116	 * Any pending IO issued prior to the reset is expected to be
2117	 * in the abts-pending state; if not, set
2118	 * FNIC_IOREQ_ABTS_PENDING to indicate the IO has an abort pending.
2119	 * When the IO completes, it will be handed over and
2120	 * handled in this function.
2122 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
2124 BUG_ON(io_req->abts_done);
2127 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
2128 abt_tag |= FNIC_TAG_DEV_RST;
2129 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2130 "%s: dev rst sc 0x%p\n", __func__, sc);
2133 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
2134 io_req->abts_done = &tm_done;
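/*
 * abts_done is completed by the terminate completion path.
 * CMD_ABTS_STATUS starts out as FCPIO_INVALID_CODE; if it still holds
 * that value after the wait below, the firmware never completed the
 * terminate and the cleanup is failed.
 */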
2135 spin_unlock_irqrestore(io_lock, flags);
2137 /* Now queue the abort command to firmware */
2138 int_to_scsilun(sc->device->lun, &fc_lun);
2140 if (fnic_queue_abort_io_req(fnic, abt_tag,
2141 FCPIO_ITMF_ABT_TASK_TERM,
2142 fc_lun.scsi_lun, io_req)) {
2143 spin_lock_irqsave(io_lock, flags);
2144 io_req = (struct fnic_io_req *)CMD_SP(sc);
2146 io_req->abts_done = NULL;
2147 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
2148 CMD_STATE(sc) = old_ioreq_state;
2149 spin_unlock_irqrestore(io_lock, flags);
2151 goto clean_pending_aborts_end;
2153 spin_lock_irqsave(io_lock, flags);
2154 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
2155 CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
2156 spin_unlock_irqrestore(io_lock, flags);
2158 CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
2160 wait_for_completion_timeout(&tm_done,
2162 (fnic->config.ed_tov));
2164	/* Recheck the cmd state to see if it is now aborted */
2165 spin_lock_irqsave(io_lock, flags);
2166 io_req = (struct fnic_io_req *)CMD_SP(sc);
2168 spin_unlock_irqrestore(io_lock, flags);
2169 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
2173 io_req->abts_done = NULL;
2175 /* if abort is still pending with fw, fail */
2176 if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
2177 spin_unlock_irqrestore(io_lock, flags);
2178 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
2180 goto clean_pending_aborts_end;
2182 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
2184 /* original sc used for lr is handled by dev reset code */
2187 spin_unlock_irqrestore(io_lock, flags);
2189 /* original sc used for lr is handled by dev reset code */
2191 fnic_release_ioreq_buf(fnic, io_req, sc);
2192 mempool_free(io_req, fnic->io_req_pool);
2196	 * Any IO returned during the reset needs scsi_done to be called
2197	 * to hand the scsi_cmnd back to the upper layer.
2199 if (sc->scsi_done) {
2200 /* Set result to let upper SCSI layer retry */
2201 sc->result = DID_RESET << 16;
2206 schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
2208	/* walk again to check if IOs are still pending in fw */
2209 if (fnic_is_abts_pending(fnic, lr_sc))
2212 clean_pending_aborts_end:
2217	 * fnic_scsi_host_start_tag
2218	 * Allocates a tag id from the host's tag list
2221 fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2223 struct blk_queue_tag *bqt = fnic->lport->host->bqt;
2224 int tag, ret = SCSI_NO_TAG;
2228 pr_err("Tags are not supported\n");
2233 tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
2234 if (tag >= bqt->max_depth) {
2235 pr_err("Tag allocation failure\n");
2238 } while (test_and_set_bit(tag, bqt->tag_map));
2240 bqt->tag_index[tag] = sc->request;
2241 sc->request->tag = tag;
2243 if (!sc->request->special)
2244 sc->request->special = sc;
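/*
 * sc is recorded in request->special so that scsi_host_find_tag(),
 * which resolves a tag back to the command through the block request,
 * can find it later.
 */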
2253 * fnic_scsi_host_end_tag
2254 * frees tag allocated by fnic_scsi_host_start_tag.
2257 fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2259 struct blk_queue_tag *bqt = fnic->lport->host->bqt;
2260 int tag = sc->request->tag;
2262 if (tag == SCSI_NO_TAG)
2265 BUG_ON(!bqt || !bqt->tag_index[tag]);
2269 bqt->tag_index[tag] = NULL;
2270 clear_bit(tag, bqt->tag_map);
2276	 * The SCSI EH thread issues a LUN reset when one or more commands on a LUN
2277	 * fail to get aborted. It calls the driver's eh_device_reset with a SCSI command
2280 int fnic_device_reset(struct scsi_cmnd *sc)
2282 struct fc_lport *lp;
2284 struct fnic_io_req *io_req = NULL;
2285 struct fc_rport *rport;
2288 spinlock_t *io_lock;
2289 unsigned long flags;
2290 unsigned long start_time = 0;
2291 struct scsi_lun fc_lun;
2292 struct fnic_stats *fnic_stats;
2293 struct reset_stats *reset_stats;
2295 DECLARE_COMPLETION_ONSTACK(tm_done);
2296 int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
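/*
 * Overall flow: make sure the rport is ready, get (or allocate) a tag
 * and io_req for the command, queue a LUN reset to the firmware, and
 * wait for its completion. On timeout a terminate is issued; finally,
 * any other pending aborts on the LUN are cleaned up before the reset
 * is declared successful.
 */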
2299 /* Wait for rport to unblock */
2300 fc_block_scsi_eh(sc);
2302 /* Get local-port, check ready and link up */
2303 lp = shost_priv(sc->device->host);
2305 fnic = lport_priv(lp);
2306 fnic_stats = &fnic->fnic_stats;
2307 reset_stats = &fnic->fnic_stats.reset_stats;
2309 atomic64_inc(&reset_stats->device_resets);
2311 rport = starget_to_rport(scsi_target(sc->device));
2312 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2313 "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
2314 rport->port_id, sc->device->lun, sc);
2316 if (lp->state != LPORT_ST_READY || !(lp->link_up))
2317 goto fnic_device_reset_end;
2319 /* Check if remote port up */
2320 if (fc_remote_port_chkready(rport)) {
2321 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
2322 goto fnic_device_reset_end;
2325 CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
2326 /* Allocate tag if not present */
2328 tag = sc->request->tag;
2329 if (unlikely(tag < 0)) {
2331	 * XXX(hch): currently the midlayer fakes up a struct
2332 * request for the explicit reset ioctls, and those
2333 * don't have a tag allocated to them. The below
2334 * code pokes into midlayer structures to paper over
2335 * this design issue, but that won't work for blk-mq.
2337 * Either someone who can actually test the hardware
2338 * will have to come up with a similar hack for the
2339 * blk-mq case, or we'll have to bite the bullet and
2340 * fix the way the EH ioctls work for real, but until
2341 * that happens we fail these explicit requests here.
2344 tag = fnic_scsi_host_start_tag(fnic, sc);
2345 if (unlikely(tag == SCSI_NO_TAG))
2346 goto fnic_device_reset_end;
2350 io_lock = fnic_io_lock_hash(fnic, sc);
2351 spin_lock_irqsave(io_lock, flags);
2352 io_req = (struct fnic_io_req *)CMD_SP(sc);
2355	 * If there is an io_req attached to this command, then use it;
2356	 * else allocate a new one.
2359 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
2361 spin_unlock_irqrestore(io_lock, flags);
2362 goto fnic_device_reset_end;
2364 memset(io_req, 0, sizeof(*io_req));
2365 io_req->port_id = rport->port_id;
2366 CMD_SP(sc) = (char *)io_req;
2368 io_req->dr_done = &tm_done;
2369 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
2370 CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
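/*
 * dr_done is completed by the ITMF completion path when the firmware
 * finishes the LUN reset; CMD_LR_STATUS stays FCPIO_INVALID_CODE until
 * then, which is how a timeout is detected below.
 */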
2371 spin_unlock_irqrestore(io_lock, flags);
2373 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
2376	 * Issue the device reset; if the enqueue fails, clean up the ioreq
2377	 * and break its association with the scsi cmd.
2379 if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
2380 spin_lock_irqsave(io_lock, flags);
2381 io_req = (struct fnic_io_req *)CMD_SP(sc);
2383 io_req->dr_done = NULL;
2384 goto fnic_device_reset_clean;
2386 spin_lock_irqsave(io_lock, flags);
2387 CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
2388 spin_unlock_irqrestore(io_lock, flags);
2391 * Wait on the local completion for LUN reset. The io_req may be
2392 * freed while we wait since we hold no lock.
2394 wait_for_completion_timeout(&tm_done,
2395 msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2397 spin_lock_irqsave(io_lock, flags);
2398 io_req = (struct fnic_io_req *)CMD_SP(sc);
2400 spin_unlock_irqrestore(io_lock, flags);
2401 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2402 "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
2403 goto fnic_device_reset_end;
2405 io_req->dr_done = NULL;
2407 status = CMD_LR_STATUS(sc);
2410	 * If the LUN reset did not complete, bail out with FAILED. The io_req
2411	 * gets cleaned up at higher levels of EH.
2413 if (status == FCPIO_INVALID_CODE) {
2414 atomic64_inc(&reset_stats->device_reset_timeouts);
2415 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2416 "Device reset timed out\n");
2417 CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
2418 spin_unlock_irqrestore(io_lock, flags);
2419 int_to_scsilun(sc->device->lun, &fc_lun);
2421	 * Issue an abort and terminate on the device reset request.
2422	 * If queueing the terminate fails, retry it after a delay.
2425 spin_lock_irqsave(io_lock, flags);
2426 if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
2427 spin_unlock_irqrestore(io_lock, flags);
2430 spin_unlock_irqrestore(io_lock, flags);
2431 if (fnic_queue_abort_io_req(fnic,
2432 tag | FNIC_TAG_DEV_RST,
2433 FCPIO_ITMF_ABT_TASK_TERM,
2434 fc_lun.scsi_lun, io_req)) {
2435 wait_for_completion_timeout(&tm_done,
2436 msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
2438 spin_lock_irqsave(io_lock, flags);
2439 CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
2440 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
2441 io_req->abts_done = &tm_done;
2442 spin_unlock_irqrestore(io_lock, flags);
2443 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2444 "Abort and terminate issued on Device reset "
2445 "tag 0x%x sc 0x%p\n", tag, sc);
2450 spin_lock_irqsave(io_lock, flags);
2451 if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
2452 spin_unlock_irqrestore(io_lock, flags);
2453 wait_for_completion_timeout(&tm_done,
2454 msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2457 io_req = (struct fnic_io_req *)CMD_SP(sc);
2458 io_req->abts_done = NULL;
2459 goto fnic_device_reset_clean;
2463 spin_unlock_irqrestore(io_lock, flags);
2466	/* Completed but not successful: clean up the io_req and return failure */
2467 if (status != FCPIO_SUCCESS) {
2468 spin_lock_irqsave(io_lock, flags);
2469 FNIC_SCSI_DBG(KERN_DEBUG,
2471 "Device reset completed - failed\n");
2472 io_req = (struct fnic_io_req *)CMD_SP(sc);
2473 goto fnic_device_reset_clean;
2477	 * Clean up any aborts on this LUN that have still not
2478	 * completed. If any of these fail, then the LUN reset fails.
2479	 * clean_pending_aborts cleans all cmds on this LUN except
2480	 * the LUN reset cmd itself. If all cmds get cleaned, the LUN reset succeeds.
2483 if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
2484 spin_lock_irqsave(io_lock, flags);
2485 io_req = (struct fnic_io_req *)CMD_SP(sc);
2486 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2487 "Device reset failed"
2488 " since could not abort all IOs\n");
2489 goto fnic_device_reset_clean;
2492 /* Clean lun reset command */
2493 spin_lock_irqsave(io_lock, flags);
2494 io_req = (struct fnic_io_req *)CMD_SP(sc);
2496 /* Completed, and successful */
2499 fnic_device_reset_clean:
2503 spin_unlock_irqrestore(io_lock, flags);
2506 start_time = io_req->start_time;
2507 fnic_release_ioreq_buf(fnic, io_req, sc);
2508 mempool_free(io_req, fnic->io_req_pool);
2511 fnic_device_reset_end:
2512 FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
2513 sc->request->tag, sc,
2514 jiffies_to_msecs(jiffies - start_time),
2515 0, ((u64)sc->cmnd[0] << 32 |
2516 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
2517 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
2518 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
2520 /* free tag if it is allocated */
2521 if (unlikely(tag_gen_flag))
2522 fnic_scsi_host_end_tag(fnic, sc);
2524 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2525 "Returning from device reset %s\n",
2527 "SUCCESS" : "FAILED");
2530 atomic64_inc(&reset_stats->device_reset_failures);
2535 /* Clean up all IOs, clean up libFC local port */
2536 int fnic_reset(struct Scsi_Host *shost)
2538 struct fc_lport *lp;
2541 struct reset_stats *reset_stats;
2543 lp = shost_priv(shost);
2544 fnic = lport_priv(lp);
2545 reset_stats = &fnic->fnic_stats.reset_stats;
2547 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2548 "fnic_reset called\n");
2550 atomic64_inc(&reset_stats->fnic_resets);
2553	 * Reset the local port; this will clean up libFC exchanges,
2554	 * reset remote port sessions, and, if the link is up, begin FLOGI.
2556 ret = fc_lport_reset(lp);
2558 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2559 "Returning from fnic reset %s\n",
2561 "SUCCESS" : "FAILED");
2564 atomic64_inc(&reset_stats->fnic_reset_completions);
2566 atomic64_inc(&reset_stats->fnic_reset_failures);
2572	 * SCSI error handling calls the driver's eh_host_reset if all prior
2573	 * error handling levels return FAILED. If the host reset completes
2574	 * successfully, and if the link is up, then fabric login begins.
2576	 * Host reset is the highest level of error recovery. If this fails, the
2577	 * host is offlined by SCSI.
2580 int fnic_host_reset(struct scsi_cmnd *sc)
2583 unsigned long wait_host_tmo;
2584 struct Scsi_Host *shost = sc->device->host;
2585 struct fc_lport *lp = shost_priv(shost);
2586 struct fnic *fnic = lport_priv(lp);
2587 unsigned long flags;
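/*
 * Only one internal host reset may be in progress at a time; a second
 * caller (for example the reset issued from the abort path) is skipped
 * and reported as already in progress.
 */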
2589 spin_lock_irqsave(&fnic->fnic_lock, flags);
2590 if (fnic->internal_reset_inprogress == 0) {
2591 fnic->internal_reset_inprogress = 1;
2593 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2594 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2595 "host reset in progress skipping another host reset\n");
2598 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2601	 * If fnic_reset is successful, wait for the fabric login to complete.
2602	 * scsi-ml tries to send a TUR to every device if the host reset is
2603	 * successful, so before returning to SCSI, the fabric should be up.
2605 ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
2606 if (ret == SUCCESS) {
2607 wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
2609 while (time_before(jiffies, wait_host_tmo)) {
2610 if ((lp->state == LPORT_ST_READY) &&
2619 spin_lock_irqsave(&fnic->fnic_lock, flags);
2620 fnic->internal_reset_inprogress = 0;
2621 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2626	 * This function is called from libFC when the host is being removed.
2628 void fnic_scsi_abort_io(struct fc_lport *lp)
2631 unsigned long flags;
2632 enum fnic_state old_state;
2633 struct fnic *fnic = lport_priv(lp);
2634 DECLARE_COMPLETION_ONSTACK(remove_wait);
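/*
 * remove_wait is completed by the firmware reset completion path, so
 * this function can wait for the reset to finish before the host is
 * torn down.
 */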
2636 /* Issue firmware reset for fnic, wait for reset to complete */
2638 spin_lock_irqsave(&fnic->fnic_lock, flags);
2639 if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2640 /* fw reset is in progress, poll for its completion */
2641 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2642 schedule_timeout(msecs_to_jiffies(100));
2643 goto retry_fw_reset;
2646 fnic->remove_wait = &remove_wait;
2647 old_state = fnic->state;
2648 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2649 fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2650 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2652 err = fnic_fw_reset_handler(fnic);
2654 spin_lock_irqsave(&fnic->fnic_lock, flags);
2655 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2656 fnic->state = old_state;
2657 fnic->remove_wait = NULL;
2658 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2662 /* Wait for firmware reset to complete */
2663 wait_for_completion_timeout(&remove_wait,
2664 msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
2666 spin_lock_irqsave(&fnic->fnic_lock, flags);
2667 fnic->remove_wait = NULL;
2668 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2669 "fnic_scsi_abort_io %s\n",
2670 (fnic->state == FNIC_IN_ETH_MODE) ?
2671 "SUCCESS" : "FAILED");
2672 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2677	 * This function is called from libFC to clean up driver IO state on link down.
2679 void fnic_scsi_cleanup(struct fc_lport *lp)
2681 unsigned long flags;
2682 enum fnic_state old_state;
2683 struct fnic *fnic = lport_priv(lp);
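/*
 * Unlike fnic_scsi_abort_io(), this path issues the firmware reset
 * without waiting for its completion.
 */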
2685 /* issue fw reset */
2687 spin_lock_irqsave(&fnic->fnic_lock, flags);
2688 if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2689 /* fw reset is in progress, poll for its completion */
2690 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2691 schedule_timeout(msecs_to_jiffies(100));
2692 goto retry_fw_reset;
2694 old_state = fnic->state;
2695 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2696 fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2697 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2699 if (fnic_fw_reset_handler(fnic)) {
2700 spin_lock_irqsave(&fnic->fnic_lock, flags);
2701 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2702 fnic->state = old_state;
2703 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2708 void fnic_empty_scsi_cleanup(struct fc_lport *lp)
2712 void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
2714 struct fnic *fnic = lport_priv(lp);
2716 /* Non-zero sid, nothing to do */
2718 goto call_fc_exch_mgr_reset;
2721 fnic_rport_exch_reset(fnic, did);
2722 goto call_fc_exch_mgr_reset;
2727 * link down or device being removed
2729 if (!fnic->in_remove)
2730 fnic_scsi_cleanup(lp);
2732 fnic_scsi_abort_io(lp);
2734 /* call libFC exch mgr reset to reset its exchanges */
2735 call_fc_exch_mgr_reset:
2736 fc_exch_mgr_reset(lp, sid, did);
2741	 * fnic_is_abts_pending() is a helper function that
2742	 * walks through the tag map to check whether any IOs are pending; if there
2743	 * is one, it returns 1 (true), otherwise 0 (false).
2744	 * If @lr_sc is non-NULL, it checks only IOs belonging to that particular LUN;
2745	 * otherwise, it checks all IOs.
2747 int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
2750 struct fnic_io_req *io_req;
2751 spinlock_t *io_lock;
2752 unsigned long flags;
2754 struct scsi_cmnd *sc;
2755 struct scsi_device *lun_dev = NULL;
2758 lun_dev = lr_sc->device;
2760	/* walk again to check if IOs are still pending in fw */
2761 for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
2762 sc = scsi_host_find_tag(fnic->lport->host, tag);
2764 * ignore this lun reset cmd or cmds that do not belong to
2767 if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
2770 io_lock = fnic_io_lock_hash(fnic, sc);
2771 spin_lock_irqsave(io_lock, flags);
2773 io_req = (struct fnic_io_req *)CMD_SP(sc);
2775 if (!io_req || sc->device != lun_dev) {
2776 spin_unlock_irqrestore(io_lock, flags);
2781 * Found IO that is still pending with firmware and
2782 * belongs to the LUN that we are resetting
2784 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2785 "Found IO in %s on lun\n",
2786 fnic_ioreq_state_to_str(CMD_STATE(sc)));
2788 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
2790 spin_unlock_irqrestore(io_lock, flags);