2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
18 #include <linux/mempool.h>
19 #include <linux/errno.h>
20 #include <linux/init.h>
21 #include <linux/workqueue.h>
22 #include <linux/pci.h>
23 #include <linux/scatterlist.h>
24 #include <linux/skbuff.h>
25 #include <linux/spinlock.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/delay.h>
29 #include <linux/gfp.h>
30 #include <scsi/scsi.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_tcq.h>
35 #include <scsi/fc/fc_els.h>
36 #include <scsi/fc/fc_fcoe.h>
37 #include <scsi/libfc.h>
38 #include <scsi/fc_frame.h>
42 const char *fnic_state_str[] = {
43 [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
44 [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
45 [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
46 [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
49 static const char *fnic_ioreq_state_str[] = {
50 [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
51 [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
52 [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
53 [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
54 [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
57 static const char *fcpio_status_str[] = {
58 [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
59 [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
60 [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
61 [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
62 [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
63 [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
64 [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
65 [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
66 [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
67 [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
68 [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
69 [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
70 [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
71 [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
72 [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
73 [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
74 [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
75 [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
76 [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
79 const char *fnic_state_to_str(unsigned int state)
81 if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
84 return fnic_state_str[state];
87 static const char *fnic_ioreq_state_to_str(unsigned int state)
89 if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
90 !fnic_ioreq_state_str[state])
93 return fnic_ioreq_state_str[state];
96 static const char *fnic_fcpio_status_to_str(unsigned int status)
98 if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
101 return fcpio_status_str[status];
104 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
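/*
 * Per-IO state is protected by a small array of spinlocks hashed by the
 * SCSI command tag; unrelated IOs that hash to the same bucket share a lock.
 */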
106 static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
107 struct scsi_cmnd *sc)
109 u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
111 return &fnic->io_req_lock[hash];
114 static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
117 return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
121 * Unmap the data buffer and sense buffer for an io_req,
122 * also unmap and free the device-private scatter/gather list.
124 static void fnic_release_ioreq_buf(struct fnic *fnic,
125 struct fnic_io_req *io_req,
126 struct scsi_cmnd *sc)
128 if (io_req->sgl_list_pa)
129 dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
130 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
135 mempool_free(io_req->sgl_list_alloc,
136 fnic->io_sgl_pool[io_req->sgl_type]);
137 if (io_req->sense_buf_pa)
138 dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
139 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
142 /* Free up Copy Wq descriptors. Called with copy_wq lock held */
143 static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
145 /* if no Ack received from firmware, then nothing to clean */
146 if (!fnic->fw_ack_recd[0])
150 * Update desc_available count based on number of freed descriptors
151 * Account for wraparound
153 if (wq->to_clean_index <= fnic->fw_ack_index[0])
154 wq->ring.desc_avail += (fnic->fw_ack_index[0]
155 - wq->to_clean_index + 1);
157 wq->ring.desc_avail += (wq->ring.desc_count
159 + fnic->fw_ack_index[0] + 1);
162 * Just bump the clean index to ack_index + 1, accounting for wraparound;
163 * this effectively frees up all descriptors between
164 * to_clean_index and fw_ack_index, both inclusive.
167 (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
169 /* we have processed the acks received so far */
170 fnic->fw_ack_recd[0] = 0;
176 * __fnic_set_state_flags
177 * Sets/Clears bits in fnic's state_flags
180 __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
181 unsigned long clearbits)
183 unsigned long flags = 0;
184 unsigned long host_lock_flags = 0;
186 spin_lock_irqsave(&fnic->fnic_lock, flags);
187 spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags);
190 fnic->state_flags &= ~st_flags;
192 fnic->state_flags |= st_flags;
194 spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
195 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
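/*
 * Callers use the fnic_set_state_flags() and fnic_clear_state_flags()
 * wrappers (as seen below) rather than invoking __fnic_set_state_flags()
 * directly.
 */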
202 * fnic_fw_reset_handler
203 * Routine to send reset msg to fw
205 int fnic_fw_reset_handler(struct fnic *fnic)
207 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
211 /* indicate fwreset to io path */
212 fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
214 skb_queue_purge(&fnic->frame_queue);
215 skb_queue_purge(&fnic->tx_queue);
217 /* wait for io cmpl */
218 while (atomic_read(&fnic->in_flight))
219 schedule_timeout(msecs_to_jiffies(1));
221 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
223 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
224 free_wq_copy_descs(fnic, wq);
226 if (!vnic_wq_copy_desc_avail(wq))
229 fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
230 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
231 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
232 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
233 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
235 &fnic->fnic_stats.fw_stats.active_fw_reqs));
238 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
241 atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
242 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
243 "Issued fw reset\n");
245 fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
246 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
247 "Failed to issue fw reset\n");
255 * fnic_flogi_reg_handler
256 * Routine to send flogi register msg to fw
258 int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
260 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
261 enum fcpio_flogi_reg_format_type format;
262 struct fc_lport *lp = fnic->lport;
267 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
269 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
270 free_wq_copy_descs(fnic, wq);
272 if (!vnic_wq_copy_desc_avail(wq)) {
274 goto flogi_reg_ioreq_end;
277 if (fnic->ctlr.map_dest) {
278 memset(gw_mac, 0xff, ETH_ALEN);
279 format = FCPIO_FLOGI_REG_DEF_DEST;
281 memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
282 format = FCPIO_FLOGI_REG_GW_DEST;
285 if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
286 fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
289 lp->r_a_tov, lp->e_d_tov);
290 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
291 "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
292 fc_id, fnic->data_src_addr, gw_mac);
294 fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
295 format, fc_id, gw_mac);
296 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
297 "FLOGI reg issued fcid %x map %d dest %pM\n",
298 fc_id, fnic->ctlr.map_dest, gw_mac);
301 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
302 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
303 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
304 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
305 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
308 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
313 * fnic_queue_wq_copy_desc
314 * Routine to enqueue a wq copy desc
316 static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
317 struct vnic_wq_copy *wq,
318 struct fnic_io_req *io_req,
319 struct scsi_cmnd *sc,
322 struct scatterlist *sg;
323 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
324 struct fc_rport_libfc_priv *rp = rport->dd_data;
325 struct host_sg_desc *desc;
326 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
328 unsigned long intr_flags;
331 struct scsi_lun fc_lun;
334 /* For each SGE, create a device desc entry */
335 desc = io_req->sgl_list;
336 for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
337 desc->addr = cpu_to_le64(sg_dma_address(sg));
338 desc->len = cpu_to_le32(sg_dma_len(sg));
343 io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
345 sizeof(io_req->sgl_list[0]) * sg_count,
347 if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
348 printk(KERN_ERR "DMA mapping failed\n");
349 return SCSI_MLQUEUE_HOST_BUSY;
353 io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
355 SCSI_SENSE_BUFFERSIZE,
357 if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
358 dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
359 sizeof(io_req->sgl_list[0]) * sg_count,
361 printk(KERN_ERR "DMA mapping failed\n");
362 return SCSI_MLQUEUE_HOST_BUSY;
365 int_to_scsilun(sc->device->lun, &fc_lun);
367 /* Enqueue the descriptor in the Copy WQ */
368 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
370 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
371 free_wq_copy_descs(fnic, wq);
373 if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
374 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
375 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
376 "fnic_queue_wq_copy_desc failure - no descriptors\n");
377 atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
378 return SCSI_MLQUEUE_HOST_BUSY;
382 if (sc->sc_data_direction == DMA_FROM_DEVICE)
383 flags = FCPIO_ICMND_RDDATA;
384 else if (sc->sc_data_direction == DMA_TO_DEVICE)
385 flags = FCPIO_ICMND_WRDATA;
388 if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
389 (rp->flags & FC_RP_FLAGS_RETRY))
390 exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
392 fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
393 0, exch_flags, io_req->sgl_cnt,
394 SCSI_SENSE_BUFFERSIZE,
396 io_req->sense_buf_pa,
397 0, /* scsi cmd ref, always 0 */
398 FCPIO_ICMND_PTA_SIMPLE,
399 /* scsi pri and tag */
400 flags, /* command flags */
401 sc->cmnd, sc->cmd_len,
403 fc_lun.scsi_lun, io_req->port_id,
404 rport->maxframe_size, rp->r_a_tov,
407 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
408 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
409 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
410 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
411 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
413 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
419 * Routine to send a scsi cdb
420 * Called with host_lock held and interrupts disabled.
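 * Maps the command for DMA, allocates an io_req from the mempool, and
 * queues an ICMND descriptor on the copy WQ for the firmware to process.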
422 static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
424 struct fc_lport *lp = shost_priv(sc->device->host);
425 struct fc_rport *rport;
426 struct fnic_io_req *io_req = NULL;
427 struct fnic *fnic = lport_priv(lp);
428 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
429 struct vnic_wq_copy *wq;
433 unsigned long flags = 0;
435 spinlock_t *io_lock = NULL;
436 int io_lock_acquired = 0;
437 struct fc_rport_libfc_priv *rp;
439 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
440 return SCSI_MLQUEUE_HOST_BUSY;
442 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
443 return SCSI_MLQUEUE_HOST_BUSY;
445 rport = starget_to_rport(scsi_target(sc->device));
447 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
448 "returning DID_NO_CONNECT for IO as rport is NULL\n");
449 sc->result = DID_NO_CONNECT << 16;
454 ret = fc_remote_port_chkready(rport);
456 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
457 "rport is not ready\n");
458 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
465 if (!rp || rp->rp_state == RPORT_ST_DELETE) {
466 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
467 "rport 0x%x removed, returning DID_NO_CONNECT\n",
470 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
471 sc->result = DID_NO_CONNECT<<16;
476 if (rp->rp_state != RPORT_ST_READY) {
477 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
478 "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
479 rport->port_id, rp->rp_state);
481 sc->result = DID_IMM_RETRY << 16;
486 if (lp->state != LPORT_ST_READY || !(lp->link_up))
487 return SCSI_MLQUEUE_HOST_BUSY;
489 atomic_inc(&fnic->in_flight);
492 * Release host lock, use driver resource specific locks from here.
493 * Don't re-enable interrupts in case they were disabled prior to the
494 * caller disabling them.
496 spin_unlock(lp->host->host_lock);
497 CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
498 CMD_FLAGS(sc) = FNIC_NO_FLAGS;
500 /* Get a new io_req for this SCSI IO */
501 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
503 atomic64_inc(&fnic_stats->io_stats.alloc_failures);
504 ret = SCSI_MLQUEUE_HOST_BUSY;
507 memset(io_req, 0, sizeof(*io_req));
509 /* Map the data buffer */
510 sg_count = scsi_dma_map(sc);
512 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
513 sc->request->tag, sc, 0, sc->cmnd[0],
514 sg_count, CMD_STATE(sc));
515 mempool_free(io_req, fnic->io_req_pool);
519 /* Determine the type of scatter/gather list we need */
520 io_req->sgl_cnt = sg_count;
521 io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
522 if (sg_count > FNIC_DFLT_SG_DESC_CNT)
523 io_req->sgl_type = FNIC_SGL_CACHE_MAX;
527 mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
529 if (!io_req->sgl_list) {
530 atomic64_inc(&fnic_stats->io_stats.alloc_failures);
531 ret = SCSI_MLQUEUE_HOST_BUSY;
533 mempool_free(io_req, fnic->io_req_pool);
537 /* Cache sgl list allocated address before alignment */
538 io_req->sgl_list_alloc = io_req->sgl_list;
539 ptr = (unsigned long) io_req->sgl_list;
540 if (ptr % FNIC_SG_DESC_ALIGN) {
541 io_req->sgl_list = (struct host_sg_desc *)
542 (((unsigned long) ptr
543 + FNIC_SG_DESC_ALIGN - 1)
544 & ~(FNIC_SG_DESC_ALIGN - 1));
549 * Will acquire the io_lock before marking the IO as initialized.
552 io_lock = fnic_io_lock_hash(fnic, sc);
553 spin_lock_irqsave(io_lock, flags);
555 /* initialize rest of io_req */
556 io_lock_acquired = 1;
557 io_req->port_id = rport->port_id;
558 io_req->start_time = jiffies;
559 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
560 CMD_SP(sc) = (char *)io_req;
561 CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
562 sc->scsi_done = done;
564 /* create copy wq desc and enqueue it */
565 wq = &fnic->wq_copy[0];
566 ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
569 * In case another thread cancelled the request,
570 * refetch the pointer under the lock.
572 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
573 sc->request->tag, sc, 0, 0, 0,
574 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
575 io_req = (struct fnic_io_req *)CMD_SP(sc);
577 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
578 spin_unlock_irqrestore(io_lock, flags);
580 fnic_release_ioreq_buf(fnic, io_req, sc);
581 mempool_free(io_req, fnic->io_req_pool);
583 atomic_dec(&fnic->in_flight);
584 /* acquire host lock before returning to SCSI */
585 spin_lock(lp->host->host_lock);
588 atomic64_inc(&fnic_stats->io_stats.active_ios);
589 atomic64_inc(&fnic_stats->io_stats.num_ios);
590 if (atomic64_read(&fnic_stats->io_stats.active_ios) >
591 atomic64_read(&fnic_stats->io_stats.max_active_ios))
592 atomic64_set(&fnic_stats->io_stats.max_active_ios,
593 atomic64_read(&fnic_stats->io_stats.active_ios));
595 /* REVISIT: Use per IO lock in the final code */
596 CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
599 cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
600 (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
601 (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
604 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
605 sc->request->tag, sc, io_req,
607 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
609 /* We hold the io_lock only if we actually issued the IO */
610 if (io_lock_acquired)
611 spin_unlock_irqrestore(io_lock, flags);
613 atomic_dec(&fnic->in_flight);
614 /* acquire host lock before returning to SCSI */
615 spin_lock(lp->host->host_lock);
619 DEF_SCSI_QCMD(fnic_queuecommand)
622 * fnic_fcpio_fw_reset_cmpl_handler
623 * Routine to handle fw reset completion
625 static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
626 struct fcpio_fw_req *desc)
630 struct fcpio_tag tag;
633 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
635 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
637 atomic64_inc(&reset_stats->fw_reset_completions);
639 /* Clean up all outstanding io requests */
640 fnic_cleanup_io(fnic, SCSI_NO_TAG);
642 atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
643 atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
644 atomic64_set(&fnic->io_cmpl_skip, 0);
646 spin_lock_irqsave(&fnic->fnic_lock, flags);
648 /* fnic should be in FC_TRANS_ETH_MODE */
649 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
650 /* Check status of reset completion */
652 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
653 "reset cmpl success\n");
654 /* Ready to send flogi out */
655 fnic->state = FNIC_IN_ETH_MODE;
657 FNIC_SCSI_DBG(KERN_DEBUG,
659 "fnic fw_reset : failed %s\n",
660 fnic_fcpio_status_to_str(hdr_status));
663 * Unable to change to eth mode, cannot send out flogi
664 * Change state to fc mode, so that subsequent Flogi
665 * requests from libFC will cause more attempts to
666 * reset the firmware. Free the cached flogi
668 fnic->state = FNIC_IN_FC_MODE;
669 atomic64_inc(&reset_stats->fw_reset_failures);
673 FNIC_SCSI_DBG(KERN_DEBUG,
675 "Unexpected state %s while processing"
676 " reset cmpl\n", fnic_state_to_str(fnic->state));
677 atomic64_inc(&reset_stats->fw_reset_failures);
681 /* Thread removing device blocks till firmware reset is complete */
682 if (fnic->remove_wait)
683 complete(fnic->remove_wait);
686 * If fnic is being removed, or fw reset failed
687 * free the flogi frame. Else, send it out
689 if (fnic->remove_wait || ret) {
690 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
691 skb_queue_purge(&fnic->tx_queue);
692 goto reset_cmpl_handler_end;
695 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
699 reset_cmpl_handler_end:
700 fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
706 * fnic_fcpio_flogi_reg_cmpl_handler
707 * Routine to handle flogi register completion
709 static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
710 struct fcpio_fw_req *desc)
714 struct fcpio_tag tag;
718 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
720 /* Update fnic state based on status of flogi reg completion */
721 spin_lock_irqsave(&fnic->fnic_lock, flags);
723 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
725 /* Check flogi registration completion status */
727 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
728 "flogi reg succeeded\n");
729 fnic->state = FNIC_IN_FC_MODE;
731 FNIC_SCSI_DBG(KERN_DEBUG,
733 "fnic flogi reg :failed %s\n",
734 fnic_fcpio_status_to_str(hdr_status));
735 fnic->state = FNIC_IN_ETH_MODE;
739 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
740 "Unexpected fnic state %s while"
741 " processing flogi reg completion\n",
742 fnic_state_to_str(fnic->state));
747 if (fnic->stop_rx_link_events) {
748 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
749 goto reg_cmpl_handler_end;
751 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
754 queue_work(fnic_event_queue, &fnic->frame_work);
756 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
759 reg_cmpl_handler_end:
763 static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
766 if (wq->to_clean_index <= wq->to_use_index) {
767 /* out of range, stale request_out index */
768 if (request_out < wq->to_clean_index ||
769 request_out >= wq->to_use_index)
772 /* out of range, stale request_out index */
773 if (request_out < wq->to_clean_index &&
774 request_out >= wq->to_use_index)
777 /* request_out index is in range */
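/*
 * Worked example: with to_clean_index = 6 and to_use_index = 2 the ring has
 * wrapped, so request_out values 6, 7, 0 and 1 are in range while 2..5 are
 * stale and rejected.
 */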
783 * Mark that an ack was received and store the ack index. If multiple acks
784 * are received before the Tx thread cleans them up, the latest value is
785 * used, which is the correct behavior. This state should live in the copy
786 * WQ instead of in the fnic.
788 static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
789 unsigned int cq_index,
790 struct fcpio_fw_req *desc)
792 struct vnic_wq_copy *wq;
793 u16 request_out = desc->u.ack.request_out;
795 u64 *ox_id_tag = (u64 *)(void *)desc;
797 /* mark the ack state */
798 wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
799 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
801 fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
802 if (is_ack_index_in_range(wq, request_out)) {
803 fnic->fw_ack_index[0] = request_out;
804 fnic->fw_ack_recd[0] = 1;
807 &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
809 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
810 FNIC_TRACE(fnic_fcpio_ack_handler,
811 fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
812 ox_id_tag[4], ox_id_tag[5]);
816 * fnic_fcpio_icmnd_cmpl_handler
817 * Routine to handle icmnd completions
819 static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
820 struct fcpio_fw_req *desc)
824 struct fcpio_tag tag;
827 struct fcpio_icmnd_cmpl *icmnd_cmpl;
828 struct fnic_io_req *io_req;
829 struct scsi_cmnd *sc;
830 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
834 unsigned long start_time;
835 unsigned long io_duration_time;
837 /* Decode the cmpl description to get the io_req id */
838 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
839 fcpio_tag_id_dec(&tag, &id);
840 icmnd_cmpl = &desc->u.icmnd_cmpl;
842 if (id >= fnic->fnic_max_tag_id) {
843 shost_printk(KERN_ERR, fnic->lport->host,
844 "Tag out of range tag %x hdr status = %s\n",
845 id, fnic_fcpio_status_to_str(hdr_status));
849 sc = scsi_host_find_tag(fnic->lport->host, id);
852 atomic64_inc(&fnic_stats->io_stats.sc_null);
853 shost_printk(KERN_ERR, fnic->lport->host,
854 "icmnd_cmpl sc is null - "
855 "hdr status = %s tag = 0x%x desc = 0x%p\n",
856 fnic_fcpio_status_to_str(hdr_status), id, desc);
857 FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
858 fnic->lport->host->host_no, id,
859 ((u64)icmnd_cmpl->_resvd0[1] << 16 |
860 (u64)icmnd_cmpl->_resvd0[0]),
861 ((u64)hdr_status << 16 |
862 (u64)icmnd_cmpl->scsi_status << 8 |
863 (u64)icmnd_cmpl->flags), desc,
864 (u64)icmnd_cmpl->residual, 0);
868 io_lock = fnic_io_lock_hash(fnic, sc);
869 spin_lock_irqsave(io_lock, flags);
870 io_req = (struct fnic_io_req *)CMD_SP(sc);
871 WARN_ON_ONCE(!io_req);
873 atomic64_inc(&fnic_stats->io_stats.ioreq_null);
874 CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
875 spin_unlock_irqrestore(io_lock, flags);
876 shost_printk(KERN_ERR, fnic->lport->host,
877 "icmnd_cmpl io_req is null - "
878 "hdr status = %s tag = 0x%x sc 0x%p\n",
879 fnic_fcpio_status_to_str(hdr_status), id, sc);
882 start_time = io_req->start_time;
884 /* firmware completed the io */
885 io_req->io_completed = 1;
888 * if SCSI-ML has already issued abort on this command,
889 * set completion of the IO. The abts path will clean it up
891 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
894 * set the FNIC_IO_DONE so that this doesn't get
895 * flagged as 'out of order' if it was not aborted
897 CMD_FLAGS(sc) |= FNIC_IO_DONE;
898 CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
899 spin_unlock_irqrestore(io_lock, flags);
900 if (hdr_status == FCPIO_ABORTED)
901 CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
903 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
904 "icmnd_cmpl abts pending "
905 "hdr status = %s tag = 0x%x sc = 0x%p "
906 "scsi_status = %x residual = %d\n",
907 fnic_fcpio_status_to_str(hdr_status),
909 icmnd_cmpl->scsi_status,
910 icmnd_cmpl->residual);
914 /* Mark the IO as complete */
915 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
917 icmnd_cmpl = &desc->u.icmnd_cmpl;
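/* Translate the firmware completion status into a SCSI mid-layer result */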
919 switch (hdr_status) {
921 sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
922 xfer_len = scsi_bufflen(sc);
923 scsi_set_resid(sc, icmnd_cmpl->residual);
925 if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
926 xfer_len -= icmnd_cmpl->residual;
928 if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
929 atomic64_inc(&fnic_stats->misc_stats.check_condition);
931 if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
932 atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
935 case FCPIO_TIMEOUT: /* request was timed out */
936 atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
937 sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
940 case FCPIO_ABORTED: /* request was aborted */
941 atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
942 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
945 case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
946 atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
947 scsi_set_resid(sc, icmnd_cmpl->residual);
948 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
951 case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
952 atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
953 sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
956 case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
957 atomic64_inc(&fnic_stats->io_stats.io_not_found);
958 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
961 case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
962 atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
963 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
966 case FCPIO_FW_ERR: /* request was terminated due fw error */
967 atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
968 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
971 case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
972 atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
973 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
976 case FCPIO_INVALID_HEADER: /* header contains invalid data */
977 case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
978 case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
980 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
984 /* Break link with the SCSI command */
986 CMD_FLAGS(sc) |= FNIC_IO_DONE;
988 spin_unlock_irqrestore(io_lock, flags);
990 if (hdr_status != FCPIO_SUCCESS) {
991 atomic64_inc(&fnic_stats->io_stats.io_failures);
992 shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
993 fnic_fcpio_status_to_str(hdr_status));
996 fnic_release_ioreq_buf(fnic, io_req, sc);
998 mempool_free(io_req, fnic->io_req_pool);
1000 cmd_trace = ((u64)hdr_status << 56) |
1001 (u64)icmnd_cmpl->scsi_status << 48 |
1002 (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
1003 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1004 (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
1006 FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
1007 sc->device->host->host_no, id, sc,
1008 ((u64)icmnd_cmpl->_resvd0[1] << 56 |
1009 (u64)icmnd_cmpl->_resvd0[0] << 48 |
1010 jiffies_to_msecs(jiffies - start_time)),
1012 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1014 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
1015 fnic->lport->host_stats.fcp_input_requests++;
1016 fnic->fcp_input_bytes += xfer_len;
1017 } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
1018 fnic->lport->host_stats.fcp_output_requests++;
1019 fnic->fcp_output_bytes += xfer_len;
1021 fnic->lport->host_stats.fcp_control_requests++;
1023 atomic64_dec(&fnic_stats->io_stats.active_ios);
1024 if (atomic64_read(&fnic->io_cmpl_skip))
1025 atomic64_dec(&fnic->io_cmpl_skip);
1027 atomic64_inc(&fnic_stats->io_stats.io_completions);
1030 io_duration_time = jiffies_to_msecs(jiffies) -
1031 jiffies_to_msecs(start_time);
1033 if (io_duration_time <= 10)
1034 atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
1035 else if (io_duration_time <= 100)
1036 atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
1037 else if (io_duration_time <= 500)
1038 atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
1039 else if (io_duration_time <= 5000)
1040 atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
1041 else if (io_duration_time <= 10000)
1042 atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
1043 else if (io_duration_time <= 30000)
1044 atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
1046 atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);
1048 if (io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
1049 atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
1052 /* Call SCSI completion function to complete the IO */
1057 /* fnic_fcpio_itmf_cmpl_handler
1058 * Routine to handle itmf completions
1060 static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
1061 struct fcpio_fw_req *desc)
1065 struct fcpio_tag tag;
1067 struct scsi_cmnd *sc;
1068 struct fnic_io_req *io_req;
1069 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1070 struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
1071 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1072 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1073 unsigned long flags;
1074 spinlock_t *io_lock;
1075 unsigned long start_time;
1077 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
1078 fcpio_tag_id_dec(&tag, &id);
1080 if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
1081 shost_printk(KERN_ERR, fnic->lport->host,
1082 "Tag out of range tag %x hdr status = %s\n",
1083 id, fnic_fcpio_status_to_str(hdr_status));
1087 sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
1090 atomic64_inc(&fnic_stats->io_stats.sc_null);
1091 shost_printk(KERN_ERR, fnic->lport->host,
1092 "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
1093 fnic_fcpio_status_to_str(hdr_status), id);
1096 io_lock = fnic_io_lock_hash(fnic, sc);
1097 spin_lock_irqsave(io_lock, flags);
1098 io_req = (struct fnic_io_req *)CMD_SP(sc);
1099 WARN_ON_ONCE(!io_req);
1101 atomic64_inc(&fnic_stats->io_stats.ioreq_null);
1102 spin_unlock_irqrestore(io_lock, flags);
1103 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
1104 shost_printk(KERN_ERR, fnic->lport->host,
1105 "itmf_cmpl io_req is null - "
1106 "hdr status = %s tag = 0x%x sc 0x%p\n",
1107 fnic_fcpio_status_to_str(hdr_status), id, sc);
1110 start_time = io_req->start_time;
1112 if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
1113 /* Abort and terminate completion of device reset req */
1114 /* REVISIT : Add asserts about various flags */
1115 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1116 "dev reset abts cmpl recd. id %x status %s\n",
1117 id, fnic_fcpio_status_to_str(hdr_status));
1118 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
1119 CMD_ABTS_STATUS(sc) = hdr_status;
1120 CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
1121 if (io_req->abts_done)
1122 complete(io_req->abts_done);
1123 spin_unlock_irqrestore(io_lock, flags);
1124 } else if (id & FNIC_TAG_ABORT) {
1125 /* Completion of abort cmd */
1126 switch (hdr_status) {
1130 if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
1131 atomic64_inc(&abts_stats->abort_fw_timeouts);
1134 &term_stats->terminate_fw_timeouts);
1136 case FCPIO_ITMF_REJECTED:
1137 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1138 "abort reject recd. id %d\n",
1139 (int)(id & FNIC_TAG_MASK));
1141 case FCPIO_IO_NOT_FOUND:
1142 if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
1143 atomic64_inc(&abts_stats->abort_io_not_found);
1146 &term_stats->terminate_io_not_found);
1149 if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
1150 atomic64_inc(&abts_stats->abort_failures);
1153 &term_stats->terminate_failures);
1156 if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
1157 /* This is a late completion. Ignore it */
1158 spin_unlock_irqrestore(io_lock, flags);
1162 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
1163 CMD_ABTS_STATUS(sc) = hdr_status;
1165 /* If the status is IO not found consider it as success */
1166 if (hdr_status == FCPIO_IO_NOT_FOUND)
1167 CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;
1169 if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
1170 atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
1172 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1173 "abts cmpl recd. id %d status %s\n",
1174 (int)(id & FNIC_TAG_MASK),
1175 fnic_fcpio_status_to_str(hdr_status));
1178 * If the scsi_eh thread is blocked waiting for the abts to complete,
1179 * signal completion to it; the IO will then be cleaned up in that
1180 * thread. Otherwise clean it up in this context.
1182 if (io_req->abts_done) {
1183 complete(io_req->abts_done);
1184 spin_unlock_irqrestore(io_lock, flags);
1186 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1187 "abts cmpl, completing IO\n");
1189 sc->result = (DID_ERROR << 16);
1191 spin_unlock_irqrestore(io_lock, flags);
1193 fnic_release_ioreq_buf(fnic, io_req, sc);
1194 mempool_free(io_req, fnic->io_req_pool);
1195 if (sc->scsi_done) {
1196 FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1197 sc->device->host->host_no, id,
1199 jiffies_to_msecs(jiffies - start_time),
1201 (((u64)hdr_status << 40) |
1202 (u64)sc->cmnd[0] << 32 |
1203 (u64)sc->cmnd[2] << 24 |
1204 (u64)sc->cmnd[3] << 16 |
1205 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1206 (((u64)CMD_FLAGS(sc) << 32) |
1209 atomic64_dec(&fnic_stats->io_stats.active_ios);
1210 if (atomic64_read(&fnic->io_cmpl_skip))
1211 atomic64_dec(&fnic->io_cmpl_skip);
1213 atomic64_inc(&fnic_stats->io_stats.io_completions);
1217 } else if (id & FNIC_TAG_DEV_RST) {
1218 /* Completion of device reset */
1219 CMD_LR_STATUS(sc) = hdr_status;
1220 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1221 spin_unlock_irqrestore(io_lock, flags);
1222 CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
1223 FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1224 sc->device->host->host_no, id, sc,
1225 jiffies_to_msecs(jiffies - start_time),
1227 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1228 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1229 "Terminate pending "
1230 "dev reset cmpl recd. id %d status %s\n",
1231 (int)(id & FNIC_TAG_MASK),
1232 fnic_fcpio_status_to_str(hdr_status));
1235 if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
1236 /* Need to wait for terminate completion */
1237 spin_unlock_irqrestore(io_lock, flags);
1238 FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1239 sc->device->host->host_no, id, sc,
1240 jiffies_to_msecs(jiffies - start_time),
1242 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1243 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1244 "dev reset cmpl recd after time out. "
1245 "id %d status %s\n",
1246 (int)(id & FNIC_TAG_MASK),
1247 fnic_fcpio_status_to_str(hdr_status));
1250 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
1251 CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
1252 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1253 "dev reset cmpl recd. id %d status %s\n",
1254 (int)(id & FNIC_TAG_MASK),
1255 fnic_fcpio_status_to_str(hdr_status));
1256 if (io_req->dr_done)
1257 complete(io_req->dr_done);
1258 spin_unlock_irqrestore(io_lock, flags);
1261 shost_printk(KERN_ERR, fnic->lport->host,
1262 "Unexpected itmf io state %s tag %x\n",
1263 fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
1264 spin_unlock_irqrestore(io_lock, flags);
1270 * fnic_fcpio_cmpl_handler
1271 * Routine to service the cq for wq_copy
1273 static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
1274 unsigned int cq_index,
1275 struct fcpio_fw_req *desc)
1277 struct fnic *fnic = vnic_dev_priv(vdev);
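/*
 * The first switch only accounts for the completed firmware request; the
 * second switch below dispatches the descriptor to its type-specific
 * completion handler.
 */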
1279 switch (desc->hdr.type) {
1280 case FCPIO_ICMND_CMPL: /* fw completed a command */
1281 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1282 case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1283 case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1284 case FCPIO_RESET_CMPL: /* fw completed reset */
1285 atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1291 switch (desc->hdr.type) {
1292 case FCPIO_ACK: /* fw copied copy wq desc to its queue */
1293 fnic_fcpio_ack_handler(fnic, cq_index, desc);
1296 case FCPIO_ICMND_CMPL: /* fw completed a command */
1297 fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
1300 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1301 fnic_fcpio_itmf_cmpl_handler(fnic, desc);
1304 case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1305 case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1306 fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
1309 case FCPIO_RESET_CMPL: /* fw completed reset */
1310 fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
1314 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1315 "firmware completion type %d\n",
1324 * fnic_wq_copy_cmpl_handler
1325 * Routine to service the completion queues for the copy WQs
1327 int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
1329 unsigned int wq_work_done = 0;
1330 unsigned int i, cq_index;
1331 unsigned int cur_work_done;
1332 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1333 u64 start_jiffies = 0;
1334 u64 end_jiffies = 0;
1335 u64 delta_jiffies = 0;
1338 for (i = 0; i < fnic->wq_copy_count; i++) {
1339 cq_index = i + fnic->raw_wq_count + fnic->rq_count;
1341 start_jiffies = jiffies;
1342 cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
1343 fnic_fcpio_cmpl_handler,
1345 end_jiffies = jiffies;
1347 wq_work_done += cur_work_done;
1348 delta_jiffies = end_jiffies - start_jiffies;
1350 (u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
1351 atomic64_set(&misc_stats->max_isr_jiffies,
1353 delta_ms = jiffies_to_msecs(delta_jiffies);
1354 atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
1355 atomic64_set(&misc_stats->corr_work_done,
1359 return wq_work_done;
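/*
 * fnic_cleanup_io
 * Walk every tag (except exclude_id) and complete any IO the driver is still
 * tracking. Used when no further completions will arrive for outstanding
 * IOs, e.g. after a firmware reset.
 */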
1362 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
1365 struct fnic_io_req *io_req;
1366 unsigned long flags = 0;
1367 struct scsi_cmnd *sc;
1368 spinlock_t *io_lock;
1369 unsigned long start_time = 0;
1370 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1372 for (i = 0; i < fnic->fnic_max_tag_id; i++) {
1373 if (i == exclude_id)
1376 io_lock = fnic_io_lock_tag(fnic, i);
1377 spin_lock_irqsave(io_lock, flags);
1378 sc = scsi_host_find_tag(fnic->lport->host, i);
1380 spin_unlock_irqrestore(io_lock, flags);
1384 io_req = (struct fnic_io_req *)CMD_SP(sc);
1385 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1386 !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
1388 * We will be here only when FW completes reset
1389 * without sending completions for outstanding ios.
1391 CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
1392 if (io_req && io_req->dr_done)
1393 complete(io_req->dr_done);
1394 else if (io_req && io_req->abts_done)
1395 complete(io_req->abts_done);
1396 spin_unlock_irqrestore(io_lock, flags);
1398 } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1399 spin_unlock_irqrestore(io_lock, flags);
1403 spin_unlock_irqrestore(io_lock, flags);
1404 goto cleanup_scsi_cmd;
1409 spin_unlock_irqrestore(io_lock, flags);
1412 * If there is a scsi_cmnd associated with this io_req, then
1413 * free the corresponding state
1415 start_time = io_req->start_time;
1416 fnic_release_ioreq_buf(fnic, io_req, sc);
1417 mempool_free(io_req, fnic->io_req_pool);
1420 sc->result = DID_TRANSPORT_DISRUPTED << 16;
1421 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1422 "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
1423 __func__, sc->request->tag, sc,
1424 (jiffies - start_time));
1426 if (atomic64_read(&fnic->io_cmpl_skip))
1427 atomic64_dec(&fnic->io_cmpl_skip);
1429 atomic64_inc(&fnic_stats->io_stats.io_completions);
1431 /* Complete the command to SCSI */
1432 if (sc->scsi_done) {
1433 if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
1434 shost_printk(KERN_ERR, fnic->lport->host,
1435 "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
1436 sc->request->tag, sc);
1438 FNIC_TRACE(fnic_cleanup_io,
1439 sc->device->host->host_no, i, sc,
1440 jiffies_to_msecs(jiffies - start_time),
1441 0, ((u64)sc->cmnd[0] << 32 |
1442 (u64)sc->cmnd[2] << 24 |
1443 (u64)sc->cmnd[3] << 16 |
1444 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1445 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
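/*
 * fnic_wq_copy_cleanup_handler
 * Invoked when a copy WQ descriptor is cleaned up without having been
 * processed by the firmware; the associated SCSI command is completed with
 * DID_NO_CONNECT.
 */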
1452 void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
1453 struct fcpio_host_req *desc)
1456 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1457 struct fnic_io_req *io_req;
1458 struct scsi_cmnd *sc;
1459 unsigned long flags;
1460 spinlock_t *io_lock;
1461 unsigned long start_time = 0;
1463 /* get the tag reference */
1464 fcpio_tag_id_dec(&desc->hdr.tag, &id);
1465 id &= FNIC_TAG_MASK;
1467 if (id >= fnic->fnic_max_tag_id)
1470 sc = scsi_host_find_tag(fnic->lport->host, id);
1474 io_lock = fnic_io_lock_hash(fnic, sc);
1475 spin_lock_irqsave(io_lock, flags);
1477 /* Get the IO context which this desc refers to */
1478 io_req = (struct fnic_io_req *)CMD_SP(sc);
1480 /* fnic interrupts are turned off by now */
1483 spin_unlock_irqrestore(io_lock, flags);
1484 goto wq_copy_cleanup_scsi_cmd;
1489 spin_unlock_irqrestore(io_lock, flags);
1491 start_time = io_req->start_time;
1492 fnic_release_ioreq_buf(fnic, io_req, sc);
1493 mempool_free(io_req, fnic->io_req_pool);
1495 wq_copy_cleanup_scsi_cmd:
1496 sc->result = DID_NO_CONNECT << 16;
1497 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
1498 " DID_NO_CONNECT\n");
1500 if (sc->scsi_done) {
1501 FNIC_TRACE(fnic_wq_copy_cleanup_handler,
1502 sc->device->host->host_no, id, sc,
1503 jiffies_to_msecs(jiffies - start_time),
1504 0, ((u64)sc->cmnd[0] << 32 |
1505 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1506 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1507 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
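/*
 * fnic_queue_abort_io_req
 * Queue an abort or terminate request (task_req) for the given tag to the
 * firmware. Returns 0 on success, nonzero if the IO path is blocked or no
 * copy WQ descriptors are available.
 */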
1513 static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
1514 u32 task_req, u8 *fc_lun,
1515 struct fnic_io_req *io_req)
1517 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1518 struct Scsi_Host *host = fnic->lport->host;
1519 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1520 unsigned long flags;
1522 spin_lock_irqsave(host->host_lock, flags);
1523 if (unlikely(fnic_chk_state_flags_locked(fnic,
1524 FNIC_FLAGS_IO_BLOCKED))) {
1525 spin_unlock_irqrestore(host->host_lock, flags);
1528 atomic_inc(&fnic->in_flight);
1529 spin_unlock_irqrestore(host->host_lock, flags);
1531 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
1533 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1534 free_wq_copy_descs(fnic, wq);
1536 if (!vnic_wq_copy_desc_avail(wq)) {
1537 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1538 atomic_dec(&fnic->in_flight);
1539 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1540 "fnic_queue_abort_io_req: failure: no descriptors\n");
1541 atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
1544 fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
1545 0, task_req, tag, fc_lun, io_req->port_id,
1546 fnic->config.ra_tov, fnic->config.ed_tov);
1548 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1549 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
1550 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
1551 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
1552 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
1554 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1555 atomic_dec(&fnic->in_flight);
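/*
 * fnic_rport_exch_reset
 * Terminate all IOs still pending in the firmware that belong to the remote
 * port identified by port_id: each such IO is moved to the ABTS_PENDING
 * state and a terminate request is queued to the firmware.
 */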
1560 static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
1565 struct fnic_io_req *io_req;
1566 spinlock_t *io_lock;
1567 unsigned long flags;
1568 struct scsi_cmnd *sc;
1569 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
1570 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1571 struct scsi_lun fc_lun;
1572 enum fnic_ioreq_state old_ioreq_state;
1574 FNIC_SCSI_DBG(KERN_DEBUG,
1576 "fnic_rport_exch_reset called portid 0x%06x\n",
1579 if (fnic->in_remove)
1582 for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
1584 io_lock = fnic_io_lock_tag(fnic, tag);
1585 spin_lock_irqsave(io_lock, flags);
1586 sc = scsi_host_find_tag(fnic->lport->host, tag);
1588 spin_unlock_irqrestore(io_lock, flags);
1592 io_req = (struct fnic_io_req *)CMD_SP(sc);
1594 if (!io_req || io_req->port_id != port_id) {
1595 spin_unlock_irqrestore(io_lock, flags);
1599 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1600 (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
1601 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1602 "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
1604 spin_unlock_irqrestore(io_lock, flags);
1609 * Found IO that is still pending with firmware and
1610 * belongs to rport that went away
1612 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1613 spin_unlock_irqrestore(io_lock, flags);
1616 if (io_req->abts_done) {
1617 shost_printk(KERN_ERR, fnic->lport->host,
1618 "fnic_rport_exch_reset: io_req->abts_done is set "
1620 fnic_ioreq_state_to_str(CMD_STATE(sc)));
1623 if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
1624 shost_printk(KERN_ERR, fnic->lport->host,
1626 "IO not yet issued %p tag 0x%x flags "
1628 sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
1630 old_ioreq_state = CMD_STATE(sc);
1631 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1632 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1633 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1634 atomic64_inc(&reset_stats->device_reset_terminates);
1635 abt_tag = (tag | FNIC_TAG_DEV_RST);
1636 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1637 "fnic_rport_exch_reset dev rst sc 0x%p\n",
1641 BUG_ON(io_req->abts_done);
1643 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1644 "fnic_rport_reset_exch: Issuing abts\n");
1646 spin_unlock_irqrestore(io_lock, flags);
1648 /* Now queue the abort command to firmware */
1649 int_to_scsilun(sc->device->lun, &fc_lun);
1651 if (fnic_queue_abort_io_req(fnic, abt_tag,
1652 FCPIO_ITMF_ABT_TASK_TERM,
1653 fc_lun.scsi_lun, io_req)) {
1655 * Revert the cmd state back to old state, if
1656 * it hasn't changed in between. This cmd will get
1657 * aborted later by scsi_eh, or cleaned up during
1660 spin_lock_irqsave(io_lock, flags);
1661 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1662 CMD_STATE(sc) = old_ioreq_state;
1663 spin_unlock_irqrestore(io_lock, flags);
1665 spin_lock_irqsave(io_lock, flags);
1666 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
1667 CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
1669 CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
1670 spin_unlock_irqrestore(io_lock, flags);
1671 atomic64_inc(&term_stats->terminates);
1675 if (term_cnt > atomic64_read(&term_stats->max_terminates))
1676 atomic64_set(&term_stats->max_terminates, term_cnt);
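/*
 * fnic_terminate_rport_io
 * Called when a remote port goes away (e.g. via the FC transport's
 * terminate_rport_io callback); terminates every IO still pending in the
 * firmware for that rport, much like fnic_rport_exch_reset() but matching
 * on the rport pointer.
 */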
1680 void fnic_terminate_rport_io(struct fc_rport *rport)
1685 struct fnic_io_req *io_req;
1686 spinlock_t *io_lock;
1687 unsigned long flags;
1688 struct scsi_cmnd *sc;
1689 struct scsi_lun fc_lun;
1690 struct fc_rport_libfc_priv *rdata;
1691 struct fc_lport *lport;
1693 struct fc_rport *cmd_rport;
1694 struct reset_stats *reset_stats;
1695 struct terminate_stats *term_stats;
1696 enum fnic_ioreq_state old_ioreq_state;
1699 printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
1702 rdata = rport->dd_data;
1705 printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
1708 lport = rdata->local_port;
1711 printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
1714 fnic = lport_priv(lport);
1715 FNIC_SCSI_DBG(KERN_DEBUG,
1716 fnic->lport->host, "fnic_terminate_rport_io called"
1717 " wwpn 0x%llx, wwnn 0x%llx, rport 0x%p, portid 0x%06x\n",
1718 rport->port_name, rport->node_name, rport,
1721 if (fnic->in_remove)
1724 reset_stats = &fnic->fnic_stats.reset_stats;
1725 term_stats = &fnic->fnic_stats.term_stats;
1727 for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
1729 io_lock = fnic_io_lock_tag(fnic, tag);
1730 spin_lock_irqsave(io_lock, flags);
1731 sc = scsi_host_find_tag(fnic->lport->host, tag);
1733 spin_unlock_irqrestore(io_lock, flags);
1737 cmd_rport = starget_to_rport(scsi_target(sc->device));
1738 if (rport != cmd_rport) {
1739 spin_unlock_irqrestore(io_lock, flags);
1743 io_req = (struct fnic_io_req *)CMD_SP(sc);
1745 if (!io_req || rport != cmd_rport) {
1746 spin_unlock_irqrestore(io_lock, flags);
1750 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1751 (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
1752 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1753 "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
1755 spin_unlock_irqrestore(io_lock, flags);
1759 * Found IO that is still pending with firmware and
1760 * belongs to rport that went away
1762 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1763 spin_unlock_irqrestore(io_lock, flags);
1766 if (io_req->abts_done) {
1767 shost_printk(KERN_ERR, fnic->lport->host,
1768 "fnic_terminate_rport_io: io_req->abts_done is set "
1770 fnic_ioreq_state_to_str(CMD_STATE(sc)));
1772 if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
1773 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1774 "fnic_terminate_rport_io "
1775 "IO not yet issued %p tag 0x%x flags "
1777 sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
1779 old_ioreq_state = CMD_STATE(sc);
1780 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1781 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1782 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1783 atomic64_inc(&reset_stats->device_reset_terminates);
1784 abt_tag = (tag | FNIC_TAG_DEV_RST);
1785 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1786 "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
1789 BUG_ON(io_req->abts_done);
1791 FNIC_SCSI_DBG(KERN_DEBUG,
1793 "fnic_terminate_rport_io: Issuing abts\n");
1795 spin_unlock_irqrestore(io_lock, flags);
1797 /* Now queue the abort command to firmware */
1798 int_to_scsilun(sc->device->lun, &fc_lun);
1800 if (fnic_queue_abort_io_req(fnic, abt_tag,
1801 FCPIO_ITMF_ABT_TASK_TERM,
1802 fc_lun.scsi_lun, io_req)) {
1804 * Revert the cmd state back to old state, if
1805 * it hasn't changed in between. This cmd will get
1806 * aborted later by scsi_eh, or cleaned up during
1809 spin_lock_irqsave(io_lock, flags);
1810 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1811 CMD_STATE(sc) = old_ioreq_state;
1812 spin_unlock_irqrestore(io_lock, flags);
1814 spin_lock_irqsave(io_lock, flags);
1815 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
1816 CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
1818 CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
1819 spin_unlock_irqrestore(io_lock, flags);
1820 atomic64_inc(&term_stats->terminates);
1824 if (term_cnt > atomic64_read(&term_stats->max_terminates))
1825 atomic64_set(&term_stats->max_terminates, term_cnt);
1830 * This function is exported to the SCSI mid-layer for sending abort cmnds.
1831 * A SCSI IO is represented by an io_req in the driver.
1832 * The io_req is linked to the SCSI command, and thus to the ULP's IO.
1834 int fnic_abort_cmd(struct scsi_cmnd *sc)
1836 struct fc_lport *lp;
1838 struct fnic_io_req *io_req = NULL;
1839 struct fc_rport *rport;
1840 spinlock_t *io_lock;
1841 unsigned long flags;
1842 unsigned long start_time = 0;
1845 struct scsi_lun fc_lun;
1846 struct fnic_stats *fnic_stats;
1847 struct abort_stats *abts_stats;
1848 struct terminate_stats *term_stats;
1849 enum fnic_ioreq_state old_ioreq_state;
1851 unsigned long abt_issued_time;
1852 DECLARE_COMPLETION_ONSTACK(tm_done);
1854 /* Wait for rport to unblock */
1855 fc_block_scsi_eh(sc);
1857 /* Get local-port, check ready and link up */
1858 lp = shost_priv(sc->device->host);
1860 fnic = lport_priv(lp);
1861 fnic_stats = &fnic->fnic_stats;
1862 abts_stats = &fnic->fnic_stats.abts_stats;
1863 term_stats = &fnic->fnic_stats.term_stats;
1865 rport = starget_to_rport(scsi_target(sc->device));
1866 tag = sc->request->tag;
1867 FNIC_SCSI_DBG(KERN_DEBUG,
1869 "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
1870 rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
1872 CMD_FLAGS(sc) = FNIC_NO_FLAGS;
1874 if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
1876 goto fnic_abort_cmd_end;
1880 * Avoid a race between SCSI issuing the abort and the device
1881 * completing the command.
1883 * If the command has already been completed by the fw cmpl code,
1884 * we just return SUCCESS from here, which means the abort
1885 * succeeded. Since the timeout for the command has already fired in
1886 * the SCSI ML, the completion won't actually complete the command
1887 * there and it will be treated as an aborted command.
1889 * The CMD_SP will not be cleared except while holding io_req_lock.
1891 io_lock = fnic_io_lock_hash(fnic, sc);
1892 spin_lock_irqsave(io_lock, flags);
1893 io_req = (struct fnic_io_req *)CMD_SP(sc);
1895 spin_unlock_irqrestore(io_lock, flags);
1896 goto fnic_abort_cmd_end;
1899 io_req->abts_done = &tm_done;
1901 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1902 spin_unlock_irqrestore(io_lock, flags);
1906 abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
1907 if (abt_issued_time <= 6000)
1908 atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
1909 else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
1910 atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
1911 else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
1912 atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
1913 else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
1914 atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
1915 else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
1916 atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
1917 else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
1918 atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
1920 atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);
1922 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1923 "CDB Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time);
1925 * Command is still pending; we need to abort it.
1926 * If the firmware completes the command after this point,
1927 * the completion won't reach the mid-layer, since the abort
1928 * has already started.
1930 old_ioreq_state = CMD_STATE(sc);
1931 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1932 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1934 spin_unlock_irqrestore(io_lock, flags);
1937 * Check readiness of the remote port. If the path to remote
1938 * port is up, then send abts to the remote port to terminate
1939 * the IO. Else, just locally terminate the IO in the firmware
1941 if (fc_remote_port_chkready(rport) == 0)
1942 task_req = FCPIO_ITMF_ABT_TASK;
1944 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
1945 task_req = FCPIO_ITMF_ABT_TASK_TERM;
1948 /* Now queue the abort command to firmware */
1949 int_to_scsilun(sc->device->lun, &fc_lun);
1951 if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
1952 fc_lun.scsi_lun, io_req)) {
1953 spin_lock_irqsave(io_lock, flags);
1954 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1955 CMD_STATE(sc) = old_ioreq_state;
1956 io_req = (struct fnic_io_req *)CMD_SP(sc);
1958 io_req->abts_done = NULL;
1959 spin_unlock_irqrestore(io_lock, flags);
1961 goto fnic_abort_cmd_end;
1963 if (task_req == FCPIO_ITMF_ABT_TASK) {
1964 CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
1965 atomic64_inc(&fnic_stats->abts_stats.aborts);
1967 CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
1968 atomic64_inc(&fnic_stats->term_stats.terminates);
1972 * We queued an abort IO, wait for its completion.
1973 * Once the firmware completes the abort command, it will
1974 * wake up this thread.
1977 wait_for_completion_timeout(&tm_done,
1979 (2 * fnic->config.ra_tov +
1980 fnic->config.ed_tov));
1982 /* Check the abort status */
1983 spin_lock_irqsave(io_lock, flags);
1985 io_req = (struct fnic_io_req *)CMD_SP(sc);
1987 atomic64_inc(&fnic_stats->io_stats.ioreq_null);
1988 spin_unlock_irqrestore(io_lock, flags);
1989 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
1991 goto fnic_abort_cmd_end;
1993 io_req->abts_done = NULL;
1995 /* fw did not complete abort, timed out */
1996 if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
1997 spin_unlock_irqrestore(io_lock, flags);
1998 if (task_req == FCPIO_ITMF_ABT_TASK) {
1999 atomic64_inc(&abts_stats->abort_drv_timeouts);
2001 atomic64_inc(&term_stats->terminate_drv_timeouts);
2003 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
2005 goto fnic_abort_cmd_end;
2008 /* IO out of order */
2010 if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
2011 spin_unlock_irqrestore(io_lock, flags);
2012 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2013 "Issuing Host reset due to out of order IO\n");
2016 goto fnic_abort_cmd_end;
2019 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
2021 start_time = io_req->start_time;
2023 * The firmware completed the abort; check the status and
2024 * free the io_req if it succeeded. If the abort failed,
2025 * a device reset will clean up the I/O.
2027 if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
2031 spin_unlock_irqrestore(io_lock, flags);
2032 goto fnic_abort_cmd_end;
2035 spin_unlock_irqrestore(io_lock, flags);
2037 fnic_release_ioreq_buf(fnic, io_req, sc);
2038 mempool_free(io_req, fnic->io_req_pool);
2040 if (sc->scsi_done) {
2041 /* Call SCSI completion function to complete the IO */
2042 sc->result = (DID_ABORT << 16);
2044 atomic64_dec(&fnic_stats->io_stats.active_ios);
2045 if (atomic64_read(&fnic->io_cmpl_skip))
2046 atomic64_dec(&fnic->io_cmpl_skip);
2048 atomic64_inc(&fnic_stats->io_stats.io_completions);
2052 FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
2053 sc->request->tag, sc,
2054 jiffies_to_msecs(jiffies - start_time),
2055 0, ((u64)sc->cmnd[0] << 32 |
2056 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
2057 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
2058 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
2060 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2061 "Returning from abort cmd type %x %s\n", task_req,
2063 "SUCCESS" : "FAILED");
2067 static inline int fnic_queue_dr_io_req(struct fnic *fnic,
2068 struct scsi_cmnd *sc,
2069 struct fnic_io_req *io_req)
2071 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
2072 struct Scsi_Host *host = fnic->lport->host;
2073 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
2074 struct scsi_lun fc_lun;
2076 unsigned long intr_flags;
2078 spin_lock_irqsave(host->host_lock, intr_flags);
2079 if (unlikely(fnic_chk_state_flags_locked(fnic,
2080 FNIC_FLAGS_IO_BLOCKED))) {
2081 spin_unlock_irqrestore(host->host_lock, intr_flags);
2084 atomic_inc(&fnic->in_flight);
2085 spin_unlock_irqrestore(host->host_lock, intr_flags);
2087 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
2089 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
2090 free_wq_copy_descs(fnic, wq);
2092 if (!vnic_wq_copy_desc_avail(wq)) {
2093 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2094 "queue_dr_io_req failure - no descriptors\n");
2095 atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
2100 /* fill in the lun info */
2101 int_to_scsilun(sc->device->lun, &fc_lun);
2103 fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
2104 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
2105 fc_lun.scsi_lun, io_req->port_id,
2106 fnic->config.ra_tov, fnic->config.ed_tov);
2108 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
2109 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
2110 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
2111 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
2112 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
2115 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
2116 atomic_dec(&fnic->in_flight);
2122 * Clean up any pending aborts on the LUN.
2123 * For each outstanding IO on this LUN whose abort has not been completed
2124 * by the firmware, issue a local abort and wait for it to complete.
2125 * Return 0 if all commands were successfully aborted, 1 otherwise.
2127 static int fnic_clean_pending_aborts(struct fnic *fnic,
2128 struct scsi_cmnd *lr_sc,
2133 struct fnic_io_req *io_req;
2134 spinlock_t *io_lock;
2135 unsigned long flags;
2137 struct scsi_cmnd *sc;
2138 struct scsi_lun fc_lun;
2139 struct scsi_device *lun_dev = lr_sc->device;
2140 DECLARE_COMPLETION_ONSTACK(tm_done);
2141 enum fnic_ioreq_state old_ioreq_state;
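/*
 * Walk the whole tag space. For every command that is still pending in
 * the firmware and belongs to the LUN being reset, issue a local
 * terminate (ABTS) and wait for its completion before moving on to the
 * next tag.
 */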
2143 for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
2144 io_lock = fnic_io_lock_tag(fnic, tag);
2145 spin_lock_irqsave(io_lock, flags);
2146 sc = scsi_host_find_tag(fnic->lport->host, tag);
2148 * Ignore the LUN reset cmd itself (when it was issued on a new SC)
2149 * and any cmds that do not belong to this LUN.
2151 if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) {
2152 spin_unlock_irqrestore(io_lock, flags);
2156 io_req = (struct fnic_io_req *)CMD_SP(sc);
2158 if (!io_req || sc->device != lun_dev) {
2159 spin_unlock_irqrestore(io_lock, flags);
2164 * Found IO that is still pending with firmware and
2165 * belongs to the LUN that we are resetting
2167 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2168 "Found IO in %s on lun\n",
2169 fnic_ioreq_state_to_str(CMD_STATE(sc)));
2171 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
2172 spin_unlock_irqrestore(io_lock, flags);
2175 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
2176 (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
2177 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2178 "%s dev rst not pending sc 0x%p\n", __func__,
2180 spin_unlock_irqrestore(io_lock, flags);
2184 if (io_req->abts_done)
2185 shost_printk(KERN_ERR, fnic->lport->host,
2186 "%s: io_req->abts_done is set state is %s\n",
2187 __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
2188 old_ioreq_state = CMD_STATE(sc);
2190 * Any pending IO issued prior to the reset is expected to be
2191 * in the abts-pending state; if not, set
2192 * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort-pending.
2193 * When the IO is completed, it will be handed over and
2194 * handled in this function.
2196 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
2198 BUG_ON(io_req->abts_done);
2201 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
2202 abt_tag |= FNIC_TAG_DEV_RST;
2203 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2204 "%s: dev rst sc 0x%p\n", __func__, sc);
2207 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
2208 io_req->abts_done = &tm_done;
2209 spin_unlock_irqrestore(io_lock, flags);
2211 /* Now queue the abort command to firmware */
2212 int_to_scsilun(sc->device->lun, &fc_lun);
2214 if (fnic_queue_abort_io_req(fnic, abt_tag,
2215 FCPIO_ITMF_ABT_TASK_TERM,
2216 fc_lun.scsi_lun, io_req)) {
2217 spin_lock_irqsave(io_lock, flags);
2218 io_req = (struct fnic_io_req *)CMD_SP(sc);
2220 io_req->abts_done = NULL;
2221 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
2222 CMD_STATE(sc) = old_ioreq_state;
2223 spin_unlock_irqrestore(io_lock, flags);
2225 goto clean_pending_aborts_end;
2227 spin_lock_irqsave(io_lock, flags);
2228 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
2229 CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
2230 spin_unlock_irqrestore(io_lock, flags);
2232 CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
2234 wait_for_completion_timeout(&tm_done,
2236 msecs_to_jiffies(fnic->config.ed_tov));
2238 /* Recheck cmd state to check if it is now aborted */
2239 spin_lock_irqsave(io_lock, flags);
2240 io_req = (struct fnic_io_req *)CMD_SP(sc);
2242 spin_unlock_irqrestore(io_lock, flags);
2243 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
2247 io_req->abts_done = NULL;
2249 /* if abort is still pending with fw, fail */
2250 if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
2251 spin_unlock_irqrestore(io_lock, flags);
2252 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
2254 goto clean_pending_aborts_end;
2256 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
2258 /* original sc used for lr is handled by dev reset code */
2261 spin_unlock_irqrestore(io_lock, flags);
2263 /* original sc used for lr is handled by dev reset code */
2265 fnic_release_ioreq_buf(fnic, io_req, sc);
2266 mempool_free(io_req, fnic->io_req_pool);
2270 * If any IO is returned during the reset, scsi_done must be called
2271 * to hand the scsi_cmnd back to the upper layer.
2273 if (sc->scsi_done) {
2274 /* Set result to let upper SCSI layer retry */
2275 sc->result = DID_RESET << 16;
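/*
 * Give the firmware up to 2 * ED_TOV before rechecking whether any
 * IOs are still pending in the firmware.
 */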
2280 schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
2283 /* walk the tags again to check if IOs are still pending in the firmware */
2283 if (fnic_is_abts_pending(fnic, lr_sc))
2286 clean_pending_aborts_end:
2291 * fnic_scsi_host_start_tag
2292 * Allocates a tag id from the host's tag list
2295 fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2297 struct request_queue *q = sc->request->q;
2298 struct request *dummy;
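/*
 * Allocate a dummy block request purely to reserve a tag for this
 * command; the request is never started and is freed again by
 * fnic_scsi_host_end_tag() when the caller is done with it.
 */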
2300 dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
2304 sc->tag = sc->request->tag = dummy->tag;
2305 sc->host_scribble = (unsigned char *)dummy;
2311 * fnic_scsi_host_end_tag
2312 * Frees the tag allocated by fnic_scsi_host_start_tag.
2315 fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2317 struct request *dummy = (struct request *)sc->host_scribble;
2319 blk_mq_free_request(dummy);
2323 * The SCSI EH thread issues a LUN reset when one or more commands on a LUN
2324 * fail to get aborted. It calls the driver's eh_device_reset with a SCSI command
2327 int fnic_device_reset(struct scsi_cmnd *sc)
2329 struct fc_lport *lp;
2331 struct fnic_io_req *io_req = NULL;
2332 struct fc_rport *rport;
2335 spinlock_t *io_lock;
2336 unsigned long flags;
2337 unsigned long start_time = 0;
2338 struct scsi_lun fc_lun;
2339 struct fnic_stats *fnic_stats;
2340 struct reset_stats *reset_stats;
2342 DECLARE_COMPLETION_ONSTACK(tm_done);
2343 int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
2346 /* Wait for rport to unblock */
2347 fc_block_scsi_eh(sc);
2349 /* Get local-port, check ready and link up */
2350 lp = shost_priv(sc->device->host);
2352 fnic = lport_priv(lp);
2353 fnic_stats = &fnic->fnic_stats;
2354 reset_stats = &fnic->fnic_stats.reset_stats;
2356 atomic64_inc(&reset_stats->device_resets);
2358 rport = starget_to_rport(scsi_target(sc->device));
2359 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2360 "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
2361 rport->port_id, sc->device->lun, sc);
2363 if (lp->state != LPORT_ST_READY || !(lp->link_up))
2364 goto fnic_device_reset_end;
2366 /* Check if remote port up */
2367 if (fc_remote_port_chkready(rport)) {
2368 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
2369 goto fnic_device_reset_end;
2372 CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
2373 /* Allocate tag if not present */
2375 tag = sc->request->tag;
2376 if (unlikely(tag < 0)) {
2378 * Really should fix the midlayer to pass in a proper
2379 * request for ioctls...
2381 tag = fnic_scsi_host_start_tag(fnic, sc);
2382 if (unlikely(tag == SCSI_NO_TAG))
2383 goto fnic_device_reset_end;
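/*
 * From here on the io_req pointer and the per-command state/flags are
 * protected by the per-tag hashed io_lock, so take it before touching
 * CMD_SP(sc).
 */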
2387 io_lock = fnic_io_lock_hash(fnic, sc);
2388 spin_lock_irqsave(io_lock, flags);
2389 io_req = (struct fnic_io_req *)CMD_SP(sc);
2392 * If there is an io_req attached to this command, use it;
2393 * otherwise allocate a new one.
2396 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
2398 spin_unlock_irqrestore(io_lock, flags);
2399 goto fnic_device_reset_end;
2401 memset(io_req, 0, sizeof(*io_req));
2402 io_req->port_id = rport->port_id;
2403 CMD_SP(sc) = (char *)io_req;
2405 io_req->dr_done = &tm_done;
2406 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
2407 CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
2408 spin_unlock_irqrestore(io_lock, flags);
2410 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
2413 * Issue the device reset; if enqueueing fails, clean up the io_req
2414 * and break its association with the scsi cmd.
2416 if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
2417 spin_lock_irqsave(io_lock, flags);
2418 io_req = (struct fnic_io_req *)CMD_SP(sc);
2420 io_req->dr_done = NULL;
2421 goto fnic_device_reset_clean;
2423 spin_lock_irqsave(io_lock, flags);
2424 CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
2425 spin_unlock_irqrestore(io_lock, flags);
2428 * Wait on the local completion for LUN reset. The io_req may be
2429 * freed while we wait since we hold no lock.
2431 wait_for_completion_timeout(&tm_done,
2432 msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2434 spin_lock_irqsave(io_lock, flags);
2435 io_req = (struct fnic_io_req *)CMD_SP(sc);
2437 spin_unlock_irqrestore(io_lock, flags);
2438 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2439 "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
2440 goto fnic_device_reset_end;
2442 io_req->dr_done = NULL;
2444 status = CMD_LR_STATUS(sc);
2447 * If the LUN reset did not complete, bail out with FAILED; the io_req
2448 * gets cleaned up during higher levels of EH.
2450 if (status == FCPIO_INVALID_CODE) {
2451 atomic64_inc(&reset_stats->device_reset_timeouts);
2452 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2453 "Device reset timed out\n");
2454 CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
2455 spin_unlock_irqrestore(io_lock, flags);
2456 int_to_scsilun(sc->device->lun, &fc_lun);
2458 * Issue an abort and terminate on the device reset request.
2459 * If queueing the terminate fails, retry it after a delay.
2462 spin_lock_irqsave(io_lock, flags);
2463 if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
2464 spin_unlock_irqrestore(io_lock, flags);
2467 spin_unlock_irqrestore(io_lock, flags);
2468 if (fnic_queue_abort_io_req(fnic,
2469 tag | FNIC_TAG_DEV_RST,
2470 FCPIO_ITMF_ABT_TASK_TERM,
2471 fc_lun.scsi_lun, io_req)) {
2472 wait_for_completion_timeout(&tm_done,
2473 msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
2475 spin_lock_irqsave(io_lock, flags);
2476 CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
2477 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
2478 io_req->abts_done = &tm_done;
2479 spin_unlock_irqrestore(io_lock, flags);
2480 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2481 "Abort and terminate issued on Device reset "
2482 "tag 0x%x sc 0x%p\n", tag, sc);
2487 spin_lock_irqsave(io_lock, flags);
2488 if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
2489 spin_unlock_irqrestore(io_lock, flags);
2490 wait_for_completion_timeout(&tm_done,
2491 msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2494 io_req = (struct fnic_io_req *)CMD_SP(sc);
2495 io_req->abts_done = NULL;
2496 goto fnic_device_reset_clean;
2500 spin_unlock_irqrestore(io_lock, flags);
2503 /* Completed, but not successful, clean up the io_req, return fail */
2504 if (status != FCPIO_SUCCESS) {
2505 spin_lock_irqsave(io_lock, flags);
2506 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2508 "Device reset completed - failed\n");
2509 io_req = (struct fnic_io_req *)CMD_SP(sc);
2510 goto fnic_device_reset_clean;
2514 * Clean up any aborts on this LUN that have still not
2515 * completed. If any of these fail, then the LUN reset fails.
2516 * clean_pending_aborts cleans all cmds on this LUN except
2517 * the LUN reset cmd; if all cmds get cleaned, the LUN reset succeeds.
2520 if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
2521 spin_lock_irqsave(io_lock, flags);
2522 io_req = (struct fnic_io_req *)CMD_SP(sc);
2523 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2524 "Device reset failed"
2525 " since could not abort all IOs\n");
2526 goto fnic_device_reset_clean;
2529 /* Clean lun reset command */
2530 spin_lock_irqsave(io_lock, flags);
2531 io_req = (struct fnic_io_req *)CMD_SP(sc);
2533 /* Completed, and successful */
2536 fnic_device_reset_clean:
2540 spin_unlock_irqrestore(io_lock, flags);
2543 start_time = io_req->start_time;
2544 fnic_release_ioreq_buf(fnic, io_req, sc);
2545 mempool_free(io_req, fnic->io_req_pool);
2548 fnic_device_reset_end:
2549 FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
2550 sc->request->tag, sc,
2551 jiffies_to_msecs(jiffies - start_time),
2552 0, ((u64)sc->cmnd[0] << 32 |
2553 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
2554 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
2555 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
2557 /* free tag if it is allocated */
2558 if (unlikely(tag_gen_flag))
2559 fnic_scsi_host_end_tag(fnic, sc);
2561 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2562 "Returning from device reset %s\n",
2564 "SUCCESS" : "FAILED");
2567 atomic64_inc(&reset_stats->device_reset_failures);
2572 /* Clean up all IOs, clean up libFC local port */
2573 int fnic_reset(struct Scsi_Host *shost)
2575 struct fc_lport *lp;
2578 struct reset_stats *reset_stats;
2580 lp = shost_priv(shost);
2581 fnic = lport_priv(lp);
2582 reset_stats = &fnic->fnic_stats.reset_stats;
2584 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2585 "fnic_reset called\n");
2587 atomic64_inc(&reset_stats->fnic_resets);
2590 * Reset the local port; this will clean up libFC exchanges,
2591 * reset remote port sessions, and, if the link is up, begin FLOGI.
2593 ret = fc_lport_reset(lp);
2595 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2596 "Returning from fnic reset %s\n",
2598 "SUCCESS" : "FAILED");
2601 atomic64_inc(&reset_stats->fnic_reset_completions);
2603 atomic64_inc(&reset_stats->fnic_reset_failures);
2609 * SCSI error handling calls the driver's eh_host_reset if all prior
2610 * error handling levels return FAILED. If the host reset completes
2611 * successfully, and if the link is up, then fabric login begins.
2613 * Host reset is the highest level of error recovery. If this fails, the
2614 * host is offlined by the SCSI mid-layer.
2617 int fnic_host_reset(struct scsi_cmnd *sc)
2620 unsigned long wait_host_tmo;
2621 struct Scsi_Host *shost = sc->device->host;
2622 struct fc_lport *lp = shost_priv(shost);
2623 struct fnic *fnic = lport_priv(lp);
2624 unsigned long flags;
2626 spin_lock_irqsave(&fnic->fnic_lock, flags);
2627 if (fnic->internal_reset_inprogress == 0) {
2628 fnic->internal_reset_inprogress = 1;
2630 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2631 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2632 "host reset in progress skipping another host reset\n");
2635 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2638 * If fnic_reset is successful, wait for fabric login to complete
2639 * scsi-ml tries to send a TUR to every device if host reset is
2640 * successful, so before returning to scsi, fabric should be up
2642 ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
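/*
 * On success, poll for up to FNIC_HOST_RESET_SETTLE_TIME seconds for
 * the local port to reach the READY state with the link up before
 * returning to the mid-layer.
 */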
2643 if (ret == SUCCESS) {
2644 wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
2646 while (time_before(jiffies, wait_host_tmo)) {
2647 if ((lp->state == LPORT_ST_READY) &&
2656 spin_lock_irqsave(&fnic->fnic_lock, flags);
2657 fnic->internal_reset_inprogress = 0;
2658 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2663 * This function is called from libFC when the host is being removed.
2665 void fnic_scsi_abort_io(struct fc_lport *lp)
2668 unsigned long flags;
2669 enum fnic_state old_state;
2670 struct fnic *fnic = lport_priv(lp);
2671 DECLARE_COMPLETION_ONSTACK(remove_wait);
2673 /* Issue firmware reset for fnic, wait for reset to complete */
2675 spin_lock_irqsave(&fnic->fnic_lock, flags);
2676 if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2677 /* fw reset is in progress, poll for its completion */
2678 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2679 schedule_timeout(msecs_to_jiffies(100));
2680 goto retry_fw_reset;
2683 fnic->remove_wait = &remove_wait;
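/*
 * Record the old state and move to the FC-to-ETH transition state; the
 * interface MAC is switched back to the FCoE controller source address
 * while the firmware reset is outstanding.
 */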
2684 old_state = fnic->state;
2685 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2686 fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2687 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2689 err = fnic_fw_reset_handler(fnic);
2691 spin_lock_irqsave(&fnic->fnic_lock, flags);
2692 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2693 fnic->state = old_state;
2694 fnic->remove_wait = NULL;
2695 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2699 /* Wait for firmware reset to complete */
2700 wait_for_completion_timeout(&remove_wait,
2701 msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
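/*
 * Whether or not the firmware signalled completion, clear remove_wait
 * under fnic_lock and report the outcome; reaching FNIC_IN_ETH_MODE
 * here means the firmware reset completed successfully.
 */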
2703 spin_lock_irqsave(&fnic->fnic_lock, flags);
2704 fnic->remove_wait = NULL;
2705 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2706 "fnic_scsi_abort_io %s\n",
2707 (fnic->state == FNIC_IN_ETH_MODE) ?
2708 "SUCCESS" : "FAILED");
2709 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2714 * This function is called from libFC to clean up driver IO state on link down.
2716 void fnic_scsi_cleanup(struct fc_lport *lp)
2718 unsigned long flags;
2719 enum fnic_state old_state;
2720 struct fnic *fnic = lport_priv(lp);
2722 /* issue fw reset */
2724 spin_lock_irqsave(&fnic->fnic_lock, flags);
2725 if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2726 /* fw reset is in progress, poll for its completion */
2727 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2728 schedule_timeout(msecs_to_jiffies(100));
2729 goto retry_fw_reset;
2731 old_state = fnic->state;
2732 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2733 fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2734 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2736 if (fnic_fw_reset_handler(fnic)) {
2737 spin_lock_irqsave(&fnic->fnic_lock, flags);
2738 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2739 fnic->state = old_state;
2740 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2745 void fnic_empty_scsi_cleanup(struct fc_lport *lp)
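/*
 * fnic_exch_mgr_reset
 * Called by libFC to reset exchanges for the given sid/did pair. When
 * both sid and did are zero (link down or device removal), the driver
 * first cleans up or aborts its own IO state, then hands off to
 * fc_exch_mgr_reset().
 */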
2749 void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
2751 struct fnic *fnic = lport_priv(lp);
2753 /* Non-zero sid, nothing to do */
2755 goto call_fc_exch_mgr_reset;
2758 fnic_rport_exch_reset(fnic, did);
2759 goto call_fc_exch_mgr_reset;
2764 * link down or device being removed
2766 if (!fnic->in_remove)
2767 fnic_scsi_cleanup(lp);
2769 fnic_scsi_abort_io(lp);
2771 /* call libFC exch mgr reset to reset its exchanges */
2772 call_fc_exch_mgr_reset:
2773 fc_exch_mgr_reset(lp, sid, did);
2778 * fnic_is_abts_pending() is a helper function that
2779 * walks through the tag map to check whether any IOs are pending; if so,
2780 * it returns 1 (true), otherwise 0 (false).
2781 * If @lr_sc is non-NULL, it checks only the IOs belonging to that LUN;
2782 * otherwise, it checks all IOs.
2784 int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
2787 struct fnic_io_req *io_req;
2788 spinlock_t *io_lock;
2789 unsigned long flags;
2791 struct scsi_cmnd *sc;
2792 struct scsi_device *lun_dev = NULL;
2795 lun_dev = lr_sc->device;
2797 /* walk the tags again to check if IOs are still pending in the firmware */
2798 for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
2799 sc = scsi_host_find_tag(fnic->lport->host, tag);
2801 * ignore the LUN reset cmd itself and cmds that do not belong to the LUN being reset
2804 if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
2807 io_lock = fnic_io_lock_hash(fnic, sc);
2808 spin_lock_irqsave(io_lock, flags);
2810 io_req = (struct fnic_io_req *)CMD_SP(sc);
2812 if (!io_req || sc->device != lun_dev) {
2813 spin_unlock_irqrestore(io_lock, flags);
2818 * Found IO that is still pending with firmware and
2819 * belongs to the LUN that we are resetting
2821 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2822 "Found IO in %s on lun\n",
2823 fnic_ioreq_state_to_str(CMD_STATE(sc)));
2825 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
2827 spin_unlock_irqrestore(io_lock, flags);