/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include "qedf.h"

/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct e4_fcoe_task_context *task;
	int rc = 0;
	uint32_t did, sid;
	uint16_t xid;
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL");
		rc = -EINVAL;
		goto els_err;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
			 op);
		rc = -EAGAIN;
		goto els_err;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "Failed to alloc ELS request 0x%x\n", op);
		rc = -ENOMEM;
		goto els_err;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
		  "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
		  els_req->xid);
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	} else {
		rc = 0;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Put timer on original I/O request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
		  "req\n");
	qedf_ring_doorbell(fcport);
	set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

els_err:
	return rc;
}
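
/*
 * Process an ELS completion CQE: record the response length reported by
 * firmware and hand the response to the registered completion callback.
 */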
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req)
{
	struct e4_fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	uint16_t xid;
	struct fcoe_cqe_midpath_info *mp_info;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
		  " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

	clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	/* Kill the ELS timer */
	cancel_delayed_work(&els_req->timeout_work);

	xid = els_req->xid;
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = els_req->sc_cmd;

	/* Get ELS response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	els_req->mp_req.resp_len = mp_info->data_placement_size;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, qedf_release_cmd);
}
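
/*
 * RRQ completion callback: release the reference held on the original
 * aborted I/O request (and on the RRQ itself if it timed out).
 */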
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;
	int refcount;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
		  " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/*
	 * This should return the aborted io_req to the command pool. Note that
	 * we need to check the refcount in case the original request was
	 * flushed but we get a completion on this xid.
	 */
	if (orig_io_req && refcount > 0)
		kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	/*
	 * Release a reference to the rrq request if we timed out as the
	 * rrq completion handler is called directly from the timeout handler
	 * and not from els_compl where the reference would have normally been
	 * released.
	 */
	if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
		kref_put(&rrq_req->refcount, qedf_release_cmd);
	kfree(cb_arg);
}
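
/*
 * Send an RRQ (Reinstate Recovery Qualifier) so the target releases the
 * recovery qualifier held for an exchange that was aborted with ABTS.
 */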
/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;
	int refcount;

	if (!aborted_io_req) {
		QEDF_ERR(NULL, "abort_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = aborted_io_req->fcport;

	if (!fcport) {
		refcount = kref_read(&aborted_io_req->refcount);
		QEDF_ERR(NULL,
			 "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n",
			 aborted_io_req->xid, refcount);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	qedf = fcport->qedf;

	/*
	 * Sanity check that we hold the only reference to the original
	 * command before sending the RRQ.
	 */
	refcount = kref_read(&aborted_io_req->refcount);
	if (refcount != 1) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
			  aborted_io_req->xid, aborted_io_req, refcount);
		return -EINVAL;
	}

	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
		  "io = %p, orig_xid = 0x%x\n", aborted_io_req,
		  aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			 "RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id =
	    htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

	rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
	    qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
			 "req 0x%x\n", aborted_io_req->xid);
		kfree(cb_arg);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}
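
/*
 * Fix up a received ELS response frame and hand it back to libfc via the
 * normal L2 receive path.
 */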
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
					struct fc_frame *fp,
					u16 l2_oxid)
{
	struct fc_lport *lport = fcport->qedf->lport;
	struct fc_frame_header *fh;
	u32 crc;

	fh = (struct fc_frame_header *)fc_frame_header_get(fp);

	/* Set the OXID we return to what libfc used */
	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	/* Setup header fields */
	fh->fh_r_ctl = FC_RCTL_ELS_REP;
	fh->fh_type = FC_TYPE_ELS;

	/* Last sequence, end sequence */
	fh->fh_f_ctl[0] = 0x98;
	hton24(fh->fh_d_id, lport->port_id);
	hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
	fh->fh_rx_id = 0xffff;

	/* Set frame attributes */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/* Send completed request to libfc */
	fc_exch_recv(lport, fp);
}

/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;
	unsigned long flags;

	if (!fcport)
		return;

	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
	    !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
			 fcport);
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		return;
	}

	/* Set that we are now in reset */
	set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	rdata = fcport->rdata;
	if (rdata) {
		lport = fcport->qedf->lport;
		port_id = rdata->ids.port_id;
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			 "LOGO port_id=%x.\n", port_id);
		fc_rport_logoff(rdata);
		mutex_lock(&lport->disc.disc_mutex);
		/* Recreate the rport and log back in */
		rdata = fc_rport_create(lport, port_id);
		if (rdata) {
			mutex_unlock(&lport->disc.disc_mutex);
			fc_rport_login(rdata);
			fcport->rdata = rdata;
		} else {
			mutex_unlock(&lport->disc.disc_mutex);
		}
	}
	clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
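
/*
 * Completion callback for ELS requests sent on behalf of libfc: rebuild a
 * struct fc_frame from the firmware response and return it to libfc.
 */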
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame *fp;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	u16 l2_oxid;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;

	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		goto free_arg;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
		goto free_arg;

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return
	 * the command; rather do any internal cleanup and then let libfc
	 * time out the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded. Force libfc to logout the session which
		 * will offload the connection and allow the PLOGI response to
		 * flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		return;
	}

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
			 "beyond page size.\n");
		goto free_arg;
	}

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			 "fc_frame_alloc failure.\n");
		goto free_arg;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
	kfree(cb_arg);
}
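
/* Send an ADISC from libfc as a firmware middle path ELS request. */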
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf = fcport->qedf;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			 "ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}
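
/*
 * SRR completion callback: if the target rejected the SRR, abort the
 * original I/O; in all cases drop the reference taken for the SRR.
 */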
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		  " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If a SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			  "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}
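
/*
 * Send an SRR (Sequence Retransmission Request) to ask the target to
 * retransmit a lost sequence on the original exchange.
 */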
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
		  "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			 "SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
			 "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	} else
		/* Tell other threads that SRR is in progress */
		set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	return rc;
}
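
/*
 * Post a firmware sequence recovery (cleanup) task for the exchange; the
 * SRR itself is sent once the cleanup completes.
 */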
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
	u32 offset, u8 r_ctl)
{
	struct qedf_rport *fcport;
	unsigned long flags;
	struct qedf_els_cb_arg *cb_arg;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = orig_io_req->fcport;

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Doing sequence cleanup for xid=0x%x offset=%u.\n",
		  orig_io_req->xid, offset);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
			 "for sequence cleanup\n");
		return;
	}

	/* Get reference for cleanup request */
	kref_get(&orig_io_req->refcount);

	orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
	cb_arg->offset = offset;
	cb_arg->r_ctl = r_ctl;
	orig_io_req->cb_arg = cb_arg;

	qedf_cmd_timer_set(fcport->qedf, orig_io_req,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	orig_io_req->task_params->sqe = sqe;

	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
						   offset);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
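
/*
 * Sequence cleanup completed: send the SRR recorded in cb_arg unless the
 * cleanup itself timed out.
 */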
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	int rc;
	struct qedf_els_cb_arg *cb_arg;

	cb_arg = io_req->cb_arg;

	/* If we timed out just free resources */
	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
		goto free;

	/* Kill the timer we put on the request */
	cancel_delayed_work_sync(&io_req->timeout_work);

	rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
			 "abort, xid=0x%x.\n", io_req->xid);
free:
	kfree(cb_arg);
	kref_put(&io_req->refcount, qedf_release_cmd);
}
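
/*
 * Reissue a SCSI command on a new exchange when the target never received
 * the original (the REC "command lost" case).
 */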
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
	struct qedf_rport *fcport;
	struct qedf_ioreq *new_io_req;
	unsigned long flags;
	bool rc = false;

	fcport = orig_io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		goto out;
	}

	if (!orig_io_req->sc_cmd) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
			 "xid=0x%x.\n", orig_io_req->xid);
		goto out;
	}

	new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!new_io_req) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
			 "io_req.\n");
		goto out;
	}

	new_io_req->sc_cmd = orig_io_req->sc_cmd;

	/*
	 * This keeps the sc_cmd struct from being returned to the tape
	 * driver and being requeued twice. We do need to put a reference
	 * for the original I/O request since we will not do a SCSI completion
	 * for it.
	 */
	orig_io_req->sc_cmd = NULL;
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* kref for new command released in qedf_post_io_req on error */
	if (qedf_post_io_req(fcport, new_io_req)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
	} else {
		QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
			  "Reissued SCSI command from orig_xid=0x%x on "
			  "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
		/*
		 * Abort the original I/O but do not return SCSI command as
		 * it has been reissued on another OX_ID.
		 */
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		qedf_initiate_abts(orig_io_req, false);
		rc = true;
		goto out;
	}

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
	return rc;
}
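
/*
 * REC completion callback: based on the LS_ACC/LS_RJT contents, either
 * requeue the original command, send an SRR, or initiate sequence cleanup.
 */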
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		  " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			  "Received LS_RJT for REC: er_reason=0x%x, "
			  "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange. We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				  "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			  "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
			  offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				  "Target has the seq init\n");
			goto out_free_frame;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				  "sc_cmd is NULL for xid=0x%x.\n",
				  orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
					  "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
					  "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
					  "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
					  "READ - DATA lost.\n");
				/*
				 * For read case we always set the offset to 0
				 * for sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_put:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}
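
/*
 * Send a REC (Read Exchange Concise) to query the state of an exchange
 * after an error or timeout on the original I/O.
 */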
/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
	struct fc_els_rec rec;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until REC command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			 "REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id =
	    htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
		  "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
		  orig_io_req->xid, rec.rec_rx_id);
	rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
	    qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
			 "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}