// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 */
#include "qedf.h"
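/*
 * ELS (Extended Link Service) handling for the offloaded path: requests
 * are built as firmware middle-path (MP) tasks, posted to the remote
 * port's send queue, and completed through the CQE callbacks below.
 */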
/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct e4_fcoe_task_context *task;
	struct fcoe_wqe *sqe;
	unsigned long flags;
	uint32_t did, sid;
	uint16_t xid;
	u16 sqe_idx;
	int rc = 0;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL");
		rc = -EINVAL;
		goto els_err;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
			  op);
		rc = -EAGAIN;
		goto els_err;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "Failed to alloc ELS request 0x%x\n", op);
		rc = -ENOMEM;
		goto els_err;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
		   "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
		   els_req->xid);
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
		goto els_err;
	}

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			   FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			   FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Put timer on original I/O request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
		   "req\n");
	qedf_ring_doorbell(fcport);
	set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

els_err:
	return rc;
}
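/*
 * Generic ELS CQE completion: clear the outstanding bit, cancel the ELS
 * timer, record the response length from the CQE, and invoke the
 * per-command callback before dropping the request's reference.
 */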
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req)
{
	struct fcoe_cqe_midpath_info *mp_info;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
		   " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

	clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	/* Kill the ELS timer */
	cancel_delayed_work(&els_req->timeout_work);

	/* Get ELS response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	els_req->mp_req.resp_len = mp_info->data_placement_size;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, qedf_release_cmd);
}
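/*
 * RRQ completion: drops the reference qedf_send_rrq() took on the
 * aborted I/O request, plus the RRQ request's own reference when the
 * RRQ timed out (no ELS completion will do it in that case).
 */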
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;
	int refcount;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
		   " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/*
	 * This should return the aborted io_req to the command pool. Note that
	 * we need to check the refcount in case the original request was
	 * flushed but we get a completion on this xid.
	 */
	if (orig_io_req && refcount > 0)
		kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	/*
	 * Release a reference to the rrq request if we timed out as the
	 * rrq completion handler is called directly from the timeout handler
	 * and not from els_compl where the reference would have normally been
	 * released.
	 */
	if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
		kref_put(&rrq_req->refcount, qedf_release_cmd);
	kfree(cb_arg);
}
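/*
 * RRQ (Reinstate Recovery Qualifier) is sent after an exchange has been
 * aborted so that its OX_ID/RX_ID pair is not reused before R_A_TOV
 * expires.
 */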
/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;
	int refcount;

	if (!aborted_io_req) {
		QEDF_ERR(NULL, "abort_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = aborted_io_req->fcport;

	if (!fcport) {
		refcount = kref_read(&aborted_io_req->refcount);
		QEDF_ERR(NULL,
			 "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n",
			 aborted_io_req->xid, refcount);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	qedf = fcport->qedf;

	/*
	 * Sanity check that we can send the RRQ: the refcount must be
	 * exactly 1, i.e. nothing else still holds the original request.
	 */
	refcount = kref_read(&aborted_io_req->refcount);
	if (refcount != 1) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
			  aborted_io_req->xid, aborted_io_req, refcount);
		return -EINVAL;
	}

	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
		   "io = %p, orig_xid = 0x%x\n", aborted_io_req,
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id =
	    htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

	rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
	    qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
			  "req 0x%x\n", aborted_io_req->xid);
		kfree(cb_arg);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}
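/*
 * Rebuild a firmware ELS response as an fc_frame and hand it to libfc
 * as if it arrived on the L2 path, restoring the OX_ID libfc originally
 * assigned so its exchange manager can match response to request.
 */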
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
					struct fc_frame *fp,
					u16 l2_oxid)
{
	struct fc_lport *lport = fcport->qedf->lport;
	struct fc_frame_header *fh;
	u32 crc;

	fh = (struct fc_frame_header *)fc_frame_header_get(fp);

	/* Set the OXID we return to what libfc used */
	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	/* Setup header fields */
	fh->fh_r_ctl = FC_RCTL_ELS_REP;
	fh->fh_type = FC_TYPE_ELS;

	/* Last sequence, end sequence */
	fh->fh_f_ctl[0] = 0x98;
	hton24(fh->fh_d_id, lport->port_id);
	hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
	fh->fh_rx_id = 0xffff;

	/* Set frame attributes */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/* Send completed request to libfc */
	fc_exch_recv(lport, fp);
}
/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;
	unsigned long flags;

	if (!fcport)
		return;

	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
	    !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
			 fcport);
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		return;
	}

	/* Set that we are now in reset */
	set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	rdata = fcport->rdata;
	if (rdata && !kref_get_unless_zero(&rdata->kref)) {
		fcport->rdata = NULL;
		rdata = NULL;
	}

	if (rdata && rdata->rp_state == RPORT_ST_READY) {
		lport = fcport->qedf->lport;
		port_id = rdata->ids.port_id;
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			 "LOGO port_id=%x.\n", port_id);
		fc_rport_logoff(rdata);
		kref_put(&rdata->kref, fc_rport_destroy);
		mutex_lock(&lport->disc.disc_mutex);
		/* Recreate the rport and log back in */
		rdata = fc_rport_create(lport, port_id);
		if (rdata) {
			mutex_unlock(&lport->disc.disc_mutex);
			fc_rport_login(rdata);
			fcport->rdata = rdata;
		} else {
			mutex_unlock(&lport->disc.disc_mutex);
			fcport->rdata = NULL;
		}
	}
	clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
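/*
 * Completion for ELS requests that originated from libfc (e.g. ADISC):
 * return the response to libfc as an fc_frame, or kick off session
 * recovery if the request timed out.
 */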
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame *fp;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	u16 l2_oxid;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;

	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		goto free_arg;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
		goto free_arg;

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return
	 * the command but rather do any internal cleanup and then let
	 * libfc time out the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded. Force libfc to logout the session, which
		 * uploads the offloaded connection and allows the PLOGI
		 * response to flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		goto free_arg;
	}

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
		   "beyond page size.\n");
		goto free_arg;
	}

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto free_arg;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
	kfree(cb_arg);
}
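/*
 * ADISC requests from libfc are intercepted and sent over the offloaded
 * connection; the response is funneled back through qedf_l2_els_compl().
 */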
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	qedf = fcport->qedf;
	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));

	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}
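/*
 * SRR completion: LS_ACC means the target will retransmit the lost
 * sequence; LS_RJT means recovery failed and the exchange is aborted.
 */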
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If a SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
		    "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}
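/*
 * SRR (Sequence Retransmission Request) asks the target to resend the
 * lost part of an open exchange, starting at the given relative offset,
 * without failing the command back to the SCSI layer.
 */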
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
		   "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	} else {
		/* Tell other threads that SRR is in progress */
		set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
	}

	return rc;
}
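/*
 * If the target does not hold sequence initiative, the offloaded
 * sequence state must be cleaned up with a firmware sequence recovery
 * task first; the SRR is then sent from the cleanup completion below.
 */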
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
	u32 offset, u8 r_ctl)
{
	struct qedf_rport *fcport;
	unsigned long flags;
	struct qedf_els_cb_arg *cb_arg;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = orig_io_req->fcport;

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Doing sequence cleanup for xid=0x%x offset=%u.\n",
	    orig_io_req->xid, offset);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
			  "for sequence cleanup\n");
		return;
	}

	/* Get reference for cleanup request */
	kref_get(&orig_io_req->refcount);

	orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
	cb_arg->offset = offset;
	cb_arg->r_ctl = r_ctl;
	orig_io_req->cb_arg = cb_arg;

	qedf_cmd_timer_set(fcport->qedf, orig_io_req,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	orig_io_req->task_params->sqe = sqe;

	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
						   offset);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
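/*
 * Sequence cleanup CQE completion: send the SRR the cleanup was done
 * for; if that fails the I/O will be aborted instead.
 */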
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	int rc;
	struct qedf_els_cb_arg *cb_arg;

	cb_arg = io_req->cb_arg;

	/* If we timed out just free resources */
	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
		goto free;

	/* Kill the timer we put on the request */
	cancel_delayed_work_sync(&io_req->timeout_work);

	rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
		    "abort, xid=0x%x.\n", io_req->xid);

free:
	kfree(cb_arg);
	kref_put(&io_req->refcount, qedf_release_cmd);
}
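/*
 * Reissue a SCSI command on a new exchange when REC indicates the
 * original never reached the target ("command lost"). The sc_cmd is
 * moved to a new io_req so the upper layers never see the retry.
 */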
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
	struct qedf_rport *fcport;
	struct qedf_ioreq *new_io_req;
	unsigned long flags;
	bool rc = false;

	fcport = orig_io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		goto out;
	}

	if (!orig_io_req->sc_cmd) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
		    "xid=0x%x.\n", orig_io_req->xid);
		goto out;
	}

	new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!new_io_req) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
		    "io_req.\n");
		goto out;
	}

	new_io_req->sc_cmd = orig_io_req->sc_cmd;

	/*
	 * This keeps the sc_cmd struct from being returned to the tape
	 * driver and being requeued twice. We do need to put a reference
	 * for the original I/O request since we will not do a SCSI completion
	 * for it.
	 */
	orig_io_req->sc_cmd = NULL;
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* kref for new command released in qedf_post_io_req on error */
	if (qedf_post_io_req(fcport, new_io_req)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
	} else {
		QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Reissued SCSI command from orig_xid=0x%x on "
		    "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
		rc = true;
		/*
		 * Abort the original I/O but do not return SCSI command as
		 * it has been reissued on another OX_ID.
		 */
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		qedf_initiate_abts(orig_io_req, false);
		goto out;
	}

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
	return rc;
}
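/*
 * REC completion: based on the target's view of the exchange, either
 * requeue the command (LS_RJT with OX_ID/RX_ID explanation), send an
 * SRR directly, or run sequence cleanup first.
 */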
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_RJT for REC: er_reason=0x%x, "
		    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange. We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
		    offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Target has the seq init\n");
			srr_needed = true;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "sc_cmd is NULL for xid=0x%x.\n",
			    orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - DATA lost.\n");
				/*
				 * For read case we always set the offset to 0
				 * for sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_put:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}
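/*
 * REC (Read Exchange Concise) queries the target for the state of an
 * exchange flagged by a warning CQE; the completion above chooses the
 * recovery action.
 */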
/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
	struct fc_els_rec rec;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until REC command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id =
	    htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
		   "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
		   orig_io_req->xid, rec.rec_rx_id);
	rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
	    qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}