/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"
static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then re-enabled when target "
	"mode is disabled; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");
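/*
 * Example (illustrative only, not part of the driver): target-only operation
 * can be requested at load time with, e.g.:
 *
 *	modprobe qla2xxx qlini_mode="disabled"
 *
 * Since the parameter is declared with S_IRUGO, the active value can be read
 * back from /sys/module/qla2xxx/parameters/qlini_mode.
 */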
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};
/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */
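/*
 * Illustrative decode of the FCP_CMND task attribute/priority byte using the
 * masks above (a sketch for readers, not code used by this driver; 'pri_ta'
 * stands for the raw byte from the FCP_CMND IU):
 *
 *	uint8_t task_attr = pri_ta & FCP_PTA_MASK;
 *	uint8_t priority = (pri_ta & ~FCP_PRI_RESVD_MASK) >> FCP_PRI_SHIFT;
 */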
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation at the time when
 * those functions are called:
 *
 * - Either the context is IRQ and only the IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions' boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
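/*
 * A minimal sketch of the calling pattern described above (assumes the
 * caller already holds ha->hardware_lock; the marker helpers may drop and
 * re-take it internally):
 *
 *	spin_lock_irqsave(&ha->hardware_lock, flags);
 *	...
 *	qlt_issue_marker(vha, 1);	// locked == 1; lock may bounce here
 *	...
 *	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 */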
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);
/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_cmd_cachep;
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
	struct qla_tgt *tgt,
	const uint8_t *port_name)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
			return sess;
	}

	return NULL;
}
/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}
static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return;
}
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}
}
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!tgt);
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against the race when tgt is freed before or
	 * inside wake_up().
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}
/* ha->hardware_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	list_del(&sess->sess_list_entry);
	if (sess->deleted)
		list_del(&sess->del_list_entry);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
/* ha->hardware_lock supposed to be held on entry */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
#if 0	/* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;
	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

	if (sess->deleted)
		return;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);
	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
	sess->deleted = 1;

	if (immediate)
		dev_loss_tmo = 0;

	sess->expires = jiffies + dev_loss_tmo * HZ;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
	    "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
	    "deletion in %u secs (expires: %lu) immed: %d\n",
	    sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
	    sess->expires, immediate);

	if (immediate)
		schedule_delayed_work(&tgt->sess_del_work, 0);
	else
		schedule_delayed_work(&tgt->sess_del_work,
		    sess->expires - jiffies);
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -1;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -1;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(!sess->deleted);

	list_del(&sess->del_list_entry);
	sess->deleted = 0;
}
static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags, elapsed;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		elapsed = jiffies;
		if (time_after_eq(elapsed, sess->expires)) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
			    "Timeout: sess %p about to be deleted\n",
			    sess);
			ha->tgt.tgt_ops->shutdown_sess(sess);
			ha->tgt.tgt_ops->put_sess(sess);
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - elapsed);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/*
 * Adds an extra ref to allow dropping the hw lock after adding sess to the
 * list. Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
	    sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			if (sess->deleted)
				qlt_undelete_sess(sess);

			kref_get(&sess->se_sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
			    (fcport->flags & FCF_CONF_COMP_SUPPORTED));

			if (sess->local && !local)
				sess->local = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, all commands "
		    "from port %8phC will be refused", vha->vp_idx,
		    fcport->port_name);

		return NULL;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, vha->vha_tgt.qla_tgt);

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;
	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explicit NodeACLs+MappedLUNs, or using
	 * TPG demo mode. If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reacquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
	vha->vha_tgt.qla_tgt->sess_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
	    sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}
/*
 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	if (qla_ini_mode_enabled(vha))
		return;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
	} else {
		kref_get(&sess->se_sess->sess_kref);

		if (sess->deleted) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
			    "qla_target(%u): %ssession for port %8phC "
			    "(loop ID %d) reappeared\n", vha->vp_idx,
			    sess->local ? "local " : "", sess->port_name,
			    sess->loop_id);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
			    "Reappeared sess %p\n", sess);
		}
		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	qlt_schedule_sess_for_deletion(sess, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against the race when tgt is freed before or
	 * inside wake_up().
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, empty(sess_list)=%d sess_count=%d\n",
	    tgt, list_empty(&tgt->sess_list), tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt, true);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
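/*
 * Note: target shutdown is two-phase by design. A sketch of the expected
 * caller sequence (hedged; the authoritative call sites live in the
 * tcm_qla2xxx configfs teardown path):
 *
 *	if (qlt_stop_phase1(tgt) == 0)	// drain sessions; may fail w/ NPIV
 *		qlt_stop_phase2(tgt);	// wait for IRQ cmds, mark stopped
 */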
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	qla2x00_start_iocbs(vha, vha->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	qla2x00_start_iocbs(vha, vha->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry we have the firmware's response to an ABTS response that
	 * we generated earlier, so the ID fields in it are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags =
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	u32 lun = 0;
	int rc;
	bool found_lun = false;

	spin_lock(&se_sess->sess_cmd_lock);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock(&se_sess->sess_cmd_lock);

	if (!found_lun)
		return -ENOENT;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_SEND_STATUS);
	ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.scsi_status =
	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	qla2x00_start_iocbs(ha, ha->req);
}
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	/*
	 * If greater than four sg entries then we need to allocate
	 * the continuation entries
	 */
	if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
		prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
		    prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);

	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
	    prm->seg_cnt, prm->req_cnt);
	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}
static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!cmd->sg_mapped);
	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;
}
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ha->iobase;
	uint32_t cnt;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);

		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
		    "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
		    "vha->req->cnt=%d, req_cnt=%d\n", cnt,
		    vha->req->ring_index, vha->req->cnt, req_cnt);
		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);
	}

	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
		ql_dbg(ql_dbg_tgt, vha, 0xe00b,
		    "qla_target(%d): There is no room in the "
		    "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
		    "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
		    vha->req->cnt, req_cnt);
		return -EAGAIN;
	}
	vha->req->cnt -= req_cnt;

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}
/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
			h = 1;	/* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}
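/*
 * Handle lifecycle sketch: qlt_make_handle() hands out values in
 * 1..DEFAULT_OUTSTANDING_COMMANDS, skipping QLA_TGT_NULL_HANDLE (0) and
 * QLA_TGT_SKIP_HANDLE, and the CTIO builder below ORs in
 * CTIO_COMPLETION_HANDLE_MARK so target-mode completions can be told apart
 * from initiator-mode ones. An illustrative lookup on completion (a sketch,
 * not the exact ISR code):
 *
 *	h = pkt->handle & ~CTIO_COMPLETION_HANDLE_MARK;
 *	cmd = ha->tgt.cmds[h - 1];
 */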
/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
	    le16_to_cpu(pkt->u.status0.ox_id));

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that none of cont_pkt64's 64-bit specific
		 * fields are used for 32-bit addressing. Cast to
		 * (cont_entry_t *) for that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
			    (long long unsigned int)
			    pci_dma_hi32(sg_dma_address(prm->sg)),
			    (long long unsigned int)
			    pci_dma_lo32(sg_dma_address(prm->sg)),
			    (int)sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
	    "iocb->scsi_status=%x, iocb->flags=%x\n",
	    le16_to_cpu(pkt24->u.status0.scsi_status),
	    le16_to_cpu(pkt24->u.status0.flags));

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */
	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		ql_dbg(ql_dbg_tgt, vha, 0xe010,
		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
			prm->sg)),
		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
			prm->sg)),
		    (int)sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}
static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}
/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange "
		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
		    se_cmd, cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd could be already freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
	    vha->vp_idx, cmd->tag);

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
	    prm->rq_result, xmit_type);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe014,
		    "Residual underflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe015,
		    "Residual overflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe016,
	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);

	return 0;
}
static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
	struct qla_tgt_cmd *cmd, int sending_sense)
{
	if (ha->tgt.enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return ha->tgt.enable_explicit_conf &&
		    cmd->conf_compl_supported;
}
#ifdef CONFIG_QLA_TGT_DEBUG_SRR
/*
 *  Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483647;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}

static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
#if 0 /* This is not a real status packets lost, so it won't lead to SRR */
	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
	    == 50) {
		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
		    "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
	}
#endif
	/*
	 * It's currently not possible to simulate SRRs for FCP_WRITE without
	 * a physical link layer failure, so don't even try here..
	 */
	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
		return;

	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
	    ((qlt_srr_random() % 100) == 20)) {
		int i, leave = 0;
		unsigned int tot_len = 0;

		while (leave == 0)
			leave = qlt_srr_random() % cmd->sg_cnt;

		for (i = 0; i < leave; i++)
			tot_len += cmd->sg[i].length;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
		    "Cutting cmd %p (tag %d) buffer"
		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
		    " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
		    cmd->bufflen, cmd->sg_cnt);

		cmd->bufflen = tot_len;
		cmd->sg_cnt = leave;
	}

	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
		unsigned int offset = qlt_srr_random() % cmd->bufflen;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
		    "Cutting cmd %p (tag %d) buffer head "
		    "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
		    cmd->bufflen);
		if (offset == 0)
			*xmit_type &= ~QLA_TGT_XMIT_DATA;
		else if (qlt_set_data_offset(cmd, offset)) {
			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
			    "qlt_set_data_offset() failed (tag %d)", cmd->tag);
		}
	}
}
#else
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
#endif
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |=
	    __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
		ctio->u.status0.flags |= __constant_cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
			if (prm->cmd->se_cmd.scsi_status != 0) {
				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
		if (unlikely((prm->sense_buffer_len % 4) != 0)) {
			struct scsi_qla_host *vha = prm->cmd->vha;

			ql_dbg(ql_dbg_tgt, vha, 0xe04f,
			    "qla_target(%d): %d bytes of sense "
			    "lost", prm->tgt->ha->vp_idx,
			    prm->sense_buffer_len % 4);
		}
	} else {
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}
/*
 * Callback to set up the response for xmit_type QLA_TGT_XMIT_DATA and/or
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	memset(&prm, 0, sizeof(prm));
	qlt_check_srr_debug(cmd, &xmit_type);

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
	    "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
			return 0;

		return res;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does F/W have IOCBs for this request? */
	res = qlt_check_reserve_free_req(vha, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unmap_unlock;

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		qlt_load_data_segments(&prm, vha);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= __constant_cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(ha, cmd, 0)) {
					pkt->u.status0.flags |=
					    __constant_cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
			    (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

			ql_dbg(ql_dbg_tgt, vha, 0xe019,
			    "Building additional status packet\n");

			memcpy(ctio, pkt, sizeof(*ctio));
			ctio->entry_count = 1;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~__constant_cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
			pr_debug("Status CTIO7: %p\n", ctio);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);

	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */

	ql_dbg(ql_dbg_tgt, vha, 0xe01a,
	    "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
	    pkt, scsi_status);

	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 0;

out_unmap_unlock:
	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
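/*
 * Typical fabric-side usage (a sketch of how the tcm_qla2xxx callbacks are
 * expected to invoke this; hedged, the authoritative callers live in
 * tcm_qla2xxx.c):
 *
 *	// queue_data_in: send data and status in one shot
 *	qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA | QLA_TGT_XMIT_STATUS,
 *	    se_cmd->scsi_status);
 *
 *	// queue_status: status (and sense, if valid) only
 *	qlt_xmit_response(cmd, QLA_TGT_XMIT_STATUS, se_cmd->scsi_status);
 */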
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags;
	int res = 0;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EIO;

	ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
	    (int)vha->vp_idx);

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does F/W have IOCBs for this request? */
	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;

	res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unlock_free_unmap;
	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);
	qlt_load_data_segments(&prm, vha);

	cmd->state = QLA_TGT_STATE_NEED_DATA;

	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;

out_unlock_free_unmap:
	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);
2082 /* If hardware_lock held on entry, might drop it, then reaquire */
2083 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2084 static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2085 struct qla_tgt_cmd *cmd,
2086 struct atio_from_isp *atio)
2088 struct ctio7_to_24xx *ctio24;
2089 struct qla_hw_data *ha = vha->hw;
2093 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
2095 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
2097 ql_dbg(ql_dbg_tgt, vha, 0xe050,
2098 "qla_target(%d): %s failed: unable to allocate "
2099 "request packet\n", vha->vp_idx, __func__);
2104 if (cmd->state < QLA_TGT_STATE_PROCESSED) {
2105 ql_dbg(ql_dbg_tgt, vha, 0xe051,
2106 "qla_target(%d): Terminating cmd %p with "
2107 "incorrect state %d\n", vha->vp_idx, cmd,
2113 pkt->entry_count = 1;
2114 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2116 ctio24 = (struct ctio7_to_24xx *)pkt;
2117 ctio24->entry_type = CTIO_TYPE7;
2118 ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
2119 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
2120 ctio24->vp_index = vha->vp_idx;
2121 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2122 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2123 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2124 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
2125 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
2126 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
2127 CTIO7_FLAGS_TERMINATE);
2128 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
2130 /* Most likely, it isn't needed */
2131 ctio24->u.status1.residual = get_unaligned((uint32_t *)
2132 &atio->u.isp24.fcp_cmnd.add_cdb[
2133 atio->u.isp24.fcp_cmnd.add_cdb_len]);
2134 if (ctio24->u.status1.residual != 0)
2135 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
2137 qla2x00_start_iocbs(vha, vha->req);
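/*
 * Side note (hedged sketch): the residual programmed above is read from
 * the FCP_DL word, which sits immediately after the variable-length
 * additional CDB bytes in the FCP_CMND payload. A standalone reader,
 * mirroring the get_unaligned() use above (hypothetical helper name):
 */
static inline uint32_t example_get_fcp_dl(struct atio_from_isp *atio)
{
	uint8_t *p = &atio->u.isp24.fcp_cmnd.add_cdb[
			atio->u.isp24.fcp_cmnd.add_cdb_len];

	/* FCP_DL is big-endian on the wire. */
	return be32_to_cpu(get_unaligned((uint32_t *)p));
}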
2141 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2142 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
2144 unsigned long flags;
2147 if (qlt_issue_marker(vha, ha_locked) < 0)
2151 rc = __qlt_send_term_exchange(vha, cmd, atio);
2154 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2155 rc = __qlt_send_term_exchange(vha, cmd, atio);
2156 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2159 if (!ha_locked && !in_interrupt())
2160 msleep(250); /* just in case */
2162 vha->hw->tgt.tgt_ops->free_cmd(cmd);
2166 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2168 BUG_ON(cmd->sg_mapped);
2170 if (unlikely(cmd->free_sg))
2172 kmem_cache_free(qla_tgt_cmd_cachep, cmd);
2174 EXPORT_SYMBOL(qlt_free_cmd);
2176 /* ha->hardware_lock supposed to be held on entry */
2177 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
2178 struct qla_tgt_cmd *cmd, void *ctio)
2180 struct qla_tgt_srr_ctio *sc;
2181 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2182 struct qla_tgt_srr_imm *imm;
2186 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
2187 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
2190 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
2191 "qla_target(%d): SRR CTIO, but ctio is NULL\n",
2196 sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
2199 /* IRQ is already OFF */
2200 spin_lock(&tgt->srr_lock);
2201 sc->srr_id = tgt->ctio_srr_id;
2202 list_add_tail(&sc->srr_list_entry,
2203 &tgt->srr_ctio_list);
2204 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
2205 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
2206 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
2208 list_for_each_entry(imm, &tgt->srr_imm_list,
2210 if (imm->srr_id == sc->srr_id) {
2216 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
2217 "Scheduling srr work\n");
2218 schedule_work(&tgt->srr_work);
2220 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
2221 "qla_target(%d): imm_srr_id "
2222 "== ctio_srr_id (%d), but there is no "
2223 "corresponding SRR IMM, deleting CTIO "
2224 "SRR %p\n", vha->vp_idx,
2225 tgt->ctio_srr_id, sc);
2226 list_del(&sc->srr_list_entry);
2227 spin_unlock(&tgt->srr_lock);
2233 spin_unlock(&tgt->srr_lock);
2235 struct qla_tgt_srr_imm *ti;
2237 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
2238 "qla_target(%d): Unable to allocate SRR CTIO entry\n",
2240 spin_lock(&tgt->srr_lock);
2241 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
2243 if (imm->srr_id == tgt->ctio_srr_id) {
2244 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
2245 "IMM SRR %p deleted (id %d)\n",
2247 list_del(&imm->srr_list_entry);
2248 qlt_reject_free_srr_imm(vha, imm, 1);
2251 spin_unlock(&tgt->srr_lock);
2260 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2262 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
2263 struct qla_tgt_cmd *cmd, uint32_t status)
2268 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
2270 __constant_cpu_to_le16(OF_TERM_EXCH));
2275 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2280 /* ha->hardware_lock supposed to be held on entry */
2281 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
2284 struct qla_hw_data *ha = vha->hw;
2287 if (ha->tgt.cmds[handle] != NULL) {
2288 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
2289 ha->tgt.cmds[handle] = NULL;
2295 /* ha->hardware_lock supposed to be held on entry */
2296 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
2297 uint32_t handle, void *ctio)
2299 struct qla_tgt_cmd *cmd = NULL;
2301 /* Clear out internal marks */
2302 handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
2303 CTIO_INTERMEDIATE_HANDLE_MARK);
2305 if (handle != QLA_TGT_NULL_HANDLE) {
2306 if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
2307 ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
2308 "SKIP_HANDLE CTIO\n");
2311 /* handle-1 is actually used */
2312 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
2313 ql_dbg(ql_dbg_tgt, vha, 0xe052,
2314 "qla_target(%d): Wrong handle %x received\n",
2315 vha->vp_idx, handle);
2318 cmd = qlt_get_cmd(vha, handle);
2319 if (unlikely(cmd == NULL)) {
2320 ql_dbg(ql_dbg_tgt, vha, 0xe053,
2321 "qla_target(%d): Suspicious: unable to "
2322 "find the command with handle %x\n", vha->vp_idx,
2326 } else if (ctio != NULL) {
2327 /* We can't get loop ID from CTIO7 */
2328 ql_dbg(ql_dbg_tgt, vha, 0xe054,
2329 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
2330 "support NULL handles\n", vha->vp_idx);
2338 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2340 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
2341 uint32_t status, void *ctio)
2343 struct qla_hw_data *ha = vha->hw;
2344 struct se_cmd *se_cmd;
2345 struct target_core_fabric_ops *tfo;
2346 struct qla_tgt_cmd *cmd;
2348 ql_dbg(ql_dbg_tgt, vha, 0xe01e,
2349 "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
2350 vha->vp_idx, ctio, status, handle);
2352 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
2353 /* That could happen only in case of an error/reset/abort */
2354 if (status != CTIO_SUCCESS) {
2355 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
2356 "Intermediate CTIO received"
2357 " (status %x)\n", status);
2362 cmd = qlt_ctio_to_cmd(vha, handle, ctio);
2366 se_cmd = &cmd->se_cmd;
2367 tfo = se_cmd->se_tfo;
2370 qlt_unmap_sg(vha, cmd);
2372 if (unlikely(status != CTIO_SUCCESS)) {
2373 switch (status & 0xFFFF) {
2374 case CTIO_LIP_RESET:
2375 case CTIO_TARGET_RESET:
2378 case CTIO_INVALID_RX_ID:
2380 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
2381 "qla_target(%d): CTIO with "
2382 "status %#x received, state %x, se_cmd %p, "
2383 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
2384 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
2385 status, cmd->state, se_cmd);
2388 case CTIO_PORT_LOGGED_OUT:
2389 case CTIO_PORT_UNAVAILABLE:
2390 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
2391 "qla_target(%d): CTIO with PORT LOGGED "
2392 "OUT (29) or PORT UNAVAILABLE (28) status %x "
2393 "received (state %x, se_cmd %p)\n", vha->vp_idx,
2394 status, cmd->state, se_cmd);
2397 case CTIO_SRR_RECEIVED:
2398 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
2399 "qla_target(%d): CTIO with SRR_RECEIVED"
2400 " status %x received (state %x, se_cmd %p)\n",
2401 vha->vp_idx, status, cmd->state, se_cmd);
2402 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
2408 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
2409 "qla_target(%d): CTIO with error status "
2410 "0x%x received (state %x, se_cmd %p\n",
2411 vha->vp_idx, status, cmd->state, se_cmd);
2415 if (cmd->state != QLA_TGT_STATE_NEED_DATA)
2416 if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
2420 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
2421 ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
2422 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
2425 cmd->state = QLA_TGT_STATE_DATA_IN;
2427 if (unlikely(status != CTIO_SUCCESS))
2430 cmd->write_data_transferred = 1;
2432 ql_dbg(ql_dbg_tgt, vha, 0xe020,
2433 "Data received, context %x, rx_status %d\n",
2436 ha->tgt.tgt_ops->handle_data(cmd);
2438 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
2439 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
2440 "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
2442 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
2443 "qla_target(%d): A command in state (%d) should "
2444 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
2447 if (unlikely(status != CTIO_SUCCESS)) {
2448 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
2452 ha->tgt.tgt_ops->free_cmd(cmd);
2455 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
2460 switch (task_codes) {
2461 case ATIO_SIMPLE_QUEUE:
2462 fcp_task_attr = MSG_SIMPLE_TAG;
2464 case ATIO_HEAD_OF_QUEUE:
2465 fcp_task_attr = MSG_HEAD_TAG;
2467 case ATIO_ORDERED_QUEUE:
2468 fcp_task_attr = MSG_ORDERED_TAG;
2470 case ATIO_ACA_QUEUE:
2471 fcp_task_attr = MSG_ACA_TAG;
2474 fcp_task_attr = MSG_SIMPLE_TAG;
2477 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
2478 "qla_target: unknown task code %x, use ORDERED instead\n",
2480 fcp_task_attr = MSG_ORDERED_TAG;
2484 return fcp_task_attr;
2487 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
2490 * Process context for I/O path into tcm_qla2xxx code
2492 static void qlt_do_work(struct work_struct *work)
2494 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
2495 scsi_qla_host_t *vha = cmd->vha;
2496 struct qla_hw_data *ha = vha->hw;
2497 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2498 struct qla_tgt_sess *sess = NULL;
2499 struct atio_from_isp *atio = &cmd->atio;
2501 unsigned long flags;
2502 uint32_t data_length;
2503 int ret, fcp_task_attr, data_dir, bidi = 0;
2508 spin_lock_irqsave(&ha->hardware_lock, flags);
2509 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
2510 atio->u.isp24.fcp_hdr.s_id);
2511 /* Do kref_get() before dropping qla_hw_data->hardware_lock. */
2513 kref_get(&sess->se_sess->sess_kref);
2514 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2516 if (unlikely(!sess)) {
2517 uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;
2519 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
2520 "qla_target(%d): Unable to find wwn login"
2521 " (s_id %x:%x:%x), trying to create it manually\n",
2522 vha->vp_idx, s_id[0], s_id[1], s_id[2]);
2524 if (atio->u.raw.entry_count > 1) {
2525 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
2526 "Dropping multy entry cmd %p\n", cmd);
2530 mutex_lock(&vha->vha_tgt.tgt_mutex);
2531 sess = qlt_make_local_sess(vha, s_id);
2532 /* sess has an extra creation ref. */
2533 mutex_unlock(&vha->vha_tgt.tgt_mutex);
2540 cmd->loop_id = sess->loop_id;
2541 cmd->conf_compl_supported = sess->conf_compl_supported;
2543 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
2544 cmd->tag = atio->u.isp24.exchange_addr;
2545 cmd->unpacked_lun = scsilun_to_int(
2546 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
2548 if (atio->u.isp24.fcp_cmnd.rddata &&
2549 atio->u.isp24.fcp_cmnd.wrdata) {
2551 data_dir = DMA_TO_DEVICE;
2552 } else if (atio->u.isp24.fcp_cmnd.rddata)
2553 data_dir = DMA_FROM_DEVICE;
2554 else if (atio->u.isp24.fcp_cmnd.wrdata)
2555 data_dir = DMA_TO_DEVICE;
2557 data_dir = DMA_NONE;
2559 fcp_task_attr = qlt_get_fcp_task_attr(vha,
2560 atio->u.isp24.fcp_cmnd.task_attr);
2561 data_length = be32_to_cpu(get_unaligned((uint32_t *)
2562 &atio->u.isp24.fcp_cmnd.add_cdb[
2563 atio->u.isp24.fcp_cmnd.add_cdb_len]));
2565 ql_dbg(ql_dbg_tgt, vha, 0xe022,
2566 "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
2567 cmd, cmd->unpacked_lun, cmd->tag);
2569 ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
2570 fcp_task_attr, data_dir, bidi);
2574 * Drop the extra session reference taken above for this command.
2576 spin_lock_irqsave(&ha->hardware_lock, flags);
2577 ha->tgt.tgt_ops->put_sess(sess);
2578 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2582 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
2584 * cmd has not been sent to the target yet, so pass NULL as the second
2585 * argument to qlt_send_term_exchange() and free the memory here.
2587 spin_lock_irqsave(&ha->hardware_lock, flags);
2588 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
2589 kmem_cache_free(qla_tgt_cmd_cachep, cmd);
2591 ha->tgt.tgt_ops->put_sess(sess);
2592 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2595 /* ha->hardware_lock supposed to be held on entry */
2596 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
2597 struct atio_from_isp *atio)
2599 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2600 struct qla_tgt_cmd *cmd;
2602 if (unlikely(tgt->tgt_stop)) {
2603 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
2604 "New command while device %p is shutting down\n", tgt);
2608 cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
2610 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
2611 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
2615 memcpy(&cmd->atio, atio, sizeof(*atio));
2616 cmd->state = QLA_TGT_STATE_NEW;
2617 cmd->tgt = vha->vha_tgt.qla_tgt;
2620 INIT_WORK(&cmd->work, qlt_do_work);
2621 queue_work(qla_tgt_wq, &cmd->work);
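/*
 * The deferral pattern above in miniature (hedged sketch, hypothetical
 * name): the ATIO lives in HW-owned ring memory, so IRQ context takes an
 * atomic snapshot into the command and pushes everything that may sleep
 * to a workqueue.
 */
static __maybe_unused void example_defer_atio(struct workqueue_struct *wq,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio,
	void (*fn)(struct work_struct *))
{
	memcpy(&cmd->atio, atio, sizeof(*atio));  /* snapshot before the ring entry is reused */
	INIT_WORK(&cmd->work, fn);
	queue_work(wq, &cmd->work);	/* fn runs later in process context */
}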
2626 /* ha->hardware_lock supposed to be held on entry */
2627 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
2628 int fn, void *iocb, int flags)
2630 struct scsi_qla_host *vha = sess->vha;
2631 struct qla_hw_data *ha = vha->hw;
2632 struct qla_tgt_mgmt_cmd *mcmd;
2636 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
2638 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
2639 "qla_target(%d): Allocation of management "
2640 "command failed, some commands and their data could "
2641 "leak\n", vha->vp_idx);
2644 memset(mcmd, 0, sizeof(*mcmd));
2648 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
2649 sizeof(mcmd->orig_iocb.imm_ntfy));
2651 mcmd->tmr_func = fn;
2652 mcmd->flags = flags;
2655 case QLA_TGT_CLEAR_ACA:
2656 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
2657 "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
2658 tmr_func = TMR_CLEAR_ACA;
2661 case QLA_TGT_TARGET_RESET:
2662 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
2663 "qla_target(%d): TARGET_RESET received\n",
2665 tmr_func = TMR_TARGET_WARM_RESET;
2668 case QLA_TGT_LUN_RESET:
2669 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
2670 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
2671 tmr_func = TMR_LUN_RESET;
2674 case QLA_TGT_CLEAR_TS:
2675 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
2676 "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
2677 tmr_func = TMR_CLEAR_TASK_SET;
2680 case QLA_TGT_ABORT_TS:
2681 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
2682 "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
2683 tmr_func = TMR_ABORT_TASK_SET;
2686 case QLA_TGT_ABORT_ALL:
2687 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
2688 "qla_target(%d): Doing ABORT_ALL_TASKS\n",
2693 case QLA_TGT_ABORT_ALL_SESS:
2694 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
2695 "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
2700 case QLA_TGT_NEXUS_LOSS_SESS:
2701 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
2702 "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
2707 case QLA_TGT_NEXUS_LOSS:
2708 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
2709 "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
2714 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
2715 "qla_target(%d): Unknown task mgmt fn 0x%x\n",
2716 sess->vha->vp_idx, fn);
2717 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2721 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
2723 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
2724 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
2725 sess->vha->vp_idx, res);
2726 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2733 /* ha->hardware_lock supposed to be held on entry */
2734 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
2736 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
2737 struct qla_hw_data *ha = vha->hw;
2738 struct qla_tgt *tgt;
2739 struct qla_tgt_sess *sess;
2740 uint32_t lun, unpacked_lun;
2743 tgt = vha->vha_tgt.qla_tgt;
2745 lun = a->u.isp24.fcp_cmnd.lun;
2746 lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
2747 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
2748 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
2749 a->u.isp24.fcp_hdr.s_id);
2750 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
2753 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
2754 "qla_target(%d): task mgmt fn 0x%x for "
2755 "non-existant session\n", vha->vp_idx, fn);
2756 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
2757 sizeof(struct atio_from_isp));
2760 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
2763 /* ha->hardware_lock supposed to be held on entry */
2764 static int __qlt_abort_task(struct scsi_qla_host *vha,
2765 struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
2767 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
2768 struct qla_hw_data *ha = vha->hw;
2769 struct qla_tgt_mgmt_cmd *mcmd;
2770 uint32_t lun, unpacked_lun;
2773 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
2775 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
2776 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
2777 vha->vp_idx, __func__);
2780 memset(mcmd, 0, sizeof(*mcmd));
2783 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
2784 sizeof(mcmd->orig_iocb.imm_ntfy));
2786 lun = a->u.isp24.fcp_cmnd.lun;
2787 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
2789 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
2790 le16_to_cpu(iocb->u.isp2x.seq_id));
2792 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
2793 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
2795 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2802 /* ha->hardware_lock supposed to be held on entry */
2803 static int qlt_abort_task(struct scsi_qla_host *vha,
2804 struct imm_ntfy_from_isp *iocb)
2806 struct qla_hw_data *ha = vha->hw;
2807 struct qla_tgt_sess *sess;
2810 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
2812 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
2814 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
2815 "qla_target(%d): task abort for unexisting "
2816 "session\n", vha->vp_idx);
2817 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
2818 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
2821 return __qlt_abort_task(vha, iocb, sess);
2825 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2827 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
2828 struct imm_ntfy_from_isp *iocb)
2832 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
2833 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
2834 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
2836 switch (iocb->u.isp24.status_subcode) {
2842 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
2847 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2848 if (tgt->link_reinit_iocb_pending) {
2849 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
2851 tgt->link_reinit_iocb_pending = 0;
2853 res = 1; /* send notify ack */
2858 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
2859 "qla_target(%d): Unsupported ELS command %x "
2860 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
2861 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
2868 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
2870 struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
2871 size_t first_offset = 0, rem_offset = offset, tmp = 0;
2872 int i, sg_srr_cnt, bufflen = 0;
2874 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
2875 "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
2876 "cmd->sg_cnt: %u, direction: %d\n",
2877 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
2880 * FIXME: Reject non-zero SRR relative offset until we can test
2881 * this code properly.
2883 pr_debug("Rejecting non-zero SRR rel_offs: %u\n", offset);
2886 if (!cmd->sg || !cmd->sg_cnt) {
2887 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
2888 "Missing cmd->sg or zero cmd->sg_cnt in"
2889 " qla_tgt_set_data_offset\n");
2893 * Walk the current cmd->sg list until we locate the new sg_srr_start
2895 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
2896 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
2897 "sg[%d]: %p page: %p, length: %d, offset: %d\n",
2898 i, sg, sg_page(sg), sg->length, sg->offset);
2900 if ((sg->length + tmp) > offset) {
2901 first_offset = rem_offset;
2903 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
2904 "Found matching sg[%d], using %p as sg_srr_start, "
2905 "and using first_offset: %zu\n", i, sg,
2910 rem_offset -= sg->length;
2913 if (!sg_srr_start) {
2914 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
2915 "Unable to locate sg_srr_start for offset: %u\n", offset);
2918 sg_srr_cnt = (cmd->sg_cnt - i);
2920 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
2922 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
2923 "Unable to allocate sgp\n");
2926 sg_init_table(sg_srr, sg_srr_cnt);
2929 * Walk the remaining list for sg_srr_start, mapping to the newly
2930 * allocated sg_srr taking first_offset into account.
2932 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
2934 sg_set_page(sgp, sg_page(sg),
2935 (sg->length - first_offset), first_offset);
2938 sg_set_page(sgp, sg_page(sg), sg->length, 0);
2940 bufflen += sgp->length;
2948 cmd->sg_cnt = sg_srr_cnt;
2949 cmd->bufflen = bufflen;
2950 cmd->offset += offset;
2953 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
2954 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
2956 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
2958 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
2961 if (cmd->sg_cnt < 0)
2964 if (cmd->bufflen < 0)
2970 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
2971 uint32_t srr_rel_offs, int *xmit_type)
2973 int res = 0, rel_offs;
2975 rel_offs = srr_rel_offs - cmd->offset;
2976 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
2977 srr_rel_offs, rel_offs);
2979 *xmit_type = QLA_TGT_XMIT_ALL;
2982 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
2983 "qla_target(%d): SRR rel_offs (%d) < 0",
2984 cmd->vha->vp_idx, rel_offs);
2986 } else if (rel_offs == cmd->bufflen)
2987 *xmit_type = QLA_TGT_XMIT_STATUS;
2988 else if (rel_offs > 0)
2989 res = qlt_set_data_offset(cmd, rel_offs);
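/*
 * Two hedged sketches of the SRR offset machinery above (hypothetical
 * helper names, not used by the driver). First, the core of
 * qlt_set_data_offset(): advance a scatterlist by 'offset' bytes and
 * report the element holding the new start plus the intra-element offset.
 */
static __maybe_unused struct scatterlist *example_sg_advance(
	struct scatterlist *sg, int nents, uint32_t offset,
	size_t *first_offset)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (offset < s->length) {
			*first_offset = offset;	/* start inside this element */
			return s;
		}
		offset -= s->length;	/* skip this whole element */
	}
	return NULL;	/* offset lies past the end of the list */
}

/*
 * Second, the decision made by qlt_srr_adjust_data() as a worked example:
 * with cmd->offset == 0 and bufflen == 8192, an SRR resuming at byte 4096
 * gives rel_offs == 4096, so only the tail is resent, while rel_offs ==
 * bufflen degenerates to a status-only retransmission.
 */
static __maybe_unused int example_srr_xmit_type(uint32_t srr_rel_offs,
	uint32_t cmd_offset, uint32_t bufflen)
{
	int rel_offs = srr_rel_offs - cmd_offset;

	if (rel_offs < 0)
		return -EINVAL;			/* before our window: reject */
	if (rel_offs == bufflen)
		return QLA_TGT_XMIT_STATUS;	/* data already delivered */
	return QLA_TGT_XMIT_ALL;		/* resend from rel_offs on */
}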
2994 /* No locks, thread context */
2995 static void qlt_handle_srr(struct scsi_qla_host *vha,
2996 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
2998 struct imm_ntfy_from_isp *ntfy =
2999 (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
3000 struct qla_hw_data *ha = vha->hw;
3001 struct qla_tgt_cmd *cmd = sctio->cmd;
3002 struct se_cmd *se_cmd = &cmd->se_cmd;
3003 unsigned long flags;
3004 int xmit_type = 0, resp = 0;
3008 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
3009 srr_ui = ntfy->u.isp24.srr_ui;
3011 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
3016 spin_lock_irqsave(&ha->hardware_lock, flags);
3017 qlt_send_notify_ack(vha, ntfy,
3018 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3019 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3020 xmit_type = QLA_TGT_XMIT_STATUS;
3023 case SRR_IU_DATA_IN:
3024 if (!cmd->sg || !cmd->sg_cnt) {
3025 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
3026 "Unable to process SRR_IU_DATA_IN due to"
3027 " missing cmd->sg, state: %d\n", cmd->state);
3031 if (se_cmd->scsi_status != 0) {
3032 ql_dbg(ql_dbg_tgt, vha, 0xe02a,
3033 "Rejecting SRR_IU_DATA_IN with non GOOD "
3037 cmd->bufflen = se_cmd->data_length;
3039 if (qlt_has_data(cmd)) {
3040 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
3042 spin_lock_irqsave(&ha->hardware_lock, flags);
3043 qlt_send_notify_ack(vha, ntfy,
3044 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3045 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3048 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
3049 "qla_target(%d): SRR for in data for cmd "
3050 "without them (tag %d, SCSI status %d), "
3051 "reject", vha->vp_idx, cmd->tag,
3052 cmd->se_cmd.scsi_status);
3056 case SRR_IU_DATA_OUT:
3057 if (!cmd->sg || !cmd->sg_cnt) {
3058 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
3059 "Unable to process SRR_IU_DATA_OUT due to"
3060 " missing cmd->sg\n");
3064 if (se_cmd->scsi_status != 0) {
3065 ql_dbg(ql_dbg_tgt, vha, 0xe02b,
3066 "Rejecting SRR_IU_DATA_OUT"
3067 " with non GOOD scsi_status\n");
3070 cmd->bufflen = se_cmd->data_length;
3072 if (qlt_has_data(cmd)) {
3073 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
3075 spin_lock_irqsave(&ha->hardware_lock, flags);
3076 qlt_send_notify_ack(vha, ntfy,
3077 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3078 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3079 if (xmit_type & QLA_TGT_XMIT_DATA)
3080 qlt_rdy_to_xfer(cmd);
3082 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
3083 "qla_target(%d): SRR for out data for cmd "
3084 "without them (tag %d, SCSI status %d), "
3085 "reject", vha->vp_idx, cmd->tag,
3086 cmd->se_cmd.scsi_status);
3091 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
3092 "qla_target(%d): Unknown srr_ui value %x",
3093 vha->vp_idx, srr_ui);
3097 /* Transmit the response for the status and data-in cases */
3099 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
3104 spin_lock_irqsave(&ha->hardware_lock, flags);
3105 qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
3106 NOTIFY_ACK_SRR_FLAGS_REJECT,
3107 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
3108 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
3109 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3110 cmd->state = QLA_TGT_STATE_DATA_IN;
3113 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
3114 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3117 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
3118 struct qla_tgt_srr_imm *imm, int ha_locked)
3120 struct qla_hw_data *ha = vha->hw;
3121 unsigned long flags = 0;
3124 spin_lock_irqsave(&ha->hardware_lock, flags);
3126 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
3127 NOTIFY_ACK_SRR_FLAGS_REJECT,
3128 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
3129 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
3132 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3137 static void qlt_handle_srr_work(struct work_struct *work)
3139 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
3140 struct scsi_qla_host *vha = tgt->vha;
3141 struct qla_tgt_srr_ctio *sctio;
3142 unsigned long flags;
3144 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
3148 spin_lock_irqsave(&tgt->srr_lock, flags);
3149 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
3150 struct qla_tgt_srr_imm *imm, *i, *ti;
3151 struct qla_tgt_cmd *cmd;
3152 struct se_cmd *se_cmd;
3155 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
3157 if (i->srr_id == sctio->srr_id) {
3158 list_del(&i->srr_list_entry);
3160 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
3161 "qla_target(%d): There must be "
3162 "only one IMM SRR per CTIO SRR "
3163 "(IMM SRR %p, id %d, CTIO %p\n",
3164 vha->vp_idx, i, i->srr_id, sctio);
3165 qlt_reject_free_srr_imm(tgt->vha, i, 0);
3171 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
3172 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
3176 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
3177 "Not found matching IMM for SRR CTIO (id %d)\n",
3181 list_del(&sctio->srr_list_entry);
3183 spin_unlock_irqrestore(&tgt->srr_lock, flags);
3187 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
3188 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
3197 se_cmd = &cmd->se_cmd;
3199 cmd->sg_cnt = se_cmd->t_data_nents;
3200 cmd->sg = se_cmd->t_data_sg;
3202 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
3203 "SRR cmd %p (se_cmd %p, tag %d, op %x), "
3204 "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
3205 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
3206 cmd->sg_cnt, cmd->offset);
3208 qlt_handle_srr(vha, sctio, imm);
3214 spin_unlock_irqrestore(&tgt->srr_lock, flags);
3217 /* ha->hardware_lock supposed to be held on entry */
3218 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
3219 struct imm_ntfy_from_isp *iocb)
3221 struct qla_tgt_srr_imm *imm;
3222 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3223 struct qla_tgt_srr_ctio *sctio;
3227 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
3230 imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
3232 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
3234 /* IRQ is already OFF */
3235 spin_lock(&tgt->srr_lock);
3236 imm->srr_id = tgt->imm_srr_id;
3237 list_add_tail(&imm->srr_list_entry,
3238 &tgt->srr_imm_list);
3239 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
3240 "IMM NTFY SRR %p added (id %d, ui %x)\n",
3241 imm, imm->srr_id, iocb->u.isp24.srr_ui);
3242 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
3244 list_for_each_entry(sctio, &tgt->srr_ctio_list,
3246 if (sctio->srr_id == imm->srr_id) {
3252 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
3253 "Scheduling srr work\n");
3254 schedule_work(&tgt->srr_work);
3256 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
3257 "qla_target(%d): imm_srr_id "
3258 "== ctio_srr_id (%d), but there is no "
3259 "corresponding SRR CTIO, deleting IMM "
3260 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
3262 list_del(&imm->srr_list_entry);
3266 spin_unlock(&tgt->srr_lock);
3270 spin_unlock(&tgt->srr_lock);
3272 struct qla_tgt_srr_ctio *ts;
3274 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
3275 "qla_target(%d): Unable to allocate SRR IMM "
3276 "entry, SRR request will be rejected\n", vha->vp_idx);
3278 /* IRQ is already OFF */
3279 spin_lock(&tgt->srr_lock);
3280 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
3282 if (sctio->srr_id == tgt->imm_srr_id) {
3283 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
3284 "CTIO SRR %p deleted (id %d)\n",
3285 sctio, sctio->srr_id);
3286 list_del(&sctio->srr_list_entry);
3287 qlt_send_term_exchange(vha, sctio->cmd,
3288 &sctio->cmd->atio, 1);
3292 spin_unlock(&tgt->srr_lock);
3299 qlt_send_notify_ack(vha, iocb, 0, 0, 0,
3300 NOTIFY_ACK_SRR_FLAGS_REJECT,
3301 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
3302 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
3306 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3308 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
3309 struct imm_ntfy_from_isp *iocb)
3311 struct qla_hw_data *ha = vha->hw;
3312 uint32_t add_flags = 0;
3313 int send_notify_ack = 1;
3316 status = le16_to_cpu(iocb->u.isp2x.status);
3318 case IMM_NTFY_LIP_RESET:
3320 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
3321 "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
3322 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
3323 iocb->u.isp24.status_subcode);
3325 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
3326 send_notify_ack = 0;
3330 case IMM_NTFY_LIP_LINK_REINIT:
3332 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3333 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
3334 "qla_target(%d): LINK REINIT (loop %#x, "
3335 "subcode %x)\n", vha->vp_idx,
3336 le16_to_cpu(iocb->u.isp24.nport_handle),
3337 iocb->u.isp24.status_subcode);
3338 if (tgt->link_reinit_iocb_pending) {
3339 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
3342 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
3343 tgt->link_reinit_iocb_pending = 1;
3345 * QLogic requires waiting after LINK REINIT for possible
3346 * PDISC or ADISC ELS commands
3348 send_notify_ack = 0;
3352 case IMM_NTFY_PORT_LOGOUT:
3353 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
3354 "qla_target(%d): Port logout (loop "
3355 "%#x, subcode %x)\n", vha->vp_idx,
3356 le16_to_cpu(iocb->u.isp24.nport_handle),
3357 iocb->u.isp24.status_subcode);
3359 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
3360 send_notify_ack = 0;
3361 /* The sessions will be cleared in the callback, if needed */
3364 case IMM_NTFY_GLBL_TPRLO:
3365 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
3366 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
3367 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
3368 send_notify_ack = 0;
3369 /* The sessions will be cleared in the callback, if needed */
3372 case IMM_NTFY_PORT_CONFIG:
3373 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
3374 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
3376 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
3377 send_notify_ack = 0;
3378 /* The sessions will be cleared in the callback, if needed */
3381 case IMM_NTFY_GLBL_LOGO:
3382 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
3383 "qla_target(%d): Link failure detected\n",
3385 /* I_T nexus loss */
3386 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
3387 send_notify_ack = 0;
3390 case IMM_NTFY_IOCB_OVERFLOW:
3391 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
3392 "qla_target(%d): Cannot provide requested "
3393 "capability (IOCB overflowed the immediate notify "
3394 "resource count)\n", vha->vp_idx);
3397 case IMM_NTFY_ABORT_TASK:
3398 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
3399 "qla_target(%d): Abort Task (S %08x I %#x -> "
3400 "L %#x)\n", vha->vp_idx,
3401 le16_to_cpu(iocb->u.isp2x.seq_id),
3402 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
3403 le16_to_cpu(iocb->u.isp2x.lun));
3404 if (qlt_abort_task(vha, iocb) == 0)
3405 send_notify_ack = 0;
3408 case IMM_NTFY_RESOURCE:
3409 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
3410 "qla_target(%d): Out of resources, host %ld\n",
3411 vha->vp_idx, vha->host_no);
3414 case IMM_NTFY_MSG_RX:
3415 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
3416 "qla_target(%d): Immediate notify task %x\n",
3417 vha->vp_idx, iocb->u.isp2x.task_flags);
3418 if (qlt_handle_task_mgmt(vha, iocb) == 0)
3419 send_notify_ack = 0;
3423 if (qlt_24xx_handle_els(vha, iocb) == 0)
3424 send_notify_ack = 0;
3428 qlt_prepare_srr_imm(vha, iocb);
3429 send_notify_ack = 0;
3433 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
3434 "qla_target(%d): Received unknown immediate "
3435 "notify status %x\n", vha->vp_idx, status);
3439 if (send_notify_ack)
3440 qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
3444 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3445 * This function sends busy to ISP 2xxx or 24xx.
3447 static void qlt_send_busy(struct scsi_qla_host *vha,
3448 struct atio_from_isp *atio, uint16_t status)
3450 struct ctio7_to_24xx *ctio24;
3451 struct qla_hw_data *ha = vha->hw;
3453 struct qla_tgt_sess *sess = NULL;
3455 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
3456 atio->u.isp24.fcp_hdr.s_id);
3458 qlt_send_term_exchange(vha, NULL, atio, 1);
3461 /* Sending a marker isn't necessary, since we are called from the ISR */
3463 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3465 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
3466 "qla_target(%d): %s failed: unable to allocate "
3467 "request packet", vha->vp_idx, __func__);
3471 pkt->entry_count = 1;
3472 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3474 ctio24 = (struct ctio7_to_24xx *)pkt;
3475 ctio24->entry_type = CTIO_TYPE7;
3476 ctio24->nport_handle = sess->loop_id;
3477 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
3478 ctio24->vp_index = vha->vp_idx;
3479 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
3480 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
3481 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
3482 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3483 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
3484 __constant_cpu_to_le16(
3485 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
3486 CTIO7_FLAGS_DONT_RET_CTIO);
3488 * A CTIO from the fw w/o an se_cmd doesn't provide enough info to retry
3489 * it if explicit confirmation is used.
3491 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
3492 ctio24->u.status1.scsi_status = cpu_to_le16(status);
3493 ctio24->u.status1.residual = get_unaligned((uint32_t *)
3494 &atio->u.isp24.fcp_cmnd.add_cdb[
3495 atio->u.isp24.fcp_cmnd.add_cdb_len]);
3496 if (ctio24->u.status1.residual != 0)
3497 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
3499 qla2x00_start_iocbs(vha, vha->req);
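/*
 * Note (hedged sketch): the ATIO carries the initiator port ID big-endian
 * in wire order, while CTIO7 wants it least-significant byte first, hence
 * the s_id[2]/[1]/[0] reversal used above and in __qlt_send_term_exchange().
 * Standalone form (hypothetical helper name):
 */
static __maybe_unused void example_sid_to_ctio7(const uint8_t s_id[3],
	uint8_t initiator_id[3])
{
	initiator_id[0] = s_id[2];	/* al_pa */
	initiator_id[1] = s_id[1];	/* area */
	initiator_id[2] = s_id[0];	/* domain */
}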
3502 /* ha->hardware_lock supposed to be held on entry */
3503 /* called via callback from qla2xxx */
3504 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
3505 struct atio_from_isp *atio)
3507 struct qla_hw_data *ha = vha->hw;
3508 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3511 if (unlikely(tgt == NULL)) {
3512 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
3513 "ATIO pkt, but no tgt (ha %p)", ha);
3516 ql_dbg(ql_dbg_tgt, vha, 0xe02c,
3517 "qla_target(%d): ATIO pkt %p: type %02x count %02x",
3518 vha->vp_idx, atio, atio->u.raw.entry_type,
3519 atio->u.raw.entry_count);
3521 * In tgt_stop mode we should also allow all requests to pass.
3522 * Otherwise, some commands can get stuck.
3525 tgt->irq_cmd_count++;
3527 switch (atio->u.raw.entry_type) {
3529 ql_dbg(ql_dbg_tgt, vha, 0xe02d,
3530 "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
3531 "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
3532 vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
3533 atio->u.isp24.fcp_cmnd.rddata,
3534 atio->u.isp24.fcp_cmnd.wrdata,
3535 atio->u.isp24.fcp_cmnd.add_cdb_len,
3536 be32_to_cpu(get_unaligned((uint32_t *)
3537 &atio->u.isp24.fcp_cmnd.add_cdb[
3538 atio->u.isp24.fcp_cmnd.add_cdb_len])),
3539 atio->u.isp24.fcp_hdr.s_id[0],
3540 atio->u.isp24.fcp_hdr.s_id[1],
3541 atio->u.isp24.fcp_hdr.s_id[2]);
3543 if (unlikely(atio->u.isp24.exchange_addr ==
3544 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
3545 ql_dbg(ql_dbg_tgt, vha, 0xe058,
3546 "qla_target(%d): ATIO_TYPE7 "
3547 "received with UNKNOWN exchange address, "
3548 "sending QUEUE_FULL\n", vha->vp_idx);
3549 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
3552 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
3553 rc = qlt_handle_cmd_for_atio(vha, atio);
3555 rc = qlt_handle_task_mgmt(vha, atio);
3556 if (unlikely(rc != 0)) {
3558 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
3559 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
3561 qlt_send_term_exchange(vha, NULL, atio, 1);
3564 if (tgt->tgt_stop) {
3565 ql_dbg(ql_dbg_tgt, vha, 0xe059,
3566 "qla_target: Unable to send "
3567 "command to target for req, "
3570 ql_dbg(ql_dbg_tgt, vha, 0xe05a,
3571 "qla_target(%d): Unable to send "
3572 "command to target, sending BUSY "
3573 "status.\n", vha->vp_idx);
3574 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
3580 case IMMED_NOTIFY_TYPE:
3582 if (unlikely(atio->u.isp2x.entry_status != 0)) {
3583 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
3584 "qla_target(%d): Received ATIO packet %x "
3585 "with error status %x\n", vha->vp_idx,
3586 atio->u.raw.entry_type,
3587 atio->u.isp2x.entry_status);
3590 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
3591 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
3596 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
3597 "qla_target(%d): Received unknown ATIO atio "
3598 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
3602 tgt->irq_cmd_count--;
3605 /* ha->hardware_lock supposed to be held on entry */
3606 /* called via callback from qla2xxx */
3607 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
3609 struct qla_hw_data *ha = vha->hw;
3610 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3612 if (unlikely(tgt == NULL)) {
3613 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
3614 "qla_target(%d): Response pkt %x received, but no "
3615 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
3619 ql_dbg(ql_dbg_tgt, vha, 0xe02f,
3620 "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
3621 "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
3622 pkt->entry_count, pkt->entry_status, pkt->handle);
3625 * In tgt_stop mode we should also allow all requests to pass.
3626 * Otherwise, some commands can get stuck.
3629 tgt->irq_cmd_count++;
3631 switch (pkt->entry_type) {
3634 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
3635 ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
3637 qlt_do_ctio_completion(vha, entry->handle,
3638 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
3643 case ACCEPT_TGT_IO_TYPE:
3645 struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
3647 ql_dbg(ql_dbg_tgt, vha, 0xe031,
3648 "ACCEPT_TGT_IO instance %d status %04x "
3649 "lun %04x read/write %d data_length %04x "
3650 "target_id %02x rx_id %04x\n ", vha->vp_idx,
3651 le16_to_cpu(atio->u.isp2x.status),
3652 le16_to_cpu(atio->u.isp2x.lun),
3653 atio->u.isp2x.execution_codes,
3654 le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
3655 atio), atio->u.isp2x.rx_id);
3656 if (atio->u.isp2x.status !=
3657 __constant_cpu_to_le16(ATIO_CDB_VALID)) {
3658 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
3659 "qla_target(%d): ATIO with error "
3660 "status %x received\n", vha->vp_idx,
3661 le16_to_cpu(atio->u.isp2x.status));
3664 ql_dbg(ql_dbg_tgt, vha, 0xe032,
3665 "FCP CDB: 0x%02x, sizeof(cdb): %lu",
3666 atio->u.isp2x.cdb[0], (unsigned long
3667 int)sizeof(atio->u.isp2x.cdb));
3669 rc = qlt_handle_cmd_for_atio(vha, atio);
3670 if (unlikely(rc != 0)) {
3672 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
3673 qlt_send_busy(vha, atio, 0);
3675 qlt_send_term_exchange(vha, NULL, atio, 1);
3678 if (tgt->tgt_stop) {
3679 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
3680 "qla_target: Unable to send "
3681 "command to target, sending TERM "
3682 "EXCHANGE for rsp\n");
3683 qlt_send_term_exchange(vha, NULL,
3686 ql_dbg(ql_dbg_tgt, vha, 0xe060,
3687 "qla_target(%d): Unable to send "
3688 "command to target, sending BUSY "
3689 "status\n", vha->vp_idx);
3690 qlt_send_busy(vha, atio, 0);
3697 case CONTINUE_TGT_IO_TYPE:
3699 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
3700 ql_dbg(ql_dbg_tgt, vha, 0xe033,
3701 "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
3702 qlt_do_ctio_completion(vha, entry->handle,
3703 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
3710 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
3711 ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
3713 qlt_do_ctio_completion(vha, entry->handle,
3714 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
3719 case IMMED_NOTIFY_TYPE:
3720 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
3721 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
3724 case NOTIFY_ACK_TYPE:
3725 if (tgt->notify_ack_expected > 0) {
3726 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
3727 ql_dbg(ql_dbg_tgt, vha, 0xe036,
3728 "NOTIFY_ACK seq %08x status %x\n",
3729 le16_to_cpu(entry->u.isp2x.seq_id),
3730 le16_to_cpu(entry->u.isp2x.status));
3731 tgt->notify_ack_expected--;
3732 if (entry->u.isp2x.status !=
3733 __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
3734 ql_dbg(ql_dbg_tgt, vha, 0xe061,
3735 "qla_target(%d): NOTIFY_ACK "
3736 "failed %x\n", vha->vp_idx,
3737 le16_to_cpu(entry->u.isp2x.status));
3740 ql_dbg(ql_dbg_tgt, vha, 0xe062,
3741 "qla_target(%d): Unexpected NOTIFY_ACK received\n",
3746 case ABTS_RECV_24XX:
3747 ql_dbg(ql_dbg_tgt, vha, 0xe037,
3748 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
3749 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
3752 case ABTS_RESP_24XX:
3753 if (tgt->abts_resp_expected > 0) {
3754 struct abts_resp_from_24xx_fw *entry =
3755 (struct abts_resp_from_24xx_fw *)pkt;
3756 ql_dbg(ql_dbg_tgt, vha, 0xe038,
3757 "ABTS_RESP_24XX: compl_status %x\n",
3758 entry->compl_status);
3759 tgt->abts_resp_expected--;
3760 if (le16_to_cpu(entry->compl_status) !=
3761 ABTS_RESP_COMPL_SUCCESS) {
3762 if ((entry->error_subcode1 == 0x1E) &&
3763 (entry->error_subcode2 == 0)) {
3765 * We've got a race here: the aborted
3766 * exchange was not terminated, i.e. the
3767 * response for the aborted command was
3768 * sent between the abort request being
3769 * received and being processed.
3770 * Unfortunately, the firmware has a
3771 * silly requirement that all aborted
3772 * exchanges must be explicitly
3773 * terminated, otherwise it refuses to
3774 * send responses for the abort
3775 * requests. So, we have to
3776 * (re)terminate the exchange and retry
3777 * the abort response.
3779 qlt_24xx_retry_term_exchange(vha,
3782 ql_dbg(ql_dbg_tgt, vha, 0xe063,
3783 "qla_target(%d): ABTS_RESP_24XX "
3784 "failed %x (subcode %x:%x)",
3785 vha->vp_idx, entry->compl_status,
3786 entry->error_subcode1,
3787 entry->error_subcode2);
3790 ql_dbg(ql_dbg_tgt, vha, 0xe064,
3791 "qla_target(%d): Unexpected ABTS_RESP_24XX "
3792 "received\n", vha->vp_idx);
3797 ql_dbg(ql_dbg_tgt, vha, 0xe065,
3798 "qla_target(%d): Received unknown response pkt "
3799 "type %x\n", vha->vp_idx, pkt->entry_type);
3803 tgt->irq_cmd_count--;
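/*
 * Sketch of the irq_cmd_count bracket used by the two handlers above
 * (simplified, hypothetical name): the tgt_stop path waits for in-flight
 * IRQ work to drain before tearing the target down, so each handler wraps
 * its body with the counter while holding ha->hardware_lock.
 */
static __maybe_unused void example_irq_bracket(struct qla_tgt *tgt)
{
	tgt->irq_cmd_count++;	/* modified under ha->hardware_lock */
	/* ... handle one ATIO/response packet ... */
	tgt->irq_cmd_count--;
}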
3807 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3809 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
3812 struct qla_hw_data *ha = vha->hw;
3813 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3816 ql_dbg(ql_dbg_tgt, vha, 0xe039,
3817 "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
3818 vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
3819 ha->operating_mode, ha->current_topology);
3821 if (!ha->tgt.tgt_ops)
3824 if (unlikely(tgt == NULL)) {
3825 ql_dbg(ql_dbg_tgt, vha, 0xe03a,
3826 "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
3830 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
3834 * In tgt_stop mode we should also allow all requests to pass.
3835 * Otherwise, some commands can get stuck.
3838 tgt->irq_cmd_count++;
3841 case MBA_RESET: /* Reset */
3842 case MBA_SYSTEM_ERR: /* System Error */
3843 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
3844 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
3845 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
3846 "qla_target(%d): System error async event %#x "
3847 "occurred", vha->vp_idx, code);
3849 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
3850 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3855 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
3856 "qla_target(%d): Async LOOP_UP occurred "
3857 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
3858 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
3859 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
3860 if (tgt->link_reinit_iocb_pending) {
3861 qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
3863 tgt->link_reinit_iocb_pending = 0;
3868 case MBA_LIP_OCCURRED:
3871 case MBA_RSCN_UPDATE:
3872 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
3873 "qla_target(%d): Async event %#x occurred "
3874 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
3875 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
3876 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
3879 case MBA_PORT_UPDATE:
3880 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
3881 "qla_target(%d): Port update async event %#x "
3882 "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
3883 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
3884 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
3885 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
3887 login_code = le16_to_cpu(mailbox[2]);
3888 if (login_code == 0x4)
3889 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
3890 "Async MB 2: Got PLOGI Complete\n");
3891 else if (login_code == 0x7)
3892 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
3893 "Async MB 2: Port Logged Out\n");
3897 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
3898 "qla_target(%d): Async event %#x occurred: "
3899 "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
3900 code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
3901 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
3905 tgt->irq_cmd_count--;
3908 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
3914 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
3916 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
3917 "qla_target(%d): Allocation of tmp FC port failed",
3922 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
3924 fcport->loop_id = loop_id;
3926 rc = qla2x00_get_port_database(vha, fcport, 0);
3927 if (rc != QLA_SUCCESS) {
3928 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
3929 "qla_target(%d): Failed to retrieve fcport "
3930 "information -- get_port_database() returned %x "
3931 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
3939 /* Must be called under tgt_mutex */
3940 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
3943 struct qla_tgt_sess *sess = NULL;
3944 fc_port_t *fcport = NULL;
3945 int rc, global_resets;
3946 uint16_t loop_id = 0;
3950 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
3952 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
3954 if ((s_id[0] == 0xFF) &&
3955 (s_id[1] == 0xFC)) {
3957 * This is the Domain Controller, so it should be
3958 * OK to drop SCSI commands from it.
3960 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
3961 "Unable to find initiator with S_ID %x:%x:%x",
3962 s_id[0], s_id[1], s_id[2]);
3964 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
3965 "qla_target(%d): Unable to find "
3966 "initiator with S_ID %x:%x:%x",
3967 vha->vp_idx, s_id[0], s_id[1],
3972 fcport = qlt_get_port_database(vha, loop_id);
3976 if (global_resets !=
3977 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
3978 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
3979 "qla_target(%d): global reset during session discovery "
3980 "(counter was %d, new %d), retrying", vha->vp_idx,
3982 atomic_read(&vha->vha_tgt.
3983 qla_tgt->tgt_global_resets_count));
3987 sess = qlt_create_sess(vha, fcport, true);
3993 static void qlt_abort_work(struct qla_tgt *tgt,
3994 struct qla_tgt_sess_work_param *prm)
3996 struct scsi_qla_host *vha = tgt->vha;
3997 struct qla_hw_data *ha = vha->hw;
3998 struct qla_tgt_sess *sess = NULL;
3999 unsigned long flags;
4004 spin_lock_irqsave(&ha->hardware_lock, flags);
4009 s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
4010 s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
4011 s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
4013 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4014 (unsigned char *)&be_s_id);
4016 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4018 mutex_lock(&vha->vha_tgt.tgt_mutex);
4019 sess = qlt_make_local_sess(vha, s_id);
4020 /* sess has an extra creation ref */
4021 mutex_unlock(&vha->vha_tgt.tgt_mutex);
4023 spin_lock_irqsave(&ha->hardware_lock, flags);
4027 kref_get(&sess->se_sess->sess_kref);
4033 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
4037 ha->tgt.tgt_ops->put_sess(sess);
4038 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4042 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
4044 ha->tgt.tgt_ops->put_sess(sess);
4045 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4048 static void qlt_tmr_work(struct qla_tgt *tgt,
4049 struct qla_tgt_sess_work_param *prm)
4051 struct atio_from_isp *a = &prm->tm_iocb2;
4052 struct scsi_qla_host *vha = tgt->vha;
4053 struct qla_hw_data *ha = vha->hw;
4054 struct qla_tgt_sess *sess = NULL;
4055 unsigned long flags;
4056 uint8_t *s_id = NULL; /* to hide compiler warnings */
4058 uint32_t lun, unpacked_lun;
4062 spin_lock_irqsave(&ha->hardware_lock, flags);
4067 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
4068 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
4070 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4072 mutex_lock(&vha->vha_tgt.tgt_mutex);
4073 sess = qlt_make_local_sess(vha, s_id);
4074 /* sess has an extra creation ref */
4075 mutex_unlock(&vha->vha_tgt.tgt_mutex);
4077 spin_lock_irqsave(&ha->hardware_lock, flags);
4081 kref_get(&sess->se_sess->sess_kref);
4085 lun = a->u.isp24.fcp_cmnd.lun;
4086 lun_size = sizeof(lun);
4087 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4088 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
4090 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4094 ha->tgt.tgt_ops->put_sess(sess);
4095 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4099 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
4101 ha->tgt.tgt_ops->put_sess(sess);
4102 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4105 static void qlt_sess_work_fn(struct work_struct *work)
4107 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
4108 struct scsi_qla_host *vha = tgt->vha;
4109 unsigned long flags;
4111 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
4113 spin_lock_irqsave(&tgt->sess_work_lock, flags);
4114 while (!list_empty(&tgt->sess_works_list)) {
4115 struct qla_tgt_sess_work_param *prm = list_entry(
4116 tgt->sess_works_list.next, typeof(*prm),
4117 sess_works_list_entry);
4120 * This work can be scheduled on several CPUs at a time, so we
4121 * must delete the entry to avoid double processing
4123 list_del(&prm->sess_works_list_entry);
4125 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
4127 switch (prm->type) {
4128 case QLA_TGT_SESS_WORK_ABORT:
4129 qlt_abort_work(tgt, prm);
4131 case QLA_TGT_SESS_WORK_TM:
4132 qlt_tmr_work(tgt, prm);
4139 spin_lock_irqsave(&tgt->sess_work_lock, flags);
4143 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
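/*
 * The loop above is the standard "detach under lock, process unlocked"
 * idiom; condensed form (hedged sketch, hypothetical names):
 */
static __maybe_unused void example_drain_list(spinlock_t *lock,
	struct list_head *list, void (*process)(struct list_head *))
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	while (!list_empty(list)) {
		struct list_head *entry = list->next;

		list_del(entry);	/* claim the entry for this CPU */
		spin_unlock_irqrestore(lock, flags);
		process(entry);		/* may sleep */
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}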
4146 /* Must be called under tgt_host_action_mutex */
4147 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
4149 struct qla_tgt *tgt;
4151 if (!QLA_TGT_MODE_ENABLED())
4154 if (!IS_TGT_MODE_CAPABLE(ha)) {
4155 ql_log(ql_log_warn, base_vha, 0xe070,
4156 "This adapter does not support target mode.\n");
4160 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
4161 "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
4163 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
4165 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
4167 ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
4168 "Unable to allocate struct qla_tgt\n");
4172 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
4173 base_vha->host->hostt->supported_mode |= MODE_TARGET;
4176 tgt->vha = base_vha;
4177 init_waitqueue_head(&tgt->waitQ);
4178 INIT_LIST_HEAD(&tgt->sess_list);
4179 INIT_LIST_HEAD(&tgt->del_sess_list);
4180 INIT_DELAYED_WORK(&tgt->sess_del_work,
4181 (void (*)(struct work_struct *))qlt_del_sess_work_fn);
4182 spin_lock_init(&tgt->sess_work_lock);
4183 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
4184 INIT_LIST_HEAD(&tgt->sess_works_list);
4185 spin_lock_init(&tgt->srr_lock);
4186 INIT_LIST_HEAD(&tgt->srr_ctio_list);
4187 INIT_LIST_HEAD(&tgt->srr_imm_list);
4188 INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
4189 atomic_set(&tgt->tgt_global_resets_count, 0);
4191 base_vha->vha_tgt.qla_tgt = tgt;
4193 ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
4194 "qla_target(%d): using 64 Bit PCI addressing",
4196 tgt->tgt_enable_64bit_addr = 1;
4198 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
4199 tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
4200 tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
4202 if (base_vha->fc_vport)
4205 mutex_lock(&qla_tgt_mutex);
4206 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
4207 mutex_unlock(&qla_tgt_mutex);
4212 /* Must be called under tgt_host_action_mutex */
4213 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
4215 if (!vha->vha_tgt.qla_tgt)
4218 if (vha->fc_vport) {
4219 qlt_release(vha->vha_tgt.qla_tgt);
4222 mutex_lock(&qla_tgt_mutex);
4223 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
4224 mutex_unlock(&qla_tgt_mutex);
4226 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
4228 qlt_release(vha->vha_tgt.qla_tgt);
4233 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
4238 pr_debug("qla2xxx HW vha->node_name: ");
4239 for (i = 0; i < WWN_SIZE; i++)
4240 pr_debug("%02x ", vha->node_name[i]);
4242 pr_debug("qla2xxx HW vha->port_name: ");
4243 for (i = 0; i < WWN_SIZE; i++)
4244 pr_debug("%02x ", vha->port_name[i]);
4247 pr_debug("qla2xxx passed configfs WWPN: ");
4248 put_unaligned_be64(wwpn, b);
4249 for (i = 0; i < WWN_SIZE; i++)
4250 pr_debug("%02x ", b[i]);
4255 * qlt_lport_register - register lport with external module
4257 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
4258 * @phys_wwpn: passed FC target WWPN
4259 * @npiv_wwpn: NPIV WWPN, @npiv_wwnn: NPIV WWNN (zero for a physical port)
4260 * @callback: lport initialization callback for tcm_qla2xxx code
4262 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
4263 u64 npiv_wwpn, u64 npiv_wwnn,
4264 int (*callback)(struct scsi_qla_host *, void *, u64, u64))
4266 struct qla_tgt *tgt;
4267 struct scsi_qla_host *vha;
4268 struct qla_hw_data *ha;
4269 struct Scsi_Host *host;
4270 unsigned long flags;
4274 mutex_lock(&qla_tgt_mutex);
4275 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
4283 if (!(host->hostt->supported_mode & MODE_TARGET))
4286 spin_lock_irqsave(&ha->hardware_lock, flags);
4287 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
4288 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
4290 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4293 if (tgt->tgt_stop) {
4294 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
4296 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4299 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4301 if (!scsi_host_get(host)) {
4302 ql_dbg(ql_dbg_tgt, vha, 0xe068,
4303 "Unable to scsi_host_get() for"
4304 " qla2xxx scsi_host\n");
4307 qlt_lport_dump(vha, phys_wwpn, b);
4309 if (memcmp(vha->port_name, b, WWN_SIZE)) {
4310 scsi_host_put(host);
4313 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
4315 scsi_host_put(host);
4317 mutex_unlock(&qla_tgt_mutex);
4320 mutex_unlock(&qla_tgt_mutex);
4324 EXPORT_SYMBOL(qlt_lport_register);
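/*
 * Illustrative caller (hedged sketch; names hypothetical): a fabric module
 * registers its lport against a physical port's WWPN; the callback is
 * invoked once a matching host is found, with qla_tgt_mutex held.
 */
static __maybe_unused int example_lport_cb(struct scsi_qla_host *vha,
	void *target_lport_ptr, u64 npiv_wwpn, u64 npiv_wwnn)
{
	/* Stash vha in the fabric lport and install ha->tgt.tgt_ops here. */
	return 0;
}

static __maybe_unused int example_attach_lport(void *lport, u64 wwpn)
{
	/* Physical port registration: NPIV WWPN/WWNN are passed as zero. */
	return qlt_lport_register(lport, wwpn, 0, 0, example_lport_cb);
}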
4327 * qlt_lport_deregister - Deregister lport
4329 * @vha: Registered scsi_qla_host pointer
4331 void qlt_lport_deregister(struct scsi_qla_host *vha)
4333 struct qla_hw_data *ha = vha->hw;
4334 struct Scsi_Host *sh = vha->host;
4336 * Clear the target_lport_ptr and the qla_target_template pointer in qla_hw_data
4338 vha->vha_tgt.target_lport_ptr = NULL;
4339 ha->tgt.tgt_ops = NULL;
4341 * Release the Scsi_Host reference for the underlying qla2xxx host
4345 EXPORT_SYMBOL(qlt_lport_deregister);
/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode |= MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/* Must be called under HW lock */
void qlt_clear_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode &= ~MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

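/*
 * active_mode transitions driven by the qlini_mode module parameter:
 *
 *	ql2x_ini_mode	qlt_set_mode()		qlt_clear_mode()
 *	"disabled"	MODE_TARGET		MODE_UNKNOWN
 *	"exclusive"	MODE_TARGET		MODE_INITIATOR
 *	"enabled"	adds MODE_TARGET	drops MODE_TARGET
 */
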
/*
 * qlt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
}
EXPORT_SYMBOL(qlt_enable_vha);

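/*
 * Enabling target mode is deliberately asymmetric: an NPIV port
 * (vha->vp_idx != 0) only needs a VP disable/enable cycle, while the
 * physical port requires a full ISP abort so the firmware comes back up
 * with the target-mode options applied by the nvram stage1/stage2
 * routines below.
 */
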
/*
 * qlt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
void
qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	if (!qla_tgt_mode_enabled(vha))
		return;

	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}

void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		if (qla_ini_mode_enabled(vha))
			ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
		else
			ct_req->req.rff_id.fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
	}
}

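/*
 * Per the FC-GS RFF_ID definition, FC-4 feature bit 0 advertises target
 * functionality and bit 1 advertises initiator functionality, so the name
 * server always sees exactly the roles currently enabled on this vha.
 */
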
/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI driver HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (!qla_tgt_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}
}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!vha->flags.online)
		return;

	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}

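/*
 * The ATIO ring uses a simple ownership convention: firmware posts entries
 * carrying a signature other than ATIO_PROCESSED, and the driver stamps
 * each entry back to ATIO_PROCESSED once it has been delivered via
 * qlt_24xx_atio_pkt_all_vps(), then publishes the new consumer index
 * through ISP_ATIO_Q_OUT.  Multi-entry packets (entry_count > 1) advance
 * the ring index once per constituent entry.
 */
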
void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	if (!QLA_TGT_MODE_ENABLED())
		return;

	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for ATIO queue.\n",
		    msix->entry);
	}
}

void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tapes support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= __constant_cpu_to_le32(BIT_6|BIT_9);

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}

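/*
 * Target-mode NVRAM overrides applied above, mirrored for the 8x/83xx
 * parts by qlt_81xx_config_nvram_stage1() below:
 *
 *	exchange_count		forced to 0xFFFF
 *	firmware_options_1	BIT_4 set (enable target mode), BIT_5 set
 *				when initiator mode is off, BIT_13 cleared
 *				(no full login after LIP), BIT_9 cleared
 *				(enable initial LIP)
 *	firmware_options_2	BIT_12 set (FC tape), BIT_14 set (target
 *				PRLI control), BIT_8 per enable_class_2
 *	firmware_options_3	BIT_6|BIT_9 set (out-of-order reassembly)
 *	host_p			BIT_10 cleared (no full login after LIP)
 */
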
void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}

void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |=
			    __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tapes support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= __constant_cpu_to_le32(BIT_6|BIT_9);

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}

void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}

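/*
 * Returns 1 when the errored status entry is a target-mode IOCB type, so
 * the response-queue error path can hand it to the target code, and 0 for
 * everything the initiator path should keep handling itself.
 */
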
int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
		return 1;
	default:
		return 0;
	}
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;
	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}

irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_24xx_process_atio_queue(vha);
	qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

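/*
 * When the ISP provides a dedicated ATIO MSI-X vector (registered as
 * msix_entries[2] in qlt_24xx_config_rings() above), this handler drains
 * both the ATIO queue and the attached response queue under the hardware
 * lock, so ATIO processing cannot race with normal response processing.
 */
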
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}

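/*
 * tgt_vp_map provides the two constant-time lookups needed when
 * demultiplexing incoming exchanges: vp_idx -> scsi_qla_host (the .vha
 * member) and AL_PA -> vp_idx (the .idx member), maintained here as
 * vports are created, acquire an address, and go away.
 */
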
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;

	return true;
}

int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
	    NULL);
	if (!qla_tgt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06c,
		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		ret = -ENOMEM;
		goto out;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
out:
	kmem_cache_destroy(qla_tgt_cmd_cachep);
	return ret;
}

void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	kmem_cache_destroy(qla_tgt_cmd_cachep);
}