Merge branch 'work.thaw' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
[linux-2.6-microblaze.git] / drivers / scsi / qla2xxx / qla_target.c
1 /*
2  *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
3  *
4  *  based on qla2x00t.c code:
5  *
6  *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
7  *  Copyright (C) 2004 - 2005 Leonid Stoljar
8  *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
9  *  Copyright (C) 2006 - 2010 ID7 Ltd.
10  *
11  *  Forward port and refactoring to modern qla2xxx and target/configfs
12  *
13  *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
14  *
15  *  This program is free software; you can redistribute it and/or
16  *  modify it under the terms of the GNU General Public License
17  *  as published by the Free Software Foundation, version 2
18  *  of the License.
19  *
20  *  This program is distributed in the hope that it will be useful,
21  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
22  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23  *  GNU General Public License for more details.
24  */
25
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include <linux/types.h>
29 #include <linux/blkdev.h>
30 #include <linux/interrupt.h>
31 #include <linux/pci.h>
32 #include <linux/delay.h>
33 #include <linux/list.h>
34 #include <linux/workqueue.h>
35 #include <asm/unaligned.h>
36 #include <scsi/scsi.h>
37 #include <scsi/scsi_host.h>
38 #include <scsi/scsi_tcq.h>
39 #include <target/target_core_base.h>
40 #include <target/target_core_fabric.h>
41
42 #include "qla_def.h"
43 #include "qla_target.h"
44
/* Module parameters and target-mode global state. */

/* 0 (default): no SLER; 1: enable sequence level error recovery (FC Tape). */
static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

/* Controls when initiator mode is enabled relative to target mode. */
static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - Initiator Modes will be enabled. Target Mode can be activated "
	"when ready "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

/* Percentage of FW exchange resources reserved for target mode in dual mode. */
static int ql_dm_tgt_ex_pct = 0;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds FW will allocate resources "
	"for Target mode.");

/* 1 (default): let the user steer IRQ placement via smp_affinity. */
int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
    "User to control IRQ placement via smp_affinity."
    "Valid with qlini_mode=disabled."
    "1(default): enable");

/* Current initiator-mode policy; parsed from qlini_mode at init time. */
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

/* SAM status returned when the driver (vs. target core) must throttle. */
static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */
80
/*
 * FCP_RSP response codes, duplicated here from scsi/fc/fc_fcp.h so the
 * target code does not depend on that header.
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * FCP_CMND task attribute / priority field (fc_pri_ta), duplicated from
 * scsi/fc/fc_fcp.h.
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */
104
105 /*
106  * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
107  * must be called under HW lock and could unlock/lock it inside.
108  * It isn't an issue, since in the current implementation on the time when
109  * those functions are called:
110  *
111  *   - Either context is IRQ and only IRQ handler can modify HW data,
112  *     including rings related fields,
113  *
114  *   - Or access to target mode variables from struct qla_tgt doesn't
115  *     cross those functions boundaries, except tgt_stop, which
116  *     additionally protected by irq_cmd_count.
117  */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
    uint16_t);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;	/* mgmt cmd allocator */
struct kmem_cache *qla_tgt_plogi_cachep;		/* qlt_plogi_ack_t allocator */
static mempool_t *qla_tgt_mgmt_cmd_mempool;		/* guaranteed mgmt cmd pool */
static struct workqueue_struct *qla_tgt_wq;		/* target work queue */
static DEFINE_MUTEX(qla_tgt_mutex);			/* protects qla_tgt_glist */
static LIST_HEAD(qla_tgt_glist);			/* list of all struct qla_tgt */
154
155 static const char *prot_op_str(u32 prot_op)
156 {
157         switch (prot_op) {
158         case TARGET_PROT_NORMAL:        return "NORMAL";
159         case TARGET_PROT_DIN_INSERT:    return "DIN_INSERT";
160         case TARGET_PROT_DOUT_INSERT:   return "DOUT_INSERT";
161         case TARGET_PROT_DIN_STRIP:     return "DIN_STRIP";
162         case TARGET_PROT_DOUT_STRIP:    return "DOUT_STRIP";
163         case TARGET_PROT_DIN_PASS:      return "DIN_PASS";
164         case TARGET_PROT_DOUT_PASS:     return "DOUT_PASS";
165         default:                        return "UNKNOWN";
166         }
167 }
168
/* This API intentionally takes dest as a parameter, rather than returning
 * int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	/* The generation counter lives on the base (physical) vha. */
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}
178
179 /* Might release hw lock, then reaquire!! */
180 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
181 {
182         /* Send marker if required */
183         if (unlikely(vha->marker_needed != 0)) {
184                 int rc = qla2x00_issue_marker(vha, vha_locked);
185                 if (rc != QLA_SUCCESS) {
186                         ql_dbg(ql_dbg_tgt, vha, 0xe03d,
187                             "qla_target(%d): issue_marker() failed\n",
188                             vha->vp_idx);
189                 }
190                 return rc;
191         }
192         return QLA_SUCCESS;
193 }
194
195 static inline
196 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
197         uint8_t *d_id)
198 {
199         struct scsi_qla_host *host;
200         uint32_t key = 0;
201
202         if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
203             (vha->d_id.b.al_pa == d_id[2]))
204                 return vha;
205
206         key  = (uint32_t)d_id[0] << 16;
207         key |= (uint32_t)d_id[1] <<  8;
208         key |= (uint32_t)d_id[2];
209
210         host = btree_lookup32(&vha->hw->tgt.host_map, key);
211         if (!host)
212                 ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
213                     "Unable to find host %06x\n", key);
214
215         return host;
216 }
217
218 static inline
219 struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
220         uint16_t vp_idx)
221 {
222         struct qla_hw_data *ha = vha->hw;
223
224         if (vha->vp_idx == vp_idx)
225                 return vha;
226
227         BUG_ON(ha->tgt.tgt_vp_map == NULL);
228         if (likely(test_bit(vp_idx, ha->vp_idx_map)))
229                 return ha->tgt.tgt_vp_map[vp_idx].vha;
230
231         return NULL;
232 }
233
234 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
235 {
236         unsigned long flags;
237
238         spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
239
240         vha->hw->tgt.num_pend_cmds++;
241         if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
242                 vha->qla_stats.stat_max_pend_cmds =
243                         vha->hw->tgt.num_pend_cmds;
244         spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
245 }
246 static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
247 {
248         unsigned long flags;
249
250         spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
251         vha->hw->tgt.num_pend_cmds--;
252         spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
253 }
254
255
256 static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
257         struct atio_from_isp *atio, uint8_t ha_locked)
258 {
259         struct qla_tgt_sess_op *u;
260         struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
261         unsigned long flags;
262
263         if (tgt->tgt_stop) {
264                 ql_dbg(ql_dbg_async, vha, 0x502c,
265                     "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
266                     vha->vp_idx);
267                 goto out_term;
268         }
269
270         u = kzalloc(sizeof(*u), GFP_ATOMIC);
271         if (u == NULL)
272                 goto out_term;
273
274         u->vha = vha;
275         memcpy(&u->atio, atio, sizeof(*atio));
276         INIT_LIST_HEAD(&u->cmd_list);
277
278         spin_lock_irqsave(&vha->cmd_list_lock, flags);
279         list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
280         spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
281
282         schedule_delayed_work(&vha->unknown_atio_work, 1);
283
284 out:
285         return;
286
287 out_term:
288         qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
289         goto out;
290 }
291
/*
 * Walk the unknown_atio_list and, for each queued ATIO: terminate it if it
 * was aborted, redeliver it if its d_id can now be resolved, terminate it
 * if the target is stopping, or leave it queued and reschedule the work.
 *
 * NOTE(review): the list is traversed without cmd_list_lock held; the lock
 * is only taken around the list_del(). Presumably safe because all producers
 * and this consumer run in contexts that cannot race here — confirm against
 * callers before relying on it.
 */
static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;	/* delayed work rescheduled at most once */

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			/* d_id resolves now — replay the ATIO to its host. */
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			/* Still unresolvable: keep it queued and retry later. */
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

		/* Fall through from the handled cases: unlink and free. */
abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}
340
341 void qlt_unknown_atio_work_fn(struct work_struct *work)
342 {
343         struct scsi_qla_host *vha = container_of(to_delayed_work(work),
344             struct scsi_qla_host, unknown_atio_work);
345
346         qlt_try_to_dequeue_unknown_atios(vha, 0);
347 }
348
/*
 * Dispatch a packet from the ATIO queue to the vha that owns it.
 * ATIO_TYPE7 packets are routed by FC d_id; immediate notifies and ABTS
 * by vp_index. Unroutable ATIO_TYPE7 packets are parked on the
 * unknown_atio_list for later retry.
 *
 * @ha_locked: non-zero when the caller already holds hardware_lock; ABTS
 *             handling takes the lock itself otherwise.
 *
 * Always returns false (return value currently unused by callers visible
 * here).
 */
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		/* New SCSI command: route by destination FC address. */
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);


			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		/* A resolvable host may unblock earlier unknown ATIOs too. */
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		/* May drop and re-take hardware_lock internally. */
		qlt_issue_marker(vha, ha_locked);

		/* 0xFF/0xFFFF mean "no specific vp/handle": stay on vha. */
		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
			entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		/* qlt_24xx_handle_abts() requires hardware_lock held. */
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}
445
446 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
447         struct rsp_que *rsp, response_t *pkt)
448 {
449         switch (pkt->entry_type) {
450         case CTIO_CRC2:
451                 ql_dbg(ql_dbg_tgt, vha, 0xe073,
452                         "qla_target(%d):%s: CRC2 Response pkt\n",
453                         vha->vp_idx, __func__);
454                 /* fall through */
455         case CTIO_TYPE7:
456         {
457                 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
458                 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
459                     entry->vp_index);
460                 if (unlikely(!host)) {
461                         ql_dbg(ql_dbg_tgt, vha, 0xe041,
462                             "qla_target(%d): Response pkt (CTIO_TYPE7) "
463                             "received, with unknown vp_index %d\n",
464                             vha->vp_idx, entry->vp_index);
465                         break;
466                 }
467                 qlt_response_pkt(host, rsp, pkt);
468                 break;
469         }
470
471         case IMMED_NOTIFY_TYPE:
472         {
473                 struct scsi_qla_host *host = vha;
474                 struct imm_ntfy_from_isp *entry =
475                     (struct imm_ntfy_from_isp *)pkt;
476
477                 host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
478                 if (unlikely(!host)) {
479                         ql_dbg(ql_dbg_tgt, vha, 0xe042,
480                             "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
481                             "received, with unknown vp_index %d\n",
482                             vha->vp_idx, entry->u.isp24.vp_index);
483                         break;
484                 }
485                 qlt_response_pkt(host, rsp, pkt);
486                 break;
487         }
488
489         case NOTIFY_ACK_TYPE:
490         {
491                 struct scsi_qla_host *host = vha;
492                 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
493
494                 if (0xFF != entry->u.isp24.vp_index) {
495                         host = qlt_find_host_by_vp_idx(vha,
496                             entry->u.isp24.vp_index);
497                         if (unlikely(!host)) {
498                                 ql_dbg(ql_dbg_tgt, vha, 0xe043,
499                                     "qla_target(%d): Response "
500                                     "pkt (NOTIFY_ACK_TYPE) "
501                                     "received, with unknown "
502                                     "vp_index %d\n", vha->vp_idx,
503                                     entry->u.isp24.vp_index);
504                                 break;
505                         }
506                 }
507                 qlt_response_pkt(host, rsp, pkt);
508                 break;
509         }
510
511         case ABTS_RECV_24XX:
512         {
513                 struct abts_recv_from_24xx *entry =
514                     (struct abts_recv_from_24xx *)pkt;
515                 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
516                     entry->vp_index);
517                 if (unlikely(!host)) {
518                         ql_dbg(ql_dbg_tgt, vha, 0xe044,
519                             "qla_target(%d): Response pkt "
520                             "(ABTS_RECV_24XX) received, with unknown "
521                             "vp_index %d\n", vha->vp_idx, entry->vp_index);
522                         break;
523                 }
524                 qlt_response_pkt(host, rsp, pkt);
525                 break;
526         }
527
528         case ABTS_RESP_24XX:
529         {
530                 struct abts_resp_to_24xx *entry =
531                     (struct abts_resp_to_24xx *)pkt;
532                 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
533                     entry->vp_index);
534                 if (unlikely(!host)) {
535                         ql_dbg(ql_dbg_tgt, vha, 0xe045,
536                             "qla_target(%d): Response pkt "
537                             "(ABTS_RECV_24XX) received, with unknown "
538                             "vp_index %d\n", vha->vp_idx, entry->vp_index);
539                         break;
540                 }
541                 qlt_response_pkt(host, rsp, pkt);
542                 break;
543         }
544
545         default:
546                 qlt_response_pkt(vha, rsp, pkt);
547                 break;
548         }
549
550 }
551
552 /*
553  * All qlt_plogi_ack_t operations are protected by hardware_lock
554  */
555 static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
556         struct imm_ntfy_from_isp *ntfy, int type)
557 {
558         struct qla_work_evt *e;
559         e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
560         if (!e)
561                 return QLA_FUNCTION_FAILED;
562
563         e->u.nack.fcport = fcport;
564         e->u.nack.type = type;
565         memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
566         return qla2x00_post_work(vha, e);
567 }
568
/*
 * Completion callback for an async NACK srb. Under tgt.sess_lock, advance
 * the fcport's login state machine according to which NACK (PLOGI / PRLI /
 * LOGO) just completed, then free the srb.
 */
static
void qla2x00_async_nack_sp_done(void *s, int res)
{
	struct srb *sp = (struct srb *)s;
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC  type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		/* First successful login for this port: register it. */
		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;

			/* Query port speed (GPSC) first when supported,
			 * otherwise go straight to fcport update. */
			if (!IS_IIDMA_CAPABLE(vha->hw) ||
			    !vha->hw->flags.gpsc_supported) {
				ql_dbg(ql_dbg_disc, vha, 0x20f3,
				    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
				    __func__, __LINE__,
				    sp->fcport->port_name,
				    vha->fcport_count);
				sp->fcport->disc_state = DSC_UPD_FCPORT;
				qla24xx_post_upd_fcport_work(vha, sp->fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20f5,
				    "%s %d %8phC post gpsc fcp_cnt %d\n",
				    __func__, __LINE__,
				    sp->fcport->port_name,
				    vha->fcport_count);

				qla24xx_post_gpsc_work(vha, sp->fcport);
			}
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}
635
/*
 * Issue an asynchronous notify-ack (NACK) srb for the given immediate
 * notify. Marks the fcport FCF_ASYNC_SENT and sets its fw_login_state
 * pending value for the NACK type; the flag is cleared on failure here or
 * in qla2x00_async_nack_sp_done() on completion.
 *
 * Returns QLA_SUCCESS on successful submission, QLA_FUNCTION_FAILED
 * otherwise.
 */
int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;	/* NACK name for the debug message below */

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;
	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_nack_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
689
690 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
691 {
692         fc_port_t *t;
693         unsigned long flags;
694
695         switch (e->u.nack.type) {
696         case SRB_NACK_PRLI:
697                 mutex_lock(&vha->vha_tgt.tgt_mutex);
698                 t = qlt_create_sess(vha, e->u.nack.fcport, 0);
699                 mutex_unlock(&vha->vha_tgt.tgt_mutex);
700                 if (t) {
701                         ql_log(ql_log_info, vha, 0xd034,
702                             "%s create sess success %p", __func__, t);
703                         spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
704                         /* create sess has an extra kref */
705                         vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
706                         spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
707                 }
708                 break;
709         }
710         qla24xx_async_notify_ack(vha, e->u.nack.fcport,
711             (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type);
712 }
713
714 void qla24xx_delete_sess_fn(struct work_struct *work)
715 {
716         fc_port_t *fcport = container_of(work, struct fc_port, del_work);
717         struct qla_hw_data *ha = fcport->vha->hw;
718         unsigned long flags;
719
720         spin_lock_irqsave(&ha->tgt.sess_lock, flags);
721
722         if (fcport->se_sess) {
723                 ha->tgt.tgt_ops->shutdown_sess(fcport);
724                 ha->tgt.tgt_ops->put_sess(fcport);
725         } else {
726                 qlt_unreg_sess(fcport);
727         }
728         spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
729 }
730
/*
 * Called from qla2x00_reg_remote_port()
 *
 * Create or revive the target session for a newly registered fcport.
 * No-op when target mode is not configured, the target is stopping, or the
 * port is pending deletion. Note the lock choreography: sess_lock is
 * dropped around qlt_create_sess() (which takes tgt_mutex) and retaken
 * afterwards.
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		/* No target-core session yet: create one (needs tgt_mutex,
		 * so the spinlock must be dropped first). */
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		/* Existing session reappeared: take a reference and
		 * refresh its addressing info. */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC \n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	/* A local (driver-initiated) session becomes global once the
	 * fabric reports the port. */
	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
800
801 /*
802  * This is a zero-base ref-counting solution, since hardware_lock
803  * guarantees that ref_count is not modified concurrently.
804  * Upon successful return content of iocb is undefined
805  */
806 static struct qlt_plogi_ack_t *
807 qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
808                        struct imm_ntfy_from_isp *iocb)
809 {
810         struct qlt_plogi_ack_t *pla;
811
812         list_for_each_entry(pla, &vha->plogi_ack_list, list) {
813                 if (pla->id.b24 == id->b24) {
814                         qlt_send_term_imm_notif(vha, &pla->iocb, 1);
815                         memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
816                         return pla;
817                 }
818         }
819
820         pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
821         if (!pla) {
822                 ql_dbg(ql_dbg_async, vha, 0x5088,
823                        "qla_target(%d): Allocation of plogi_ack failed\n",
824                        vha->vp_idx);
825                 return NULL;
826         }
827
828         memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
829         pla->id = *id;
830         list_add_tail(&pla->list, &vha->plogi_ack_list);
831
832         return pla;
833 }
834
void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	/* Somebody else still holds a link to this entry - keep it. */
	if (pla->ref_count)
		return;

	/* Last reference gone: send the deferred PLOGI/PRLI ACK now. */
	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	/* Rebuild the initiator's 24-bit port id from the stored IOCB. */
	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	/* Post the NACK work matching the ELS that was deferred. */
	if (iocb->u.isp24.status_subcode == ELS_PLOGI)
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
	else
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);

	/*
	 * NOTE: fcport is reused as a list cursor from here on.  Clear
	 * every remaining link to this pla across all ports on this vha
	 * before freeing it.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}
881
void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
		sess, link, sess->port_name,
		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		pla->ref_count, pla, link);

	/*
	 * A conflicting session that is deleted (or being deleted) will
	 * never consume this PLOGI ACK - undo the reference taken above
	 * and leave the existing link untouched.
	 */
	if (link == QLT_PLOGI_LINK_CONFLICT) {
		switch (sess->disc_state) {
		case DSC_DELETED:
		case DSC_DELETE_PEND:
			pla->ref_count--;
			return;
		default:
			break;
		}
	}

	/* Replacing an older pending PLOGI ACK drops its reference. */
	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	/* The SAME_WWN link also records which fcport will send the ACK. */
	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}
917
/* Tracks one explicit LOGO ELS in flight; see qlt_send_first_logo(). */
typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * number of cmds dropped while we were waiting for
	 * initiator to ack LOGO initialize to 1 if LOGO is
	 * triggered by a command, otherwise, to 0
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;	/* entry on vha->logo_list while pending */
} qlt_port_logo_t;
931
/*
 * Send an explicit LOGO to logo->id, coalescing with an already
 * pending LOGO to the same port id (only the dropped-command count is
 * merged in that case).  Uses tgt_mutex, so this must run in process
 * context.
 */
static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	/* A LOGO to this port is already in flight - just merge counts. */
	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	/* Drop the mutex across the ELS submission itself. */
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}
963
964 void qlt_free_session_done(struct work_struct *work)
965 {
966         struct fc_port *sess = container_of(work, struct fc_port,
967             free_work);
968         struct qla_tgt *tgt = sess->tgt;
969         struct scsi_qla_host *vha = sess->vha;
970         struct qla_hw_data *ha = vha->hw;
971         unsigned long flags;
972         bool logout_started = false;
973         scsi_qla_host_t *base_vha;
974         struct qlt_plogi_ack_t *own =
975                 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
976
977         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
978                 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
979                 " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
980                 __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
981                 sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
982                 sess->logout_on_delete, sess->keep_nport_handle,
983                 sess->send_els_logo);
984
985         if (!IS_SW_RESV_ADDR(sess->d_id)) {
986                 if (sess->send_els_logo) {
987                         qlt_port_logo_t logo;
988
989                         logo.id = sess->d_id;
990                         logo.cmd_count = 0;
991                         sess->send_els_logo = 0;
992                         qlt_send_first_logo(vha, &logo);
993                 }
994
995                 if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
996                         int rc;
997
998                         if (!own ||
999                             (own &&
1000                              (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
1001                                 rc = qla2x00_post_async_logout_work(vha, sess,
1002                                     NULL);
1003                                 if (rc != QLA_SUCCESS)
1004                                         ql_log(ql_log_warn, vha, 0xf085,
1005                                             "Schedule logo failed sess %p rc %d\n",
1006                                             sess, rc);
1007                                 else
1008                                         logout_started = true;
1009                         } else if (own && (own->iocb.u.isp24.status_subcode ==
1010                                 ELS_PRLI) && ha->flags.rida_fmt2) {
1011                                 rc = qla2x00_post_async_prlo_work(vha, sess,
1012                                     NULL);
1013                                 if (rc != QLA_SUCCESS)
1014                                         ql_log(ql_log_warn, vha, 0xf085,
1015                                             "Schedule PRLO failed sess %p rc %d\n",
1016                                             sess, rc);
1017                                 else
1018                                         logout_started = true;
1019                         }
1020                 }
1021         }
1022
1023         /*
1024          * Release the target session for FC Nexus from fabric module code.
1025          */
1026         if (sess->se_sess != NULL)
1027                 ha->tgt.tgt_ops->free_session(sess);
1028
1029         if (logout_started) {
1030                 bool traced = false;
1031
1032                 while (!READ_ONCE(sess->logout_completed)) {
1033                         if (!traced) {
1034                                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
1035                                         "%s: waiting for sess %p logout\n",
1036                                         __func__, sess);
1037                                 traced = true;
1038                         }
1039                         msleep(100);
1040                 }
1041
1042                 ql_dbg(ql_dbg_disc, vha, 0xf087,
1043                     "%s: sess %p logout completed\n", __func__, sess);
1044         }
1045
1046         if (sess->logo_ack_needed) {
1047                 sess->logo_ack_needed = 0;
1048                 qla24xx_async_notify_ack(vha, sess,
1049                         (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
1050         }
1051
1052         spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1053         if (sess->se_sess) {
1054                 sess->se_sess = NULL;
1055                 if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
1056                         tgt->sess_count--;
1057         }
1058
1059         sess->disc_state = DSC_DELETED;
1060         sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
1061         sess->deleted = QLA_SESS_DELETED;
1062         sess->login_retry = vha->hw->login_retry_count;
1063
1064         if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
1065                 vha->fcport_count--;
1066                 sess->login_succ = 0;
1067         }
1068
1069         qla2x00_clear_loop_id(sess);
1070
1071         if (sess->conflict) {
1072                 sess->conflict->login_pause = 0;
1073                 sess->conflict = NULL;
1074                 if (!test_bit(UNLOADING, &vha->dpc_flags))
1075                         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1076         }
1077
1078         {
1079                 struct qlt_plogi_ack_t *con =
1080                     sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
1081                 struct imm_ntfy_from_isp *iocb;
1082
1083                 if (con) {
1084                         iocb = &con->iocb;
1085                         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
1086                                  "se_sess %p / sess %p port %8phC is gone,"
1087                                  " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
1088                                  sess->se_sess, sess, sess->port_name,
1089                                  own ? "releasing own PLOGI" : "no own PLOGI pending",
1090                                  own ? own->ref_count : -1,
1091                                  iocb->u.isp24.port_name, con->ref_count);
1092                         qlt_plogi_ack_unref(vha, con);
1093                         sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
1094                 } else {
1095                         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
1096                             "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
1097                             sess->se_sess, sess, sess->port_name,
1098                             own ? "releasing own PLOGI" :
1099                             "no own PLOGI pending",
1100                             own ? own->ref_count : -1);
1101                 }
1102
1103                 if (own) {
1104                         sess->fw_login_state = DSC_LS_PLOGI_PEND;
1105                         qlt_plogi_ack_unref(vha, own);
1106                         sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
1107                 }
1108         }
1109
1110         spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1111
1112         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
1113             "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
1114                 sess, sess->port_name, vha->fcport_count);
1115
1116         if (tgt && (tgt->sess_count == 0))
1117                 wake_up_all(&tgt->waitQ);
1118
1119         if (vha->fcport_count == 0)
1120                 wake_up_all(&vha->fcport_waitQ);
1121
1122         base_vha = pci_get_drvdata(ha->pdev);
1123
1124         sess->free_pending = 0;
1125
1126         if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
1127                 return;
1128
1129         if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
1130                 switch (vha->host->active_mode) {
1131                 case MODE_INITIATOR:
1132                 case MODE_DUAL:
1133                         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1134                         qla2xxx_wake_dpc(vha);
1135                         break;
1136                 case MODE_TARGET:
1137                 default:
1138                         /* no-op */
1139                         break;
1140                 }
1141         }
1142 }
1143
/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	/*
	 * free_pending is tested and set under vha->work_lock so the
	 * free work below is scheduled at most once per session.
	 */
	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->free_pending) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->free_pending = 1;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	/* Detach the fabric-module nexus mapping first. */
	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	qla2x00_mark_device_lost(vha, sess, 1, 1);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	sess->disc_state = DSC_DELETE_PEND;
	/* Snapshot generations so later RSCN/login events are detectable. */
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	/*
	 * NVMe-registered ports are torn down via their dedicated delete
	 * work instead of qlt_free_session_done().
	 */
	if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
	    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
		sess->nvme_flag |= NVME_FLAG_DELETING;
		schedule_work(&sess->nvme_del_work);
	} else {
		INIT_WORK(&sess->free_work, qlt_free_session_done);
		schedule_work(&sess->free_work);
	}
}
EXPORT_SYMBOL(qlt_unreg_sess);
1182
/*
 * Handle a reset-type immediate notify.  A nport handle of 0xFFFF is a
 * global event: every session on this vha is scheduled for deletion.
 * Otherwise the task management request is issued against the matching
 * session.  Returns -ESRCH if no session matches, else the result of
 * qlt_issue_task_mgmt().
 */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		/* Look up the single session addressed by the handle. */
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}
1219
1220 static void qla24xx_chk_fcp_state(struct fc_port *sess)
1221 {
1222         if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
1223                 sess->logout_on_delete = 0;
1224                 sess->logo_ack_needed = 0;
1225                 sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
1226                 sess->scan_state = 0;
1227         }
1228 }
1229
void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	struct qla_hw_data *ha = sess->vha->hw;
	unsigned long flags;

	/* Deletion already scheduled - nothing to do. */
	if (sess->disc_state == DSC_DELETE_PEND)
		return;

	if (sess->disc_state == DSC_DELETED) {
		/* Already deleted: just wake anyone waiting on the counts. */
		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
			wake_up_all(&tgt->waitQ);
		if (sess->vha->fcport_count == 0)
			wake_up_all(&sess->vha->fcport_waitQ);

		/*
		 * Fall through and re-run the delete work only if PLOGI
		 * ACK references are still linked to this session.
		 */
		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
			return;
	}

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->deleted == QLA_SESS_DELETED)
		sess->logout_on_delete = 0;

	/* Another path has already started this deletion. */
	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	sess->disc_state = DSC_DELETE_PEND;

	/* Drop stale logout state if the chip was reset meanwhile. */
	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);

	INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}
1271
1272 static void qlt_clear_tgt_db(struct qla_tgt *tgt)
1273 {
1274         struct fc_port *sess;
1275         scsi_qla_host_t *vha = tgt->vha;
1276
1277         list_for_each_entry(sess, &vha->vp_fcports, list) {
1278                 if (sess->se_sess)
1279                         qlt_schedule_sess_for_deletion(sess);
1280         }
1281
1282         /* At this point tgt could be already dead */
1283 }
1284
1285 static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
1286         uint16_t *loop_id)
1287 {
1288         struct qla_hw_data *ha = vha->hw;
1289         dma_addr_t gid_list_dma;
1290         struct gid_list_info *gid_list;
1291         char *id_iter;
1292         int res, rc, i;
1293         uint16_t entries;
1294
1295         gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
1296             &gid_list_dma, GFP_KERNEL);
1297         if (!gid_list) {
1298                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
1299                     "qla_target(%d): DMA Alloc failed of %u\n",
1300                     vha->vp_idx, qla2x00_gid_list_size(ha));
1301                 return -ENOMEM;
1302         }
1303
1304         /* Get list of logged in devices */
1305         rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
1306         if (rc != QLA_SUCCESS) {
1307                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
1308                     "qla_target(%d): get_id_list() failed: %x\n",
1309                     vha->vp_idx, rc);
1310                 res = -EBUSY;
1311                 goto out_free_id_list;
1312         }
1313
1314         id_iter = (char *)gid_list;
1315         res = -ENOENT;
1316         for (i = 0; i < entries; i++) {
1317                 struct gid_list_info *gid = (struct gid_list_info *)id_iter;
1318                 if ((gid->al_pa == s_id[2]) &&
1319                     (gid->area == s_id[1]) &&
1320                     (gid->domain == s_id[0])) {
1321                         *loop_id = le16_to_cpu(gid->loop_id);
1322                         res = 0;
1323                         break;
1324                 }
1325                 id_iter += ha->gid_list_info_size;
1326         }
1327
1328 out_free_id_list:
1329         dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
1330             gid_list, gid_list_dma);
1331         return res;
1332 }
1333
1334 /*
1335  * Adds an extra ref to allow to drop hw lock after adding sess to the list.
1336  * Caller must put it.
1337  */
1338 static struct fc_port *qlt_create_sess(
1339         struct scsi_qla_host *vha,
1340         fc_port_t *fcport,
1341         bool local)
1342 {
1343         struct qla_hw_data *ha = vha->hw;
1344         struct fc_port *sess = fcport;
1345         unsigned long flags;
1346
1347         if (vha->vha_tgt.qla_tgt->tgt_stop)
1348                 return NULL;
1349
1350         if (fcport->se_sess) {
1351                 if (!kref_get_unless_zero(&sess->sess_kref)) {
1352                         ql_dbg(ql_dbg_disc, vha, 0x20f6,
1353                             "%s: kref_get_unless_zero failed for %8phC\n",
1354                             __func__, sess->port_name);
1355                         return NULL;
1356                 }
1357                 return fcport;
1358         }
1359         sess->tgt = vha->vha_tgt.qla_tgt;
1360         sess->local = local;
1361
1362         /*
1363          * Under normal circumstances we want to logout from firmware when
1364          * session eventually ends and release corresponding nport handle.
1365          * In the exception cases (e.g. when new PLOGI is waiting) corresponding
1366          * code will adjust these flags as necessary.
1367          */
1368         sess->logout_on_delete = 1;
1369         sess->keep_nport_handle = 0;
1370         sess->logout_completed = 0;
1371
1372         if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
1373             &fcport->port_name[0], sess) < 0) {
1374                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
1375                     "(%d) %8phC check_initiator_node_acl failed\n",
1376                     vha->vp_idx, fcport->port_name);
1377                 return NULL;
1378         } else {
1379                 kref_init(&fcport->sess_kref);
1380                 /*
1381                  * Take an extra reference to ->sess_kref here to handle
1382                  * fc_port access across ->tgt.sess_lock reaquire.
1383                  */
1384                 if (!kref_get_unless_zero(&sess->sess_kref)) {
1385                         ql_dbg(ql_dbg_disc, vha, 0x20f7,
1386                             "%s: kref_get_unless_zero failed for %8phC\n",
1387                             __func__, sess->port_name);
1388                         return NULL;
1389                 }
1390
1391                 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1392                 if (!IS_SW_RESV_ADDR(sess->d_id))
1393                         vha->vha_tgt.qla_tgt->sess_count++;
1394
1395                 qlt_do_generation_tick(vha, &sess->generation);
1396                 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1397         }
1398
1399         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
1400             "Adding sess %p se_sess %p  to tgt %p sess_count %d\n",
1401             sess, sess->se_sess, vha->vha_tgt.qla_tgt,
1402             vha->vha_tgt.qla_tgt->sess_count);
1403
1404         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
1405             "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
1406             "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
1407             vha->vp_idx, local ?  "local " : "", fcport->port_name,
1408             fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
1409             sess->d_id.b.al_pa, sess->conf_compl_supported ?  "" : "not ");
1410
1411         return sess;
1412 }
1413
1414 /*
1415  * max_gen - specifies maximum session generation
1416  * at which this deletion requestion is still valid
1417  */
1418 void
1419 qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
1420 {
1421         struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1422         struct fc_port *sess = fcport;
1423         unsigned long flags;
1424
1425         if (!vha->hw->tgt.tgt_ops)
1426                 return;
1427
1428         if (!tgt)
1429                 return;
1430
1431         spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1432         if (tgt->tgt_stop) {
1433                 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1434                 return;
1435         }
1436         if (!sess->se_sess) {
1437                 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1438                 return;
1439         }
1440
1441         if (max_gen - sess->generation < 0) {
1442                 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1443                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
1444                     "Ignoring stale deletion request for se_sess %p / sess %p"
1445                     " for port %8phC, req_gen %d, sess_gen %d\n",
1446                     sess->se_sess, sess, sess->port_name, max_gen,
1447                     sess->generation);
1448                 return;
1449         }
1450
1451         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
1452
1453         sess->local = 1;
1454         spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1455         qlt_schedule_sess_for_deletion(sess);
1456 }
1457
1458 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
1459 {
1460         struct qla_hw_data *ha = tgt->ha;
1461         unsigned long flags;
1462         int res;
1463         /*
1464          * We need to protect against race, when tgt is freed before or
1465          * inside wake_up()
1466          */
1467         spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1468         ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
1469             "tgt %p, sess_count=%d\n",
1470             tgt, tgt->sess_count);
1471         res = (tgt->sess_count == 0);
1472         spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1473
1474         return res;
1475 }
1476
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	/* Refuse to stop the physical port while NPIV vports exist. */
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
			    "NPIV is in use. Can not stop target\n");
			return -EPERM;
		}
	}
	/* Stop may only be started once. */
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	/* Flush queued sess works until the list drains. */
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
1545
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	/* Phase 2 must run exactly once, and only after phase 1. */
	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	/* Transition tgt_stop -> tgt_stopped under tgt_mutex. */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
1573
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;
	struct qla_hw_data *ha = vha->hw;

	/* Run any stop phases that have not completed yet. */
	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop &&
	    !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	/* Unhook every qpair hint under its qpair's own lock. */
	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	/* Drain and destroy the LUN -> qpair mapping btree. */
	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	/* NPIV ports notify the fabric module of target removal. */
	if (vha->vp_idx)
		if (ha->tgt.tgt_ops &&
		    ha->tgt.tgt_ops->remove_target &&
		    vha->vha_tgt.target_lport_ptr)
			ha->tgt.tgt_ops->remove_target(vha);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}
1625
1626 /* ha->hardware_lock supposed to be held on entry */
1627 static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
1628         const void *param, unsigned int param_size)
1629 {
1630         struct qla_tgt_sess_work_param *prm;
1631         unsigned long flags;
1632
1633         prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
1634         if (!prm) {
1635                 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1636                     "qla_target(%d): Unable to create session "
1637                     "work, command will be refused", 0);
1638                 return -ENOMEM;
1639         }
1640
1641         ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1642             "Scheduling work (type %d, prm %p)"
1643             " to find session for param %p (size %d, tgt %p)\n",
1644             type, prm, param, param_size, tgt);
1645
1646         prm->type = type;
1647         memcpy(&prm->tm_iocb, param, param_size);
1648
1649         spin_lock_irqsave(&tgt->sess_work_lock, flags);
1650         list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
1651         spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1652
1653         schedule_work(&tgt->sess_work);
1654
1655         return 0;
1656 }
1657
/*
 * Build and ring a NOTIFY_ACK IOCB answering an immediate notify.
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	/* No point queueing IOCBs while the firmware is not running. */
	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/* Track the outstanding ack so completion handling can match it. */
	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	/* Echo the relevant immediate-notify fields back in the ack. */
	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		/* For ELS notifies, preserve only the PUREX IOCB flag. */
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	/* NOTE(review): add_flags, resp_code and resp_code_valid are unused
	 * on this 24xx-style path. */

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}
1719
/*
 * Build and ring an ABTS response (BA_ACC or BA_RJT) IOCB.
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
	    NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	/*
	 * Compose the BLS F_CTL word once, then copy its three low-order
	 * bytes (after the little-endian conversion) into the LE header.
	 */
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	/*
	 * ids_reversed: the incoming frame already carries reversed IDs
	 * (e.g. the firmware's response to our own ABTS response), so keep
	 * them; otherwise swap S_ID/D_ID so the reply targets the sender.
	 */
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		/* Abort succeeded: send BA_ACC. */
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		/* Abort failed/rejected: send BA_RJT. */
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	/* Completion handling matches this against the firmware's reply. */
	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}
1799
/*
 * Terminate an exchange via a CTIO7 after the firmware reported a problem
 * with our ABTS response, then resend a BA_ACC for it.
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(
	    vha->hw->base_qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * We've got on entrance firmware's response on by us generated
	 * ABTS response. So, in it ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	/* Skip-handle marker: nothing to complete back to the midlayer. */
	ctio->handle = QLA_TGT_SKIP_HANDLE |	CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = entry->nport_handle;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	/* IDs were reversed by the firmware, so d_id is the initiator. */
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
					    CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);

	/* Memory Barrier */
	wmb();
	/* NOTE(review): the IOCB was reserved on base_qpair but is rung via
	 * vha->req here — verify these refer to the same request queue. */
	qla2x00_start_iocbs(vha, vha->req);

	/* Re-issue the BA_ACC; IDs are already reversed in 'entry'. */
	qlt_24xx_send_abts_resp(vha->hw->base_qpair,
	    (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}
1847
1848 static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
1849 {
1850         struct qla_tgt_sess_op *op;
1851         struct qla_tgt_cmd *cmd;
1852         unsigned long flags;
1853
1854         spin_lock_irqsave(&vha->cmd_list_lock, flags);
1855         list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1856                 if (tag == op->atio.u.isp24.exchange_addr) {
1857                         op->aborted = true;
1858                         spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1859                         return 1;
1860                 }
1861         }
1862
1863         list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
1864                 if (tag == op->atio.u.isp24.exchange_addr) {
1865                         op->aborted = true;
1866                         spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1867                         return 1;
1868                 }
1869         }
1870
1871         list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1872                 if (tag == cmd->atio.u.isp24.exchange_addr) {
1873                         cmd->aborted = 1;
1874                         spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1875                         return 1;
1876                 }
1877         }
1878         spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1879
1880         return 0;
1881 }
1882
1883 /* drop cmds for the given lun
1884  * XXX only looks for cmds on the port through which lun reset was recieved
1885  * XXX does not go through the list of other port (which may have cmds
1886  *     for the same lun)
1887  */
1888 static void abort_cmds_for_lun(struct scsi_qla_host *vha,
1889                                 u64 lun, uint8_t *s_id)
1890 {
1891         struct qla_tgt_sess_op *op;
1892         struct qla_tgt_cmd *cmd;
1893         uint32_t key;
1894         unsigned long flags;
1895
1896         key = sid_to_key(s_id);
1897         spin_lock_irqsave(&vha->cmd_list_lock, flags);
1898         list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1899                 uint32_t op_key;
1900                 u64 op_lun;
1901
1902                 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1903                 op_lun = scsilun_to_int(
1904                         (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1905                 if (op_key == key && op_lun == lun)
1906                         op->aborted = true;
1907         }
1908
1909         list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
1910                 uint32_t op_key;
1911                 u64 op_lun;
1912
1913                 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1914                 op_lun = scsilun_to_int(
1915                         (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1916                 if (op_key == key && op_lun == lun)
1917                         op->aborted = true;
1918         }
1919
1920         list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1921                 uint32_t cmd_key;
1922                 u64 cmd_lun;
1923
1924                 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
1925                 cmd_lun = scsilun_to_int(
1926                         (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
1927                 if (cmd_key == key && cmd_lun == lun)
1928                         cmd->aborted = 1;
1929         }
1930         spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1931 }
1932
/*
 * Handle an ABTS for a known session: either the command is still local
 * (mark aborted, acknowledge immediately) or the abort is handed to the
 * target core as a TMR.
 * ha->hardware_lock supposed to be held on entry.
 * Returns 0 on success, -ENOMEM/-EFAULT on failure (caller sends BA_RJT).
 */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	int rc;

	/* If the command hasn't reached the core yet, just flag it. */
	if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
		/* send TASK_ABORT response immediately */
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false);
		return 0;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	/* GFP_ATOMIC: hardware_lock is held. */
	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	/* Keep the original ABTS so the response can echo its fields. */
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	/* Snapshot chip_reset to detect a chip reset racing this TMR. */
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_ABTS;
	mcmd->qpair = ha->base_qpair;
	mcmd->vha = vha;

	/*
	 * LUN is looked up by target-core internally based on the passed
	 * abts->exchange_addr_to_abort tag.
	 */
	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, mcmd->tmr_func,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d):  tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
1983
/*
 * Validate an incoming ABTS, resolve its session, and dispatch it to
 * __qlt_24xx_handle_abts(); rejects with BA_RJT on any failure.
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;
	unsigned long flags;

	/* Only full-exchange aborts are supported, not Abort Sequence. */
	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	/* The LE header stores s_id byte-reversed; restore wire order. */
	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
			    false);
		return;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);


	/* A session being torn down cannot accept new TMRs. */
	if (sess->deleted) {
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}
}
2056
/*
 * Build and ring a status-mode-1 CTIO7 carrying a task-management
 * response code back to the initiator.
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	/* NOTE(review): despite the name, 'ha' here is the scsi_qla_host. */
	struct scsi_qla_host *ha = mcmd->vha;
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
	    ha, atio, resp_code);


	ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	/* Skip-handle marker: no command completion to match. */
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	/* s_id is byte-reversed in the LE ATIO header. */
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	/* Carry the ATIO task attributes in bits 9+ of the flags word. */
	temp = (atio->u.isp24.attr << 9)|
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	/* 8-byte FCP response: the TM response code in the first byte. */
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(ha, qpair->req);
}
2108
/*
 * Return a management command descriptor to qla_tgt_mgmt_cmd_mempool
 * (the pool it was allocated from, e.g. in __qlt_24xx_handle_abts()).
 */
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
2114
/*
 * Build and ring a status CTIO7 carrying SCSI status plus fixed-format
 * sense data (response code 0x70, given sense key/ASC/ASCQ).
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
	struct atio_from_isp *atio = &cmd->atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;
	struct scsi_qla_host *vha = cmd->vha;

	ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
	    "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
	    "sense_key=%02x, asc=%02x, ascq=%02x",
	    vha, atio, scsi_status, sense_key, asc, ascq);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!ctio) {
		ql_dbg(ql_dbg_async, vha, 0x3067,
		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
		    vha->host_no, __func__);
		goto out;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE;
	ctio->nport_handle = cmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	/* s_id is byte-reversed in the LE ATIO header. */
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
	ctio->u.status1.response_len = cpu_to_le16(18);
	/* No data was transferred, so the whole expected length remains. */
	ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

	if (ctio->u.status1.residual != 0)
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_RESIDUAL_UNDER);

	/*
	 * NOTE(review): (&sense_data)[k] is pointer-to-array arithmetic —
	 * each step advances by sizeof(sense_data) bytes. That lands on the
	 * intended 32-bit word offsets (0, 4, 12) only if sense_data is a
	 * 4-byte object; verify against the struct declaration in
	 * qla_target.h.
	 */
	/* Response code and sense key */
	put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
	    (&ctio->u.status1.sense_data)[0]);
	/* Additional sense length */
	put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
	/* ASC and ASCQ */
	put_unaligned_le32(((asc << 24) | (ascq << 16)),
	    (&ctio->u.status1.sense_data)[3]);

	/* Memory Barrier */
	wmb();

	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

out:
	return;
}
2184
/*
 * Transmit the response for a completed task-management request.
 * Callback from target fabric module code (TFO->queue_tm_rsp()).
 */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
			"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			mcmd->reset_count, qpair->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
		/*
		 * NACK path: a LOGO/PRLO/TPRLO ELS tears down the session
		 * instead of being acked; everything else gets a notify ack.
		 */
		if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_LOGO ||
		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_PRLO ||
		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_TPRLO) {
			ql_dbg(ql_dbg_disc, vha, 0x2106,
			    "TM response logo %phC status %#x state %#x",
			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
			    mcmd->flags);
			qlt_schedule_sess_for_deletion(mcmd->sess);
		} else {
			qlt_send_notify_ack(vha->hw->base_qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
		}
	} else {
		/* ABTS gets a BLS response; other TMRs a CTIO response. */
		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
			qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1.  The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
2249
/*
 * DMA-map the command's scatterlists and compute the segment/IOCB counts
 * needed to transmit it, including DIF protection segments.
 * No locks. Returns 0 on success, -1 if any pci_map_sg() fails.
 */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	/* Remember that a mapping exists so qlt_unmap_sg() undoes it. */
	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			QLA_TGT_DATASEGS_PER_CMD_24XX,
			QLA_TGT_DATASEGS_PER_CONT_24XX);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			/* One descriptor per logical block. */
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			/* Map the protection scatterlist as well. */
			prm->prot_sg      = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev,
				cmd->prot_sg, cmd->prot_sg_cnt,
				cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* Dif Bundling not support here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
								cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}
2310
2311 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2312 {
2313         struct qla_hw_data *ha;
2314         struct qla_qpair *qpair;
2315         if (!cmd->sg_mapped)
2316                 return;
2317
2318         qpair = cmd->qpair;
2319
2320         pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt,
2321             cmd->dma_data_direction);
2322         cmd->sg_mapped = 0;
2323
2324         if (cmd->prot_sg_cnt)
2325                 pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
2326                         cmd->dma_data_direction);
2327
2328         if (!cmd->ctx)
2329                 return;
2330         ha = vha->hw;
2331         if (cmd->ctx_dsd_alloced)
2332                 qla2x00_clean_dsd_pool(ha, cmd->ctx);
2333
2334         dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
2335 }
2336
2337 static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
2338         uint32_t req_cnt)
2339 {
2340         uint32_t cnt;
2341         struct req_que *req = qpair->req;
2342
2343         if (req->cnt < (req_cnt + 2)) {
2344                 cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
2345                     RD_REG_DWORD_RELAXED(req->req_q_out));
2346
2347                 if  (req->ring_index < cnt)
2348                         req->cnt = cnt - req->ring_index;
2349                 else
2350                         req->cnt = req->length - (req->ring_index - cnt);
2351
2352                 if (unlikely(req->cnt < (req_cnt + 2)))
2353                         return -EAGAIN;
2354         }
2355
2356         req->cnt -= req_cnt;
2357
2358         return 0;
2359 }
2360
2361 /*
2362  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2363  */
2364 static inline void *qlt_get_req_pkt(struct req_que *req)
2365 {
2366         /* Adjust ring index. */
2367         req->ring_index++;
2368         if (req->ring_index == req->length) {
2369                 req->ring_index = 0;
2370                 req->ring_ptr = req->ring;
2371         } else {
2372                 req->ring_ptr++;
2373         }
2374         return (cont_entry_t *)req->ring_ptr;
2375 }
2376
2377 /* ha->hardware_lock supposed to be held on entry */
2378 static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
2379 {
2380         uint32_t h;
2381         int index;
2382         uint8_t found = 0;
2383         struct req_que *req = qpair->req;
2384
2385         h = req->current_outstanding_cmd;
2386
2387         for (index = 1; index < req->num_outstanding_cmds; index++) {
2388                 h++;
2389                 if (h == req->num_outstanding_cmds)
2390                         h = 1;
2391
2392                 if (h == QLA_TGT_SKIP_HANDLE)
2393                         continue;
2394
2395                 if (!req->outstanding_cmds[h]) {
2396                         found = 1;
2397                         break;
2398                 }
2399         }
2400
2401         if (found) {
2402                 req->current_outstanding_cmd = h;
2403         } else {
2404                 ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
2405                     "qla_target(%d): Ran out of empty cmd slots\n",
2406                     qpair->vha->vp_idx);
2407                 h = QLA_TGT_NULL_HANDLE;
2408         }
2409
2410         return h;
2411 }
2412
/* ha->hardware_lock supposed to be held on entry */
/*
 * Build a CTIO Type 7 IOCB for @prm->cmd in place on the request ring and
 * allocate a firmware completion handle for it.
 *
 * Returns 0 on success or -EAGAIN when no outstanding-command slot is free.
 */
static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
	struct qla_tgt_prm *prm)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;

	/* The IOCB is built directly in the next request-ring entry. */
	pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = prm->cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle = MAKE_HANDLE(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	/* Initiator address: s_id bytes are stored reversed vs the FCP hdr */
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	/* ATIO attribute bits land at bit 9+ of the CTIO flags word */
	temp = atio->u.isp24.attr << 9;
	pkt->u.status0.flags |= cpu_to_le16(temp);
	/* OX_ID is big-endian in the FC header, little-endian in the IOCB */
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	return 0;
}
2457
2458 /*
2459  * ha->hardware_lock supposed to be held on entry. We have already made sure
2460  * that there is sufficient amount of request entries to not drop it.
2461  */
2462 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
2463 {
2464         int cnt;
2465         uint32_t *dword_ptr;
2466
2467         /* Build continuation packets */
2468         while (prm->seg_cnt > 0) {
2469                 cont_a64_entry_t *cont_pkt64 =
2470                         (cont_a64_entry_t *)qlt_get_req_pkt(
2471                            prm->cmd->qpair->req);
2472
2473                 /*
2474                  * Make sure that from cont_pkt64 none of
2475                  * 64-bit specific fields used for 32-bit
2476                  * addressing. Cast to (cont_entry_t *) for
2477                  * that.
2478                  */
2479
2480                 memset(cont_pkt64, 0, sizeof(*cont_pkt64));
2481
2482                 cont_pkt64->entry_count = 1;
2483                 cont_pkt64->sys_define = 0;
2484
2485                 cont_pkt64->entry_type = CONTINUE_A64_TYPE;
2486                 dword_ptr = (uint32_t *)&cont_pkt64->dseg_0_address;
2487
2488                 /* Load continuation entry data segments */
2489                 for (cnt = 0;
2490                     cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
2491                     cnt++, prm->seg_cnt--) {
2492                         *dword_ptr++ =
2493                             cpu_to_le32(pci_dma_lo32
2494                                 (sg_dma_address(prm->sg)));
2495                         *dword_ptr++ = cpu_to_le32(pci_dma_hi32
2496                             (sg_dma_address(prm->sg)));
2497                         *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
2498
2499                         prm->sg = sg_next(prm->sg);
2500                 }
2501         }
2502 }
2503
2504 /*
2505  * ha->hardware_lock supposed to be held on entry. We have already made sure
2506  * that there is sufficient amount of request entries to not drop it.
2507  */
2508 static void qlt_load_data_segments(struct qla_tgt_prm *prm)
2509 {
2510         int cnt;
2511         uint32_t *dword_ptr;
2512         struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
2513
2514         pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
2515
2516         /* Setup packet address segment pointer */
2517         dword_ptr = pkt24->u.status0.dseg_0_address;
2518
2519         /* Set total data segment count */
2520         if (prm->seg_cnt)
2521                 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
2522
2523         if (prm->seg_cnt == 0) {
2524                 /* No data transfer */
2525                 *dword_ptr++ = 0;
2526                 *dword_ptr = 0;
2527                 return;
2528         }
2529
2530         /* If scatter gather */
2531
2532         /* Load command entry data segments */
2533         for (cnt = 0;
2534             (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
2535             cnt++, prm->seg_cnt--) {
2536                 *dword_ptr++ =
2537                     cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
2538
2539                 *dword_ptr++ = cpu_to_le32(pci_dma_hi32(
2540                         sg_dma_address(prm->sg)));
2541
2542                 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
2543
2544                 prm->sg = sg_next(prm->sg);
2545         }
2546
2547         qlt_load_cont_data_segments(prm);
2548 }
2549
2550 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2551 {
2552         return cmd->bufflen > 0;
2553 }
2554
/*
 * Log details of a backend-detected T10-PI (DIF) error carried in the
 * command's sense buffer (ASC 0x10), keyed by the ASCQ value, and dump
 * the CDB for debugging.  No-op when the sense does not indicate ASC 0x10.
 */
static void qlt_print_dif_err(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd;
	struct scsi_qla_host *vha;

	/* asc 0x10=dif error */
	if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
		cmd = prm->cmd;
		vha = cmd->vha;
		/* ASCQ */
		switch (prm->sense_buffer[13]) {
		case 1:
			/* ASCQ 1: guard tag mismatch */
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
			    "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 2:
			/* ASCQ 2: application tag mismatch */
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
			    "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 3:
			/* ASCQ 3: reference tag mismatch */
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
			    "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		default:
			/* Any other ASCQ: generic DIF error */
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
			    "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		}
		ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
	}
}
2598
2599 /*
2600  * Called without ha->hardware_lock held
2601  */
2602 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
2603         struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
2604         uint32_t *full_req_cnt)
2605 {
2606         struct se_cmd *se_cmd = &cmd->se_cmd;
2607         struct qla_qpair *qpair = cmd->qpair;
2608
2609         prm->cmd = cmd;
2610         prm->tgt = cmd->tgt;
2611         prm->pkt = NULL;
2612         prm->rq_result = scsi_status;
2613         prm->sense_buffer = &cmd->sense_buffer[0];
2614         prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
2615         prm->sg = NULL;
2616         prm->seg_cnt = -1;
2617         prm->req_cnt = 1;
2618         prm->residual = 0;
2619         prm->add_status_pkt = 0;
2620         prm->prot_sg = NULL;
2621         prm->prot_seg_cnt = 0;
2622         prm->tot_dsds = 0;
2623
2624         if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
2625                 if  (qlt_pci_map_calc_cnt(prm) != 0)
2626                         return -EAGAIN;
2627         }
2628
2629         *full_req_cnt = prm->req_cnt;
2630
2631         if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
2632                 prm->residual = se_cmd->residual_count;
2633                 ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
2634                     "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2635                        prm->residual, se_cmd->tag,
2636                        se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
2637                        cmd->bufflen, prm->rq_result);
2638                 prm->rq_result |= SS_RESIDUAL_UNDER;
2639         } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
2640                 prm->residual = se_cmd->residual_count;
2641                 ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
2642                     "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2643                        prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
2644                        se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
2645                 prm->rq_result |= SS_RESIDUAL_OVER;
2646         }
2647
2648         if (xmit_type & QLA_TGT_XMIT_STATUS) {
2649                 /*
2650                  * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
2651                  * ignored in *xmit_response() below
2652                  */
2653                 if (qlt_has_data(cmd)) {
2654                         if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
2655                             (IS_FWI2_CAPABLE(cmd->vha->hw) &&
2656                             (prm->rq_result != 0))) {
2657                                 prm->add_status_pkt = 1;
2658                                 (*full_req_cnt)++;
2659                         }
2660                 }
2661         }
2662
2663         return 0;
2664 }
2665
2666 static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
2667     int sending_sense)
2668 {
2669         if (cmd->qpair->enable_class_2)
2670                 return 0;
2671
2672         if (sending_sense)
2673                 return cmd->conf_compl_supported;
2674         else
2675                 return cmd->qpair->enable_explicit_conf &&
2676                     cmd->conf_compl_supported;
2677 }
2678
/*
 * Fill the status portion of a CTIO7 from @prm: completion flags, residual,
 * SCSI status and (when valid) sense data.  Switches the IOCB to status
 * mode 1 so the sense bytes ride inside the CTIO.
 */
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	/* Sense must fit in the IOCB's fixed sense area. */
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->cmd, 0)) {
		ctio->u.status0.flags |= cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->cmd, 1)) {
			if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
				ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		/* Status mode 1: sense carried in the CTIO itself. */
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		/* Sense is copied one 32-bit word at a time, byte-swapped
		 * to big-endian (cpu_to_be32) — presumably the firmware's
		 * expected layout. */
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);

		qlt_print_dif_err(prm);

	} else {
		/* No sense: still status mode 1, with an empty sense area. */
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}
2734
2735 static inline int
2736 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2737 {
2738         switch (se_cmd->prot_op) {
2739         case TARGET_PROT_DOUT_INSERT:
2740         case TARGET_PROT_DIN_STRIP:
2741                 if (ql2xenablehba_err_chk >= 1)
2742                         return 1;
2743                 break;
2744         case TARGET_PROT_DOUT_PASS:
2745         case TARGET_PROT_DIN_PASS:
2746                 if (ql2xenablehba_err_chk >= 2)
2747                         return 1;
2748                 break;
2749         case TARGET_PROT_DIN_INSERT:
2750         case TARGET_PROT_DOUT_STRIP:
2751                 return 1;
2752         default:
2753                 break;
2754         }
2755         return 0;
2756 }
2757
2758 static inline int
2759 qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
2760 {
2761         switch (se_cmd->prot_op) {
2762         case TARGET_PROT_DIN_INSERT:
2763         case TARGET_PROT_DOUT_INSERT:
2764         case TARGET_PROT_DIN_STRIP:
2765         case TARGET_PROT_DOUT_STRIP:
2766         case TARGET_PROT_DIN_PASS:
2767         case TARGET_PROT_DOUT_PASS:
2768             return 1;
2769         default:
2770             return 0;
2771         }
2772         return 0;
2773 }
2774
2775 /*
2776  * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
2777  */
2778 static void
2779 qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
2780     uint16_t *pfw_prot_opts)
2781 {
2782         struct se_cmd *se_cmd = &cmd->se_cmd;
2783         uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
2784         scsi_qla_host_t *vha = cmd->tgt->vha;
2785         struct qla_hw_data *ha = vha->hw;
2786         uint32_t t32 = 0;
2787
2788         /*
2789          * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
2790          * have been immplemented by TCM, before AppTag is avail.
2791          * Look for modesense_handlers[]
2792          */
2793         ctx->app_tag = 0;
2794         ctx->app_tag_mask[0] = 0x0;
2795         ctx->app_tag_mask[1] = 0x0;
2796
2797         if (IS_PI_UNINIT_CAPABLE(ha)) {
2798                 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2799                     (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2800                         *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
2801                 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2802                         *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2803         }
2804
2805         t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
2806
2807         switch (se_cmd->prot_type) {
2808         case TARGET_DIF_TYPE0_PROT:
2809                 /*
2810                  * No check for ql2xenablehba_err_chk, as it
2811                  * would be an I/O error if hba tag generation
2812                  * is not done.
2813                  */
2814                 ctx->ref_tag = cpu_to_le32(lba);
2815                 /* enable ALL bytes of the ref tag */
2816                 ctx->ref_tag_mask[0] = 0xff;
2817                 ctx->ref_tag_mask[1] = 0xff;
2818                 ctx->ref_tag_mask[2] = 0xff;
2819                 ctx->ref_tag_mask[3] = 0xff;
2820                 break;
2821         case TARGET_DIF_TYPE1_PROT:
2822             /*
2823              * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
2824              * REF tag, and 16 bit app tag.
2825              */
2826             ctx->ref_tag = cpu_to_le32(lba);
2827             if (!qla_tgt_ref_mask_check(se_cmd) ||
2828                 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2829                     *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2830                     break;
2831             }
2832             /* enable ALL bytes of the ref tag */
2833             ctx->ref_tag_mask[0] = 0xff;
2834             ctx->ref_tag_mask[1] = 0xff;
2835             ctx->ref_tag_mask[2] = 0xff;
2836             ctx->ref_tag_mask[3] = 0xff;
2837             break;
2838         case TARGET_DIF_TYPE2_PROT:
2839             /*
2840              * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
2841              * tag has to match LBA in CDB + N
2842              */
2843             ctx->ref_tag = cpu_to_le32(lba);
2844             if (!qla_tgt_ref_mask_check(se_cmd) ||
2845                 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2846                     *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2847                     break;
2848             }
2849             /* enable ALL bytes of the ref tag */
2850             ctx->ref_tag_mask[0] = 0xff;
2851             ctx->ref_tag_mask[1] = 0xff;
2852             ctx->ref_tag_mask[2] = 0xff;
2853             ctx->ref_tag_mask[3] = 0xff;
2854             break;
2855         case TARGET_DIF_TYPE3_PROT:
2856             /* For TYPE 3 protection: 16 bit GUARD only */
2857             *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2858             ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
2859                 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
2860             break;
2861         }
2862 }
2863
/*
 * qlt_build_ctio_crc2_pkt() - build a CTIO CRC_2 IOCB for a command that
 * carries T10-PI (DIF) protection information.
 *
 * Allocates a firmware handle and a CRC context from the DMA pool, fills
 * in the CTIO_CRC2 header, protection options and DIF tags, then walks the
 * data (and, when bundling, protection) scatter-gather lists into DSDs.
 *
 * Returns QLA_SUCCESS, -EAGAIN when no handle is free, or
 * QLA_FUNCTION_FAILED on allocation/DSD-walk failure (caller cleans up).
 */
static inline int
qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
{
	uint32_t		*cur_dsd;
	uint32_t		transfer_length = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	struct ctio_crc2_to_fw	*pkt;
	dma_addr_t		crc_ctx_dma;
	uint16_t		fw_prot_opts = 0;
	struct qla_tgt_cmd	*cmd = prm->cmd;
	struct se_cmd		*se_cmd = &cmd->se_cmd;
	uint32_t h;
	struct atio_from_isp *atio = &prm->cmd->atio;
	struct qla_tc_param	tc;
	uint16_t t16;
	scsi_qla_host_t *vha = cmd->vha;

	ha = vha->hw;

	/* The IOCB is built directly in the next request-ring entry. */
	pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
		"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
		cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
		prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

	/* DIN_INSERT / DOUT_STRIP carry no separate protection SG over the
	 * wire, so DIF bundling is not used for them. */
	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
		bundling = 0;

	/* Compute dif len and adjust data len to incude protection */
	data_bytes = cmd->bufflen;
	dif_bytes  = (data_bytes / cmd->blk_sz) * 8;

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		transfer_length = data_bytes;
		if (cmd->prot_sg_cnt)
			data_bytes += dif_bytes;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		transfer_length = data_bytes + dif_bytes;
		break;
	default:
		BUG();
		break;
	}

	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	/* Select the firmware DIF mode matching the TCM protection op. */
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
		fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
		fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		fw_prot_opts |= PO_MODE_DIF_PASS;
		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
		break;
	default:/* Normal Request */
		fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	/* ---- PKT ---- */
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	pkt->entry_type  = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle  = MAKE_HANDLE(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	/* Initiator address: s_id bytes are stored reversed vs the FCP hdr */
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr   = atio->u.isp24.exchange_addr;

	/* silence compile warning */
	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->ox_id  = cpu_to_le16(t16);

	t16 = (atio->u.isp24.attr << 9);
	pkt->flags |= cpu_to_le16(t16);
	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

	/* Set transfer direction */
	/* NOTE(review): plain assignment below discards the attr bits OR'd
	 * into pkt->flags just above — verify this is intentional. */
	if (cmd->dma_data_direction == DMA_TO_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);

	pkt->dseg_count = prm->tot_dsds;
	/* Fibre channel byte count */
	pkt->transfer_length = cpu_to_le32(transfer_length);

	/* ----- CRC context -------- */

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = cmd->ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

	qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);

	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interlaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);

	/* Parameter block for the SG-walk helpers below. */
	memset((uint8_t *)&tc, 0 , sizeof(tc));
	tc.vha = vha;
	tc.blk_sz = cmd->blk_sz;
	tc.bufflen = cmd->bufflen;
	tc.sg = cmd->sg;
	tc.prot_sg = cmd->prot_sg;
	tc.ctx = crc_ctx_pkt;
	tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;

	/* Walks data segments */
	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
			prm->tot_dsds, &tc))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
		(prm->tot_dsds - prm->prot_seg_cnt), &tc))
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
			prm->prot_seg_cnt, &tc))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */
	qpair->req->outstanding_cmds[h] = NULL;

	return QLA_FUNCTION_FAILED;
}
3074
3075 /*
3076  * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
3077  * QLA_TGT_XMIT_STATUS for >= 24xx silicon
3078  */
3079 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
3080         uint8_t scsi_status)
3081 {
3082         struct scsi_qla_host *vha = cmd->vha;
3083         struct qla_qpair *qpair = cmd->qpair;
3084         struct ctio7_to_24xx *pkt;
3085         struct qla_tgt_prm prm;
3086         uint32_t full_req_cnt = 0;
3087         unsigned long flags = 0;
3088         int res;
3089
3090         if (cmd->sess && cmd->sess->deleted) {
3091                 cmd->state = QLA_TGT_STATE_PROCESSED;
3092                 if (cmd->sess->logout_completed)
3093                         /* no need to terminate. FW already freed exchange. */
3094                         qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
3095                 else
3096                         qlt_send_term_exchange(qpair, cmd, &cmd->atio, 0, 0);
3097                 return 0;
3098         }
3099
3100         ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
3101             "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
3102             (xmit_type & QLA_TGT_XMIT_STATUS) ?
3103             1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
3104             &cmd->se_cmd, qpair->id);
3105
3106         res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
3107             &full_req_cnt);
3108         if (unlikely(res != 0)) {
3109                 return res;
3110         }
3111
3112         spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3113
3114         if (xmit_type == QLA_TGT_XMIT_STATUS)
3115                 qpair->tgt_counters.core_qla_snd_status++;
3116         else
3117                 qpair->tgt_counters.core_qla_que_buf++;
3118
3119         if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
3120                 /*
3121                  * Either the port is not online or this request was from
3122                  * previous life, just abort the processing.
3123                  */
3124                 cmd->state = QLA_TGT_STATE_PROCESSED;
3125                 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
3126                 ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
3127                         "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
3128                         vha->flags.online, qla2x00_reset_active(vha),
3129                         cmd->reset_count, qpair->chip_reset);
3130                 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3131                 return 0;
3132         }
3133
3134         /* Does F/W have an IOCBs for this request */
3135         res = qlt_check_reserve_free_req(qpair, full_req_cnt);
3136         if (unlikely(res))
3137                 goto out_unmap_unlock;
3138
3139         if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
3140                 res = qlt_build_ctio_crc2_pkt(qpair, &prm);
3141         else
3142                 res = qlt_24xx_build_ctio_pkt(qpair, &prm);
3143         if (unlikely(res != 0)) {
3144                 qpair->req->cnt += full_req_cnt;
3145                 goto out_unmap_unlock;
3146         }
3147
3148         pkt = (struct ctio7_to_24xx *)prm.pkt;
3149
3150         if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
3151                 pkt->u.status0.flags |=
3152                     cpu_to_le16(CTIO7_FLAGS_DATA_IN |
3153                         CTIO7_FLAGS_STATUS_MODE_0);
3154
3155                 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3156                         qlt_load_data_segments(&prm);
3157
3158                 if (prm.add_status_pkt == 0) {
3159                         if (xmit_type & QLA_TGT_XMIT_STATUS) {
3160                                 pkt->u.status0.scsi_status =
3161                                     cpu_to_le16(prm.rq_result);
3162                                 pkt->u.status0.residual =
3163                                     cpu_to_le32(prm.residual);
3164                                 pkt->u.status0.flags |= cpu_to_le16(
3165                                     CTIO7_FLAGS_SEND_STATUS);
3166                                 if (qlt_need_explicit_conf(cmd, 0)) {
3167                                         pkt->u.status0.flags |=
3168                                             cpu_to_le16(
3169                                                 CTIO7_FLAGS_EXPLICIT_CONFORM |
3170                                                 CTIO7_FLAGS_CONFORM_REQ);
3171                                 }
3172                         }
3173
3174                 } else {
3175                         /*
3176                          * We have already made sure that there is sufficient
3177                          * amount of request entries to not drop HW lock in
3178                          * req_pkt().
3179                          */
3180                         struct ctio7_to_24xx *ctio =
3181                                 (struct ctio7_to_24xx *)qlt_get_req_pkt(
3182                                     qpair->req);
3183
3184                         ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
3185                             "Building additional status packet 0x%p.\n",
3186                             ctio);
3187
3188                         /*
3189                          * T10Dif: ctio_crc2_to_fw overlay ontop of
3190                          * ctio7_to_24xx
3191                          */
3192                         memcpy(ctio, pkt, sizeof(*ctio));
3193                         /* reset back to CTIO7 */
3194                         ctio->entry_count = 1;
3195                         ctio->entry_type = CTIO_TYPE7;
3196                         ctio->dseg_count = 0;
3197                         ctio->u.status1.flags &= ~cpu_to_le16(
3198                             CTIO7_FLAGS_DATA_IN);
3199
3200                         /* Real finish is ctio_m1's finish */
3201                         pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
3202                         pkt->u.status0.flags |= cpu_to_le16(
3203                             CTIO7_FLAGS_DONT_RET_CTIO);
3204
3205                         /* qlt_24xx_init_ctio_to_isp will correct
3206                          * all neccessary fields that's part of CTIO7.
3207                          * There should be no residual of CTIO-CRC2 data.
3208                          */
3209                         qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
3210                             &prm);
3211                 }
3212         } else
3213                 qlt_24xx_init_ctio_to_isp(pkt, &prm);
3214
3215
3216         cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
3217         cmd->cmd_sent_to_fw = 1;
3218
3219         /* Memory Barrier */
3220         wmb();
3221         if (qpair->reqq_start_iocbs)
3222                 qpair->reqq_start_iocbs(qpair);
3223         else
3224                 qla2x00_start_iocbs(vha, qpair->req);
3225         spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3226
3227         return 0;
3228
3229 out_unmap_unlock:
3230         qlt_unmap_sg(vha, cmd);
3231         spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3232
3233         return res;
3234 }
3235 EXPORT_SYMBOL(qlt_xmit_response);
3236
/*
 * qlt_rdy_to_xfer() - tell the firmware we are ready to receive write data
 * for @cmd (Transfer Ready phase).
 *
 * Maps the scatterlist, reserves request-ring space and queues a CTIO7
 * (CTIO-CRC2 when T10-PI is active) with CTIO7_FLAGS_DATA_OUT.
 *
 * Returns 0 on success (including the stale/reset case, which completes
 * the command locally), -EAGAIN if DMA mapping failed, or the error from
 * ring reservation / packet build.
 */
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags = 0;
	int res = 0;
	struct qla_qpair *qpair = cmd->qpair;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_NEED_DATA;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
			"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, qpair->chip_reset);
		return 0;
	}

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);

	if (unlikely(res != 0)) {
		/* Return the reserved ring entries before bailing out. */
		qpair->req->cnt += prm.req_cnt;
		goto out_unlock_free_unmap;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	/* For DIF ops the segments were loaded by the CRC2 builder. */
	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm);

	cmd->state = QLA_TGT_STATE_NEED_DATA;
	cmd->cmd_sent_to_fw = 1;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;

out_unlock_free_unmap:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);
3314
3315
3316 /*
3317  * it is assumed either hardware_lock or qpair lock is held.
3318  */
3319 static void
3320 qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
3321         struct ctio_crc_from_fw *sts)
3322 {
3323         uint8_t         *ap = &sts->actual_dif[0];
3324         uint8_t         *ep = &sts->expected_dif[0];
3325         uint64_t        lba = cmd->se_cmd.t_task_lba;
3326         uint8_t scsi_status, sense_key, asc, ascq;
3327         unsigned long flags;
3328         struct scsi_qla_host *vha = cmd->vha;
3329
3330         cmd->trc_flags |= TRC_DIF_ERR;
3331
3332         cmd->a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
3333         cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
3334         cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
3335
3336         cmd->e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
3337         cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
3338         cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
3339
3340         ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
3341             "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
3342
3343         scsi_status = sense_key = asc = ascq = 0;
3344
3345         /* check appl tag */
3346         if (cmd->e_app_tag != cmd->a_app_tag) {
3347                 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
3348                     "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3349                     cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3350                     cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3351                     cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3352                     cmd->atio.u.isp24.fcp_hdr.ox_id);
3353
3354                 cmd->dif_err_code = DIF_ERR_APP;
3355                 scsi_status = SAM_STAT_CHECK_CONDITION;
3356                 sense_key = ABORTED_COMMAND;
3357                 asc = 0x10;
3358                 ascq = 0x2;
3359         }
3360
3361         /* check ref tag */
3362         if (cmd->e_ref_tag != cmd->a_ref_tag) {
3363                 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
3364                     "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
3365                     cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3366                     cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3367                     cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3368                     cmd->atio.u.isp24.fcp_hdr.ox_id);
3369
3370                 cmd->dif_err_code = DIF_ERR_REF;
3371                 scsi_status = SAM_STAT_CHECK_CONDITION;
3372                 sense_key = ABORTED_COMMAND;
3373                 asc = 0x10;
3374                 ascq = 0x3;
3375                 goto out;
3376         }
3377
3378         /* check guard */
3379         if (cmd->e_guard != cmd->a_guard) {
3380                 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
3381                     "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3382                     cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3383                     cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3384                     cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3385                     cmd->atio.u.isp24.fcp_hdr.ox_id);
3386
3387                 cmd->dif_err_code = DIF_ERR_GRD;
3388                 scsi_status = SAM_STAT_CHECK_CONDITION;
3389                 sense_key = ABORTED_COMMAND;
3390                 asc = 0x10;
3391                 ascq = 0x1;
3392         }
3393 out:
3394         switch (cmd->state) {
3395         case QLA_TGT_STATE_NEED_DATA:
3396                 /* handle_data will load DIF error code  */
3397                 cmd->state = QLA_TGT_STATE_DATA_IN;
3398                 vha->hw->tgt.tgt_ops->handle_data(cmd);
3399                 break;
3400         default:
3401                 spin_lock_irqsave(&cmd->cmd_lock, flags);
3402                 if (cmd->aborted) {
3403                         spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3404                         vha->hw->tgt.tgt_ops->free_cmd(cmd);
3405                         break;
3406                 }
3407                 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3408
3409                 qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
3410                     ascq);
3411                 /* assume scsi status gets out on the wire.
3412                  * Will not wait for completion.
3413                  */
3414                 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3415                 break;
3416         }
3417 }
3418
3419 /* If hardware_lock held on entry, might drop it, then reaquire */
3420 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
3421 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3422         struct imm_ntfy_from_isp *ntfy)
3423 {
3424         struct nack_to_isp *nack;
3425         struct qla_hw_data *ha = vha->hw;
3426         request_t *pkt;
3427         int ret = 0;
3428
3429         ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3430             "Sending TERM ELS CTIO (ha=%p)\n", ha);
3431
3432         pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3433         if (pkt == NULL) {
3434                 ql_dbg(ql_dbg_tgt, vha, 0xe080,
3435                     "qla_target(%d): %s failed: unable to allocate "
3436                     "request packet\n", vha->vp_idx, __func__);
3437                 return -ENOMEM;
3438         }
3439
3440         pkt->entry_type = NOTIFY_ACK_TYPE;
3441         pkt->entry_count = 1;
3442         pkt->handle = QLA_TGT_SKIP_HANDLE;
3443
3444         nack = (struct nack_to_isp *)pkt;
3445         nack->ox_id = ntfy->ox_id;
3446
3447         nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3448         if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3449                 nack->u.isp24.flags = ntfy->u.isp24.flags &
3450                         __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3451         }
3452
3453         /* terminate */
3454         nack->u.isp24.flags |=
3455                 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
3456
3457         nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3458         nack->u.isp24.status = ntfy->u.isp24.status;
3459         nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3460         nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3461         nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3462         nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3463         nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3464         nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3465
3466         qla2x00_start_iocbs(vha, vha->req);
3467         return ret;
3468 }
3469
3470 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3471         struct imm_ntfy_from_isp *imm, int ha_locked)
3472 {
3473         unsigned long flags = 0;
3474         int rc;
3475
3476         if (ha_locked) {
3477                 rc = __qlt_send_term_imm_notif(vha, imm);
3478
3479 #if 0   /* Todo  */
3480                 if (rc == -ENOMEM)
3481                         qlt_alloc_qfull_cmd(vha, imm, 0, 0);
3482 #else
3483                 if (rc) {
3484                 }
3485 #endif
3486                 goto done;
3487         }
3488
3489         spin_lock_irqsave(&vha->hw->hardware_lock, flags);
3490         rc = __qlt_send_term_imm_notif(vha, imm);
3491
3492 #if 0   /* Todo */
3493         if (rc == -ENOMEM)
3494                 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
3495 #endif
3496
3497 done:
3498         if (!ha_locked)
3499                 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
3500 }
3501
3502 /*
3503  * If hardware_lock held on entry, might drop it, then reaquire
3504  * This function sends the appropriate CTIO to ISP 2xxx or 24xx
3505  */
3506 static int __qlt_send_term_exchange(struct qla_qpair *qpair,
3507         struct qla_tgt_cmd *cmd,
3508         struct atio_from_isp *atio)
3509 {
3510         struct scsi_qla_host *vha = qpair->vha;
3511         struct ctio7_to_24xx *ctio24;
3512         struct qla_hw_data *ha = vha->hw;
3513         request_t *pkt;
3514         int ret = 0;
3515         uint16_t temp;
3516
3517         ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
3518
3519         if (cmd)
3520                 vha = cmd->vha;
3521
3522         pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
3523         if (pkt == NULL) {
3524                 ql_dbg(ql_dbg_tgt, vha, 0xe050,
3525                     "qla_target(%d): %s failed: unable to allocate "
3526                     "request packet\n", vha->vp_idx, __func__);
3527                 return -ENOMEM;
3528         }
3529
3530         if (cmd != NULL) {
3531                 if (cmd->state < QLA_TGT_STATE_PROCESSED) {
3532                         ql_dbg(ql_dbg_tgt, vha, 0xe051,
3533                             "qla_target(%d): Terminating cmd %p with "
3534                             "incorrect state %d\n", vha->vp_idx, cmd,
3535                             cmd->state);
3536                 } else
3537                         ret = 1;
3538         }
3539
3540         qpair->tgt_counters.num_term_xchg_sent++;
3541         pkt->entry_count = 1;
3542         pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3543
3544         ctio24 = (struct ctio7_to_24xx *)pkt;
3545         ctio24->entry_type = CTIO_TYPE7;
3546         ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
3547         ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3548         ctio24->vp_index = vha->vp_idx;
3549         ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
3550         ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
3551         ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
3552         ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3553         temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
3554                 CTIO7_FLAGS_TERMINATE;
3555         ctio24->u.status1.flags = cpu_to_le16(temp);
3556         temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3557         ctio24->u.status1.ox_id = cpu_to_le16(temp);
3558
3559         /* Most likely, it isn't needed */
3560         ctio24->u.status1.residual = get_unaligned((uint32_t *)
3561             &atio->u.isp24.fcp_cmnd.add_cdb[
3562             atio->u.isp24.fcp_cmnd.add_cdb_len]);
3563         if (ctio24->u.status1.residual != 0)
3564                 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
3565
3566         /* Memory Barrier */
3567         wmb();
3568         if (qpair->reqq_start_iocbs)
3569                 qpair->reqq_start_iocbs(qpair);
3570         else
3571                 qla2x00_start_iocbs(vha, qpair->req);
3572         return ret;
3573 }
3574
/*
 * qlt_send_term_exchange() - terminate an exchange, taking the qpair lock
 * unless the caller already holds it (@ha_locked).
 *
 * On -ENOMEM from the request ring, the request is parked as a "queue
 * full" command for later retry.  Unless @ul_abort is set or the command
 * is already aborted, the command itself is also freed here.
 */
static void qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
	int ul_abort)
{
	struct scsi_qla_host *vha;
	unsigned long flags = 0;
	int rc;

	/* why use different vha? NPIV */
	if (cmd)
		vha = cmd->vha;
	else
		vha = qpair->vha;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(qpair, cmd, atio);
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
		goto done;
	}
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	rc = __qlt_send_term_exchange(qpair, cmd, atio);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);

done:
	/* Free the command unless an upper-layer abort still references it. */
	if (cmd && !ul_abort && !cmd->aborted) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}

	/* In the !ha_locked path the qpair lock is still held here. */
	if (!ha_locked)
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return;
}
3612
3613 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3614 {
3615         struct list_head free_list;
3616         struct qla_tgt_cmd *cmd, *tcmd;
3617
3618         vha->hw->tgt.leak_exchg_thresh_hold =
3619             (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3620
3621         cmd = tcmd = NULL;
3622         if (!list_empty(&vha->hw->tgt.q_full_list)) {
3623                 INIT_LIST_HEAD(&free_list);
3624                 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3625
3626                 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
3627                         list_del(&cmd->cmd_list);
3628                         /* This cmd was never sent to TCM.  There is no need
3629                          * to schedule free or call free_cmd
3630                          */
3631                         qlt_free_cmd(cmd);
3632                         vha->hw->tgt.num_qfull_cmds_alloc--;
3633                 }
3634         }
3635         vha->hw->tgt.num_qfull_cmds_dropped = 0;
3636 }
3637
3638 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3639 {
3640         uint32_t total_leaked;
3641
3642         total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3643
3644         if (vha->hw->tgt.leak_exchg_thresh_hold &&
3645             (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3646
3647                 ql_dbg(ql_dbg_tgt, vha, 0xe079,
3648                     "Chip reset due to exchange starvation: %d/%d.\n",
3649                     total_leaked, vha->hw->cur_fw_xcb_count);
3650
3651                 if (IS_P3P_TYPE(vha->hw))
3652                         set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3653                 else
3654                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3655                 qla2xxx_wake_dpc(vha);
3656         }
3657
3658 }
3659
/*
 * qlt_abort_cmd() - terminate the FC exchange of a command the upper
 * layer (TCM) is aborting.
 *
 * Marks the command aborted under cmd_lock so a concurrent second abort
 * is detected, then sends a Terminate Exchange CTIO with ul_abort set so
 * the command is not freed by the terminate path.
 *
 * Returns 0 on the first abort, EIO if the command was already aborted.
 * NOTE(review): the return is positive EIO, not -EIO -- callers appear to
 * only test for non-zero; confirm before changing the sign.
 */
int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
	    "qla_target(%d): terminating exchange for aborted cmd=%p "
	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
	    se_cmd->tag);

	spin_lock_irqsave(&cmd->cmd_lock, flags);
	if (cmd->aborted) {
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
		/*
		 * It's normal to see 2 calls in this path:
		 *  1) XFER Rdy completion + CMD_T_ABORT
		 *  2) TCM TMR - drain_state_list
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
		    "multiple abort. %p transport_state %x, t_state %x, "
		    "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
		    cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
		return EIO;
	}
	cmd->aborted = 1;
	cmd->trc_flags |= TRC_ABORT;
	spin_unlock_irqrestore(&cmd->cmd_lock, flags);

	qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
	return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);
3694
3695 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3696 {
3697         struct fc_port *sess = cmd->sess;
3698
3699         ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3700             "%s: se_cmd[%p] ox_id %04x\n",
3701             __func__, &cmd->se_cmd,
3702             be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3703
3704         BUG_ON(cmd->cmd_in_wq);
3705
3706         if (cmd->sg_mapped)
3707                 qlt_unmap_sg(cmd->vha, cmd);
3708
3709         if (!cmd->q_full)
3710                 qlt_decr_num_pend_cmds(cmd->vha);
3711
3712         BUG_ON(cmd->sg_mapped);
3713         cmd->jiffies_at_free = get_jiffies_64();
3714         if (unlikely(cmd->free_sg))
3715                 kfree(cmd->sg);
3716
3717         if (!sess || !sess->se_sess) {
3718                 WARN_ON(1);
3719                 return;
3720         }
3721         cmd->jiffies_at_free = get_jiffies_64();
3722         percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3723 }
3724 EXPORT_SYMBOL(qlt_free_cmd);
3725
3726 /*
3727  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3728  */
3729 static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
3730         struct qla_tgt_cmd *cmd, uint32_t status)
3731 {
3732         int term = 0;
3733         struct scsi_qla_host *vha = qpair->vha;
3734
3735         if (cmd->se_cmd.prot_op)
3736                 ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
3737                     "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
3738                     "se_cmd=%p tag[%x] op %#x/%s",
3739                      cmd->lba, cmd->lba,
3740                      cmd->num_blks, &cmd->se_cmd,
3741                      cmd->atio.u.isp24.exchange_addr,
3742                      cmd->se_cmd.prot_op,
3743                      prot_op_str(cmd->se_cmd.prot_op));
3744
3745         if (ctio != NULL) {
3746                 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
3747                 term = !(c->flags &
3748                     cpu_to_le16(OF_TERM_EXCH));
3749         } else
3750                 term = 1;
3751
3752         if (term)
3753                 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
3754
3755         return term;
3756 }
3757
3758
/*
 * qlt_ctio_to_cmd() - map a CTIO completion handle back to its
 * outstanding command, clearing the outstanding-commands slot.
 *
 * Returns the command, or NULL for skip/NULL handles and for handles that
 * do not resolve to a known command.
 *
 * ha->hardware_lock supposed to be held on entry.
 */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, void *ctio)
{
	struct qla_tgt_cmd *cmd = NULL;
	struct req_que *req;
	/* Upper handle bits select the request queue it was submitted on. */
	int qid = GET_QID(handle);
	uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;

	/* Skip-handle CTIOs carry no command to look up. */
	if (unlikely(h == QLA_TGT_SKIP_HANDLE))
		return NULL;

	if (qid == rsp->req->id) {
		req = rsp->req;
	} else if (vha->hw->req_q_map[qid]) {
		/* Completion arrived on a different queue than it was sent on. */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
		    "qla_target(%d): CTIO completion with different QID %d handle %x\n",
		    vha->vp_idx, rsp->id, handle);
		req = vha->hw->req_q_map[qid];
	} else {
		return NULL;
	}

	h &= QLA_CMD_HANDLE_MASK;

	if (h != QLA_TGT_NULL_HANDLE) {
		/* Reject handles beyond the outstanding-commands array. */
		if (unlikely(h >= req->num_outstanding_cmds)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}

		cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h];
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_async, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
				vha->vp_idx, handle, req->id, rsp->id);
			return NULL;
		}
		/* Claim the slot so the command cannot be completed twice. */
		req->outstanding_cmds[h] = NULL;
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}
3810
3811 /* hardware_lock should be held by caller. */
3812 void
3813 qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
3814 {
3815         struct qla_hw_data *ha = vha->hw;
3816
3817         if (cmd->sg_mapped)
3818                 qlt_unmap_sg(vha, cmd);
3819
3820         /* TODO: fix debug message type and ids. */
3821         if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3822                 ql_dbg(ql_dbg_io, vha, 0xff00,
3823                     "HOST-ABORT: state=PROCESSED.\n");
3824         } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3825                 cmd->write_data_transferred = 0;
3826                 cmd->state = QLA_TGT_STATE_DATA_IN;
3827
3828                 ql_dbg(ql_dbg_io, vha, 0xff01,
3829                     "HOST-ABORT: state=DATA_IN.\n");
3830
3831                 ha->tgt.tgt_ops->handle_data(cmd);
3832                 return;
3833         } else {
3834                 ql_dbg(ql_dbg_io, vha, 0xff03,
3835                     "HOST-ABORT: state=BAD(%d).\n",
3836                     cmd->state);
3837                 dump_stack();
3838         }
3839
3840         cmd->trc_flags |= TRC_FLUSH;
3841         ha->tgt.tgt_ops->free_cmd(cmd);
3842 }
3843
/*
 * qlt_do_ctio_completion() - process a CTIO (Continue Target I/O)
 * completion returned by the firmware for a previously submitted
 * target command, and advance that command's state machine.
 *
 * @vha:    virtual host the completion arrived on
 * @rsp:    response queue that delivered the CTIO
 * @handle: driver handle from the CTIO; may carry the intermediate marker
 * @status: firmware completion status (CTIO_* codes; low 16 bits significant)
 * @ctio:   raw CTIO IOCB payload
 *
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
    struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct qla_tgt_cmd *cmd;
	struct qla_qpair *qpair = rsp->qpair;

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		/* Intermediate completions carry no command to finish. */
		return;
	}

	/* Translate the handle back to the owning command. */
	cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	cmd->cmd_sent_to_fw = 0;

	qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver request abort via Terminate exchange */
		case CTIO_TIMEOUT:
		case CTIO_INVALID_RX_ID:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
		{
			int logged_out =
				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with %s status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
			    status, cmd->state, se_cmd);

			if (logged_out && cmd->sess) {
				/*
				 * Session is already logged out, but we need
				 * to notify initiator, who's not aware of this
				 */
				cmd->sess->logout_on_delete = 0;
				cmd->sess->send_els_logo = 1;
				ql_dbg(ql_dbg_disc, vha, 0x20f8,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, cmd->sess->port_name);

				qlt_schedule_sess_for_deletion(cmd->sess);
			}
			break;
		}
		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x "
			    "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
			    "expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			/* The DIF error path owns the command from here on. */
			qlt_handle_dif_error(qpair, cmd, ctio);
			return;
		}
		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}


		/* "cmd->aborted" means
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again.  The exchange is already
		 * cleaned up/freed at FW level.  Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (!cmd->aborted)) {
			cmd->trc_flags |= TRC_CTIO_ERR;
			/* Nonzero return: termination took over the cmd. */
			if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
				return;
		}
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		cmd->trc_flags |= TRC_CTIO_DONE;
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		/* Write-data phase finished; hand the data to the backend. */
		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (status == CTIO_SUCCESS)
			cmd->write_data_transferred = 1;

		ha->tgt.tgt_ops->handle_data(cmd);
		/* handle_data() is responsible for the rest of the cmd. */
		return;
	} else if (cmd->aborted) {
		cmd->trc_flags |= TRC_CTIO_ABORTED;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		  "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
	} else {
		/* Unexpected state for a CTIO completion. */
		cmd->trc_flags |= TRC_CTIO_STRANGE;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
		!cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}
3983
3984 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
3985         uint8_t task_codes)
3986 {
3987         int fcp_task_attr;
3988
3989         switch (task_codes) {
3990         case ATIO_SIMPLE_QUEUE:
3991                 fcp_task_attr = TCM_SIMPLE_TAG;
3992                 break;
3993         case ATIO_HEAD_OF_QUEUE:
3994                 fcp_task_attr = TCM_HEAD_TAG;
3995                 break;
3996         case ATIO_ORDERED_QUEUE:
3997                 fcp_task_attr = TCM_ORDERED_TAG;
3998                 break;
3999         case ATIO_ACA_QUEUE:
4000                 fcp_task_attr = TCM_ACA_TAG;
4001                 break;
4002         case ATIO_UNTAGGED:
4003                 fcp_task_attr = TCM_SIMPLE_TAG;
4004                 break;
4005         default:
4006                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
4007                     "qla_target: unknown task code %x, use ORDERED instead\n",
4008                     task_codes);
4009                 fcp_task_attr = TCM_ORDERED_TAG;
4010                 break;
4011         }
4012
4013         return fcp_task_attr;
4014 }
4015
4016 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *,
4017                                         uint8_t *);
4018 /*
4019  * Process context for I/O path into tcm_qla2xxx code
4020  */
4021 static void __qlt_do_work(struct qla_tgt_cmd *cmd)
4022 {
4023         scsi_qla_host_t *vha = cmd->vha;
4024         struct qla_hw_data *ha = vha->hw;
4025         struct fc_port *sess = cmd->sess;
4026         struct atio_from_isp *atio = &cmd->atio;
4027         unsigned char *cdb;
4028         unsigned long flags;
4029         uint32_t data_length;
4030         int ret, fcp_task_attr, data_dir, bidi = 0;
4031         struct qla_qpair *qpair = cmd->qpair;
4032
4033         cmd->cmd_in_wq = 0;
4034         cmd->trc_flags |= TRC_DO_WORK;
4035
4036         if (cmd->aborted) {
4037                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
4038                     "cmd with tag %u is aborted\n",
4039                     cmd->atio.u.isp24.exchange_addr);
4040                 goto out_term;
4041         }
4042
4043         spin_lock_init(&cmd->cmd_lock);
4044         cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
4045         cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
4046
4047         if (atio->u.isp24.fcp_cmnd.rddata &&
4048             atio->u.isp24.fcp_cmnd.wrdata) {
4049                 bidi = 1;
4050                 data_dir = DMA_TO_DEVICE;
4051         } else if (atio->u.isp24.fcp_cmnd.rddata)
4052                 data_dir = DMA_FROM_DEVICE;
4053         else if (atio->u.isp24.fcp_cmnd.wrdata)
4054                 data_dir = DMA_TO_DEVICE;
4055         else
4056                 data_dir = DMA_NONE;
4057
4058         fcp_task_attr = qlt_get_fcp_task_attr(vha,
4059             atio->u.isp24.fcp_cmnd.task_attr);
4060         data_length = be32_to_cpu(get_unaligned((uint32_t *)
4061             &atio->u.isp24.fcp_cmnd.add_cdb[
4062             atio->u.isp24.fcp_cmnd.add_cdb_len]));
4063
4064         ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
4065                                           fcp_task_attr, data_dir, bidi);
4066         if (ret != 0)
4067                 goto out_term;
4068         /*
4069          * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
4070          */
4071         spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4072         ha->tgt.tgt_ops->put_sess(sess);
4073         spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4074         return;
4075
4076 out_term:
4077         ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
4078         /*
4079          * cmd has not sent to target yet, so pass NULL as the second
4080          * argument to qlt_send_term_exchange() and free the memory here.
4081          */
4082         cmd->trc_flags |= TRC_DO_WORK_ERR;
4083         spin_lock_irqsave(qpair->qp_lock_ptr, flags);
4084         qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
4085
4086         qlt_decr_num_pend_cmds(vha);
4087         percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
4088         spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
4089
4090         spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4091         ha->tgt.tgt_ops->put_sess(sess);
4092         spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4093 }
4094
4095 static void qlt_do_work(struct work_struct *work)
4096 {
4097         struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
4098         scsi_qla_host_t *vha = cmd->vha;
4099         unsigned long flags;
4100
4101         spin_lock_irqsave(&vha->cmd_list_lock, flags);
4102         list_del(&cmd->cmd_list);
4103         spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4104
4105         __qlt_do_work(cmd);
4106 }
4107
4108 void qlt_clr_qp_table(struct scsi_qla_host *vha)
4109 {
4110         unsigned long flags;
4111         struct qla_hw_data *ha = vha->hw;
4112         struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4113         void *node;
4114         u64 key = 0;
4115
4116         ql_log(ql_log_info, vha, 0x706c,
4117             "User update Number of Active Qpairs %d\n",
4118             ha->tgt.num_act_qpairs);
4119
4120         spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4121
4122         btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
4123                 btree_remove64(&tgt->lun_qpair_map, key);
4124
4125         ha->base_qpair->lun_cnt = 0;
4126         for (key = 0; key < ha->max_qpairs; key++)
4127                 if (ha->queue_pair_map[key])
4128                         ha->queue_pair_map[key]->lun_cnt = 0;
4129
4130         spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4131 }
4132
/*
 * qlt_assign_qpair() - pick the queue pair (and its CPU affinity hint)
 * that will service @cmd, based on the command's unpacked LUN.
 *
 * With multiple qpairs available, a LUN is sticky-mapped to a qpair via
 * tgt->lun_qpair_map; the first command for a LUN picks the least-loaded
 * qpair and caches the mapping.  Without qpairs, the first static hint
 * is used.  On btree insert failure the command still runs on the chosen
 * qpair; only the cached mapping is lost.
 */
static void qlt_assign_qpair(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_qpair *qpair, *qp;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h;

	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
		if (unlikely(!h)) {
			/* spread lun to qpair ratio evenly */
			int lcnt = 0, rc;
			struct scsi_qla_host *base_vha =
				pci_get_drvdata(vha->hw->pdev);

			/* Prefer the base qpair while it is unused. */
			qpair = vha->hw->base_qpair;
			if (qpair->lun_cnt == 0) {
				qpair->lun_cnt++;
				h = qla_qpair_to_hint(tgt, qpair);
				BUG_ON(!h);
				rc = btree_insert64(&tgt->lun_qpair_map,
					cmd->unpacked_lun, h, GFP_ATOMIC);
				if (rc) {
					qpair->lun_cnt--;
					ql_log(ql_log_info, vha, 0xd037,
					    "Unable to insert lun %llx into lun_qpair_map\n",
					    cmd->unpacked_lun);
				}
				goto out;
			} else {
				lcnt = qpair->lun_cnt;
			}

			/*
			 * Scan all qpairs: take the first idle one, or
			 * remember the least-loaded as a fallback.
			 */
			h = NULL;
			list_for_each_entry(qp, &base_vha->qp_list,
			    qp_list_elem) {
				if (qp->lun_cnt == 0) {
					qp->lun_cnt++;
					h = qla_qpair_to_hint(tgt, qp);
					BUG_ON(!h);
					rc = btree_insert64(&tgt->lun_qpair_map,
					    cmd->unpacked_lun, h, GFP_ATOMIC);
					if (rc) {
						qp->lun_cnt--;
						ql_log(ql_log_info, vha, 0xd038,
							"Unable to insert lun %llx into lun_qpair_map\n",
							cmd->unpacked_lun);
					}
					qpair = qp;
					goto out;
				} else {
					if (qp->lun_cnt < lcnt) {
						lcnt = qp->lun_cnt;
						qpair = qp;
						continue;
					}
				}
			}
			/* No idle qpair: use the least-loaded one found. */
			BUG_ON(!qpair);
			qpair->lun_cnt++;
			h = qla_qpair_to_hint(tgt, qpair);
			BUG_ON(!h);
			rc = btree_insert64(&tgt->lun_qpair_map,
				cmd->unpacked_lun, h, GFP_ATOMIC);
			if (rc) {
				qpair->lun_cnt--;
				ql_log(ql_log_info, vha, 0xd039,
				   "Unable to insert lun %llx into lun_qpair_map\n",
				   cmd->unpacked_lun);
			}
		}
	} else {
		h = &tgt->qphints[0];
	}
out:
	cmd->qpair = h->qpair;
	cmd->se_cmd.cpuid = h->cpuid;
}
4211
4212 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
4213                                        struct fc_port *sess,
4214                                        struct atio_from_isp *atio)
4215 {
4216         struct se_session *se_sess = sess->se_sess;
4217         struct qla_tgt_cmd *cmd;
4218         int tag;
4219
4220         tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
4221         if (tag < 0)
4222                 return NULL;
4223
4224         cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
4225         memset(cmd, 0, sizeof(struct qla_tgt_cmd));
4226         cmd->cmd_type = TYPE_TGT_CMD;
4227         memcpy(&cmd->atio, atio, sizeof(*atio));
4228         cmd->state = QLA_TGT_STATE_NEW;
4229         cmd->tgt = vha->vha_tgt.qla_tgt;
4230         qlt_incr_num_pend_cmds(vha);
4231         cmd->vha = vha;
4232         cmd->se_cmd.map_tag = tag;
4233         cmd->sess = sess;
4234         cmd->loop_id = sess->loop_id;
4235         cmd->conf_compl_supported = sess->conf_compl_supported;
4236
4237         cmd->trc_flags = 0;
4238         cmd->jiffies_at_alloc = get_jiffies_64();
4239
4240         cmd->unpacked_lun = scsilun_to_int(
4241             (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
4242         qlt_assign_qpair(vha, cmd);
4243         cmd->reset_count = vha->hw->base_qpair->chip_reset;
4244         cmd->vp_idx = vha->vp_idx;
4245
4246         return cmd;
4247 }
4248
/*
 * qlt_handle_cmd_for_atio() - accept a new SCSI command ATIO and queue
 * it for process-context handling on qla_tgt_wq.
 *
 * Returns 0 on success; -ENODEV while the target is stopping, -EBUSY for
 * reserved addresses or allocation failure, -EFAULT when no usable
 * session exists.  On success the command holds an extra session
 * reference that __qlt_do_work() drops.
 *
 * ha->hardware_lock supposed to be held on entry
 */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	port_id_t id;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -ENODEV;
	}

	/* s_id[] is wire order: domain, area, al_pa. */
	id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
	id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
	id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
	if (IS_SW_RESV_ADDR(id))
		return -EBUSY;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess))
		return -EFAULT;

	/* Another WWN used to have our s_id. Our PLOGI scheduled its
	 * session deletion, but it's still in sess_del_work wq */
	if (sess->deleted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
		    "New command while old session %p is being deleted\n",
		    sess);
		return -EFAULT;
	}

	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	if (!kref_get_unless_zero(&sess->sess_kref)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
		    "%s: kref_get fail, %8phC oxid %x \n",
		    __func__, sess->port_name,
		     be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
		return -EFAULT;
	}

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		/* Undo the reference taken above before bailing out. */
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		ha->tgt.tgt_ops->put_sess(sess);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return -EBUSY;
	}

	cmd->cmd_in_wq = 1;
	cmd->trc_flags |= TRC_NEW_CMD;

	/* Track the command so aborts can find it before it runs. */
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	/*
	 * Pick the CPU to run on: with qpairs, follow the qpair's CPU
	 * hint; with MSI-X only, keep reads on the current CPU.
	 */
	INIT_WORK(&cmd->work, qlt_do_work);
	if (vha->flags.qpairs_available) {
		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
	} else if (ha->msix_count) {
		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
			queue_work_on(smp_processor_id(), qla_tgt_wq,
			    &cmd->work);
		else
			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
			    &cmd->work);
	} else {
		queue_work(qla_tgt_wq, &cmd->work);
	}

	return 0;
}
4329
/*
 * qlt_issue_task_mgmt() - allocate a management command and dispatch a
 * task management function (TMF) to the target core.
 *
 * @sess:  session the TMF arrived on
 * @lun:   unpacked LUN the TMF targets
 * @fn:    QLA_TGT_* task management function code
 * @iocb:  originating immediate-notify IOCB, copied for the response;
 *         may be NULL
 * @flags: driver-private mcmd flags
 *
 * Returns 0 on success, -ENOMEM or -EFAULT on failure.
 *
 * ha->hardware_lock supposed to be held on entry
 */
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	int res;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	/* mempool memory is recycled, so clear it explicitly. */
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->qpair = ha->base_qpair;
	mcmd->vha = vha;

	switch (fn) {
	case QLA_TGT_LUN_RESET:
	    /* Mark pending commands on this LUN aborted before the reset. */
	    abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
	    break;
	}

	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0);
	if (res != 0) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
		    sess->vha->vp_idx, res);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
4378
4379 /* ha->hardware_lock supposed to be held on entry */
4380 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4381 {
4382         struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4383         struct qla_hw_data *ha = vha->hw;
4384         struct fc_port *sess;
4385         u64 unpacked_lun;
4386         int fn;
4387         unsigned long flags;
4388
4389         fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4390
4391         spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4392         sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4393             a->u.isp24.fcp_hdr.s_id);
4394         spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4395
4396         unpacked_lun =
4397             scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4398
4399         if (sess == NULL || sess->deleted)
4400                 return -EFAULT;
4401
4402         return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4403 }
4404
/*
 * __qlt_abort_task() - build and dispatch an ABORT TASK management
 * command for a session that has already been resolved.
 *
 * @vha:  virtual host the abort arrived on
 * @iocb: originating immediate-notify IOCB (2G-style abort)
 * @sess: target session the abort applies to
 *
 * Returns 0 on success, -ENOMEM or -EFAULT on failure.
 *
 * ha->hardware_lock supposed to be held on entry
 */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	u64 unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	/* mempool memory is recycled, so clear it explicitly. */
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	/* Keep the original IOCB around for the eventual response. */
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
	mcmd->qpair = ha->base_qpair;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
4446
4447 /* ha->hardware_lock supposed to be held on entry */
4448 static int qlt_abort_task(struct scsi_qla_host *vha,
4449         struct imm_ntfy_from_isp *iocb)
4450 {
4451         struct qla_hw_data *ha = vha->hw;
4452         struct fc_port *sess;
4453         int loop_id;
4454         unsigned long flags;
4455
4456         loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
4457
4458         spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4459         sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4460         spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4461
4462         if (sess == NULL) {
4463                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4464                     "qla_target(%d): task abort for unexisting "
4465                     "session\n", vha->vp_idx);
4466                 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4467                     QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
4468         }
4469
4470         return __qlt_abort_task(vha, iocb, sess);
4471 }
4472
4473 void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4474 {
4475         if (rc != MBS_COMMAND_COMPLETE) {
4476                 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4477                         "%s: se_sess %p / sess %p from"
4478                         " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4479                         " LOGO failed: %#x\n",
4480                         __func__,
4481                         fcport->se_sess,
4482                         fcport,
4483                         fcport->port_name, fcport->loop_id,
4484                         fcport->d_id.b.domain, fcport->d_id.b.area,
4485                         fcport->d_id.b.al_pa, rc);
4486         }
4487
4488         fcport->logout_completed = 1;
4489 }
4490
/*
* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
*
* Schedules sessions with matching port_id/loop_id but different wwn for
* deletion. Returns existing session with matching wwn if present.
* Null otherwise.
*
* @vha:           virtual host to scan
* @wwn:           WWPN of the logging-in initiator
* @port_id:       its N_Port ID
* @loop_id:       its N_Port handle
* @conflict_sess: out-param; set to a same-port_id/same-loop_id session
*                 with a different WWN that is not yet DELETED, else NULL
*/
struct fc_port *
qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
    port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
	struct fc_port *sess = NULL, *other_sess;
	uint64_t other_wwn;

	*conflict_sess = NULL;

	list_for_each_entry(other_sess, &vha->vp_fcports, list) {

		other_wwn = wwn_to_u64(other_sess->port_name);

		if (wwn == other_wwn) {
			/* Same WWN: this is our own session, keep it. */
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
		if (port_id.b24 == other_sess->d_id.b24) {
			if (loop_id != other_sess->loop_id) {
				ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but another
				 * session that has the same s_id/loop_id combo
				 * might have cleared it when requested this session
				 * deletion, so don't touch it
				 */
				qlt_schedule_sess_for_deletion(other_sess);
			} else {
				/*
				 * Another wwn used to have our s_id/loop_id
				 * kill the session, but don't free the loop_id
				 */
				ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				other_sess->keep_nport_handle = 1;
				if (other_sess->disc_state != DSC_DELETED)
					*conflict_sess = other_sess;
				qlt_schedule_sess_for_deletion(other_sess);
			}
			continue;
		}

		/* find other sess with nport handle collision */
		if ((loop_id == other_sess->loop_id) &&
			(loop_id != FC_NO_LOOP_ID)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
			       "Invalidating sess %p loop_id %d wwn %llx.\n",
			       other_sess, other_sess->loop_id, other_wwn);

			/* Same loop_id but different s_id
			 * Ok to kill and logout */
			qlt_schedule_sess_for_deletion(other_sess);
		}
	}

	return sess;
}
4563
4564 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4565 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4566 {
4567         struct qla_tgt_sess_op *op;
4568         struct qla_tgt_cmd *cmd;
4569         uint32_t key;
4570         int count = 0;
4571         unsigned long flags;
4572
4573         key = (((u32)s_id->b.domain << 16) |
4574                ((u32)s_id->b.area   <<  8) |
4575                ((u32)s_id->b.al_pa));
4576
4577         spin_lock_irqsave(&vha->cmd_list_lock, flags);
4578         list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4579                 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4580
4581                 if (op_key == key) {
4582                         op->aborted = true;
4583                         count++;
4584                 }
4585         }
4586
4587         list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
4588                 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4589                 if (op_key == key) {
4590                         op->aborted = true;
4591                         count++;
4592                 }
4593         }
4594
4595         list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4596                 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4597                 if (cmd_key == key) {
4598                         cmd->aborted = 1;
4599                         count++;
4600                 }
4601         }
4602         spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4603
4604         return count;
4605 }
4606
4607 static int qlt_handle_login(struct scsi_qla_host *vha,
4608     struct imm_ntfy_from_isp *iocb)
4609 {
4610         struct fc_port *sess = NULL, *conflict_sess = NULL;
4611         uint64_t wwn;
4612         port_id_t port_id;
4613         uint16_t loop_id, wd3_lo;
4614         int res = 0;
4615         struct qlt_plogi_ack_t *pla;
4616         unsigned long flags;
4617
4618         wwn = wwn_to_u64(iocb->u.isp24.port_name);
4619
4620         port_id.b.domain = iocb->u.isp24.port_id[2];
4621         port_id.b.area   = iocb->u.isp24.port_id[1];
4622         port_id.b.al_pa  = iocb->u.isp24.port_id[0];
4623         port_id.b.rsvd_1 = 0;
4624
4625         loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4626
4627         /* Mark all stale commands sitting in qla_tgt_wq for deletion */
4628         abort_cmds_for_s_id(vha, &port_id);
4629
4630         if (wwn) {
4631                 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4632                 sess = qlt_find_sess_invalidate_other(vha, wwn,
4633                     port_id, loop_id, &conflict_sess);
4634                 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4635         }
4636
4637         if (IS_SW_RESV_ADDR(port_id)) {
4638                 res = 1;
4639                 goto out;
4640         }
4641
4642         pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4643         if (!pla) {
4644                 qlt_send_term_imm_notif(vha, iocb, 1);
4645                 goto out;
4646         }
4647
4648         if (conflict_sess) {
4649                 conflict_sess->login_gen++;
4650                 qlt_plogi_ack_link(vha, pla, conflict_sess,
4651                     QLT_PLOGI_LINK_CONFLICT);
4652         }
4653
4654         if (!sess) {
4655                 pla->ref_count++;
4656                 ql_dbg(ql_dbg_disc, vha, 0xffff,
4657                     "%s %d %8phC post new sess\n",
4658                     __func__, __LINE__, iocb->u.isp24.port_name);
4659                 if (iocb->u.isp24.status_subcode == ELS_PLOGI)
4660                         qla24xx_post_newsess_work(vha, &port_id,
4661                             iocb->u.isp24.port_name,
4662                             iocb->u.isp24.u.plogi.node_name,
4663                             pla, FC4_TYPE_UNKNOWN);
4664                 else
4665                         qla24xx_post_newsess_work(vha, &port_id,
4666                             iocb->u.isp24.port_name, NULL,
4667                             pla, FC4_TYPE_UNKNOWN);
4668
4669                 goto out;
4670         }
4671
4672         qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4673         sess->d_id = port_id;
4674         sess->login_gen++;
4675
4676         if (iocb->u.isp24.status_subcode == ELS_PRLI) {
4677                 sess->fw_login_state = DSC_LS_PRLI_PEND;
4678                 sess->local = 0;
4679                 sess->loop_id = loop_id;
4680                 sess->d_id = port_id;
4681                 sess->fw_login_state = DSC_LS_PRLI_PEND;
4682                 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4683
4684                 if (wd3_lo & BIT_7)
4685                         sess->conf_compl_supported = 1;
4686
4687                 if ((wd3_lo & BIT_4) == 0)
4688                         sess->port_type = FCT_INITIATOR;
4689                 else
4690                         sess->port_type = FCT_TARGET;
4691
4692         } else
4693                 sess->fw_login_state = DSC_LS_PLOGI_PEND;
4694
4695
4696         ql_dbg(ql_dbg_disc, vha, 0x20f9,
4697             "%s %d %8phC  DS %d\n",
4698             __func__, __LINE__, sess->port_name, sess->disc_state);
4699
4700         switch (sess->disc_state) {
4701         case DSC_DELETED:
4702                 qlt_plogi_ack_unref(vha, pla);
4703                 break;
4704
4705         default:
4706                 /*
4707                  * Under normal circumstances we want to release nport handle
4708                  * during LOGO process to avoid nport handle leaks inside FW.
4709                  * The exception is when LOGO is done while another PLOGI with
4710                  * the same nport handle is waiting as might be the case here.
4711                  * Note: there is always a possibily of a race where session
4712                  * deletion has already started for other reasons (e.g. ACL
4713                  * removal) and now PLOGI arrives:
4714                  * 1. if PLOGI arrived in FW after nport handle has been freed,
4715                  *    FW must have assigned this PLOGI a new/same handle and we
4716                  *    can proceed ACK'ing it as usual when session deletion
4717                  *    completes.
4718                  * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4719                  *    bit reached it, the handle has now been released. We'll
4720                  *    get an error when we ACK this PLOGI. Nothing will be sent
4721                  *    back to initiator. Initiator should eventually retry
4722                  *    PLOGI and situation will correct itself.
4723                  */
4724                 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4725                     (sess->d_id.b24 == port_id.b24));
4726
4727                 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4728                     "%s %d %8phC post del sess\n",
4729                     __func__, __LINE__, sess->port_name);
4730
4731
4732                 qlt_schedule_sess_for_deletion(sess);
4733                 break;
4734         }
4735 out:
4736         return res;
4737 }
4738
4739 /*
4740  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
4741  */
4742 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4743         struct imm_ntfy_from_isp *iocb)
4744 {
4745         struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4746         struct qla_hw_data *ha = vha->hw;
4747         struct fc_port *sess = NULL, *conflict_sess = NULL;
4748         uint64_t wwn;
4749         port_id_t port_id;
4750         uint16_t loop_id;
4751         uint16_t wd3_lo;
4752         int res = 0;
4753         unsigned long flags;
4754
4755         wwn = wwn_to_u64(iocb->u.isp24.port_name);
4756
4757         port_id.b.domain = iocb->u.isp24.port_id[2];
4758         port_id.b.area   = iocb->u.isp24.port_id[1];
4759         port_id.b.al_pa  = iocb->u.isp24.port_id[0];
4760         port_id.b.rsvd_1 = 0;
4761
4762         loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4763
4764         ql_dbg(ql_dbg_disc, vha, 0xf026,
4765             "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
4766             vha->vp_idx, iocb->u.isp24.port_id[2],
4767                 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
4768                    iocb->u.isp24.status_subcode, loop_id,
4769                 iocb->u.isp24.port_name);
4770
4771         /* res = 1 means ack at the end of thread
4772          * res = 0 means ack async/later.
4773          */
4774         switch (iocb->u.isp24.status_subcode) {
4775         case ELS_PLOGI:
4776                 res = qlt_handle_login(vha, iocb);
4777                 break;
4778
4779         case ELS_PRLI:
4780                 if (N2N_TOPO(ha)) {
4781                         sess = qla2x00_find_fcport_by_wwpn(vha,
4782                             iocb->u.isp24.port_name, 1);
4783
4784                         if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
4785                                 ql_dbg(ql_dbg_disc, vha, 0xffff,
4786                                     "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
4787                                     __func__, __LINE__,
4788                                     iocb->u.isp24.port_name);
4789                                 qlt_send_term_imm_notif(vha, iocb, 1);
4790                                 break;
4791                         }
4792
4793                         res = qlt_handle_login(vha, iocb);
4794                         break;
4795                 }
4796
4797                 if (IS_SW_RESV_ADDR(port_id)) {
4798                         res = 1;
4799                         break;
4800                 }
4801
4802                 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4803
4804                 if (wwn) {
4805                         spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4806                         sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
4807                                 loop_id, &conflict_sess);
4808                         spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4809                 }
4810
4811                 if (conflict_sess) {
4812                         switch (conflict_sess->disc_state) {
4813                         case DSC_DELETED:
4814                         case DSC_DELETE_PEND:
4815                                 break;
4816                         default:
4817                                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
4818                                     "PRLI with conflicting sess %p port %8phC\n",
4819                                     conflict_sess, conflict_sess->port_name);
4820                                 conflict_sess->fw_login_state =
4821                                     DSC_LS_PORT_UNAVAIL;
4822                                 qlt_send_term_imm_notif(vha, iocb, 1);
4823                                 res = 0;
4824                                 break;
4825                         }
4826                 }
4827
4828                 if (sess != NULL) {
4829                         bool delete = false;
4830                         spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4831                         switch (sess->fw_login_state) {
4832                         case DSC_LS_PLOGI_PEND:
4833                         case DSC_LS_PLOGI_COMP:
4834                         case DSC_LS_PRLI_COMP:
4835                                 break;
4836                         default:
4837                                 delete = true;
4838                                 break;
4839                         }
4840
4841                         switch (sess->disc_state) {
4842                         case DSC_LOGIN_PEND:
4843                         case DSC_GPDB:
4844                         case DSC_GPSC:
4845                         case DSC_UPD_FCPORT:
4846                         case DSC_LOGIN_COMPLETE:
4847                         case DSC_ADISC:
4848                                 delete = false;
4849                                 break;
4850                         default:
4851                                 break;
4852                         }
4853
4854                         if (delete) {
4855                                 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
4856                                     flags);
4857                                 /*
4858                                  * Impatient initiator sent PRLI before last
4859                                  * PLOGI could finish. Will force him to re-try,
4860                                  * while last one finishes.
4861                                  */
4862                                 ql_log(ql_log_warn, sess->vha, 0xf095,
4863                                     "sess %p PRLI received, before plogi ack.\n",
4864                                     sess);
4865                                 qlt_send_term_imm_notif(vha, iocb, 1);
4866                                 res = 0;
4867                                 break;
4868                         }
4869
4870                         /*
4871                          * This shouldn't happen under normal circumstances,
4872                          * since we have deleted the old session during PLOGI
4873                          */
4874                         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
4875                             "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
4876                             sess->loop_id, sess, iocb->u.isp24.nport_handle);
4877
4878                         sess->local = 0;
4879                         sess->loop_id = loop_id;
4880                         sess->d_id = port_id;
4881                         sess->fw_login_state = DSC_LS_PRLI_PEND;
4882
4883                         if (wd3_lo & BIT_7)
4884                                 sess->conf_compl_supported = 1;
4885
4886                         if ((wd3_lo & BIT_4) == 0)
4887                                 sess->port_type = FCT_INITIATOR;
4888                         else
4889                                 sess->port_type = FCT_TARGET;
4890
4891                         spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4892                 }
4893                 res = 1; /* send notify ack */
4894
4895                 /* Make session global (not used in fabric mode) */
4896                 if (ha->current_topology != ISP_CFG_F) {
4897                         if (sess) {
4898                                 ql_dbg(ql_dbg_disc, vha, 0x20fa,
4899                                     "%s %d %8phC post nack\n",
4900                                     __func__, __LINE__, sess->port_name);
4901                                 qla24xx_post_nack_work(vha, sess, iocb,
4902                                         SRB_NACK_PRLI);
4903                                 res = 0;
4904                         } else {
4905                                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4906                                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4907                                 qla2xxx_wake_dpc(vha);
4908                         }
4909                 } else {
4910                         if (sess) {
4911                                 ql_dbg(ql_dbg_disc, vha, 0x20fb,
4912                                     "%s %d %8phC post nack\n",
4913                                     __func__, __LINE__, sess->port_name);
4914                                 qla24xx_post_nack_work(vha, sess, iocb,
4915                                         SRB_NACK_PRLI);
4916                                 res = 0;
4917                         }
4918                 }
4919                 break;
4920
4921         case ELS_TPRLO:
4922                 if (le16_to_cpu(iocb->u.isp24.flags) &
4923                         NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
4924                         loop_id = 0xFFFF;
4925                         qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
4926                         res = 1;
4927                         break;
4928                 }
4929                 /* fall through */
4930         case ELS_LOGO:
4931         case ELS_PRLO:
4932                 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4933                 sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
4934                 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4935
4936                 if (sess) {
4937                         sess->login_gen++;
4938                         sess->fw_login_state = DSC_LS_LOGO_PEND;
4939                         sess->logo_ack_needed = 1;
4940                         memcpy(sess->iocb, iocb, IOCB_SIZE);
4941                 }
4942
4943                 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
4944
4945                 ql_dbg(ql_dbg_disc, vha, 0x20fc,
4946                     "%s: logo %llx res %d sess %p ",
4947                     __func__, wwn, res, sess);
4948                 if (res == 0) {
4949                         /*
4950                          * cmd went upper layer, look for qlt_xmit_tm_rsp()
4951                          * for LOGO_ACK & sess delete
4952                          */
4953                         BUG_ON(!sess);
4954                         res = 0;
4955                 } else {
4956                         /* cmd did not go to upper layer. */
4957                         if (sess) {
4958                                 qlt_schedule_sess_for_deletion(sess);
4959                                 res = 0;
4960                         }
4961                         /* else logo will be ack */
4962                 }
4963                 break;
4964         case ELS_PDISC:
4965         case ELS_ADISC:
4966         {
4967                 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4968                 if (tgt->link_reinit_iocb_pending) {
4969                         qlt_send_notify_ack(ha->base_qpair,
4970                             &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
4971                         tgt->link_reinit_iocb_pending = 0;
4972                 }
4973
4974                 sess = qla2x00_find_fcport_by_wwpn(vha,
4975                     iocb->u.isp24.port_name, 1);
4976                 if (sess) {
4977                         ql_dbg(ql_dbg_disc, vha, 0x20fd,
4978                                 "sess %p lid %d|%d DS %d LS %d\n",
4979                                 sess, sess->loop_id, loop_id,
4980                                 sess->disc_state, sess->fw_login_state);
4981                 }
4982
4983                 res = 1; /* send notify ack */
4984                 break;
4985         }
4986
4987         case ELS_FLOGI: /* should never happen */
4988         default:
4989                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
4990                     "qla_target(%d): Unsupported ELS command %x "
4991                     "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
4992                 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
4993                 break;
4994         }
4995
4996         ql_dbg(ql_dbg_disc, vha, 0xf026,
4997             "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
4998             vha->vp_idx, iocb->u.isp24.status_subcode, res);
4999
5000         return res;
5001 }
5002
5003 /*
5004  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
5005  */
5006 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
5007         struct imm_ntfy_from_isp *iocb)
5008 {
5009         struct qla_hw_data *ha = vha->hw;
5010         uint32_t add_flags = 0;
5011         int send_notify_ack = 1;
5012         uint16_t status;
5013
5014         status = le16_to_cpu(iocb->u.isp2x.status);
5015         switch (status) {
5016         case IMM_NTFY_LIP_RESET:
5017         {
5018                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
5019                     "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
5020                     vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
5021                     iocb->u.isp24.status_subcode);
5022
5023                 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5024                         send_notify_ack = 0;
5025                 break;
5026         }
5027
5028         case IMM_NTFY_LIP_LINK_REINIT:
5029         {
5030                 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5031                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
5032                     "qla_target(%d): LINK REINIT (loop %#x, "
5033                     "subcode %x)\n", vha->vp_idx,
5034                     le16_to_cpu(iocb->u.isp24.nport_handle),
5035                     iocb->u.isp24.status_subcode);
5036                 if (tgt->link_reinit_iocb_pending) {
5037                         qlt_send_notify_ack(ha->base_qpair,
5038                             &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
5039                 }
5040                 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
5041                 tgt->link_reinit_iocb_pending = 1;
5042                 /*
5043                  * QLogic requires to wait after LINK REINIT for possible
5044                  * PDISC or ADISC ELS commands
5045                  */
5046                 send_notify_ack = 0;
5047                 break;
5048         }
5049
5050         case IMM_NTFY_PORT_LOGOUT:
5051                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
5052                     "qla_target(%d): Port logout (loop "
5053                     "%#x, subcode %x)\n", vha->vp_idx,
5054                     le16_to_cpu(iocb->u.isp24.nport_handle),
5055                     iocb->u.isp24.status_subcode);
5056
5057                 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
5058                         send_notify_ack = 0;
5059                 /* The sessions will be cleared in the callback, if needed */
5060                 break;
5061
5062         case IMM_NTFY_GLBL_TPRLO:
5063                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
5064                     "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
5065                 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5066                         send_notify_ack = 0;
5067                 /* The sessions will be cleared in the callback, if needed */
5068                 break;
5069
5070         case IMM_NTFY_PORT_CONFIG:
5071                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
5072                     "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
5073                     status);
5074                 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5075                         send_notify_ack = 0;
5076                 /* The sessions will be cleared in the callback, if needed */
5077                 break;
5078
5079         case IMM_NTFY_GLBL_LOGO:
5080                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
5081                     "qla_target(%d): Link failure detected\n",
5082                     vha->vp_idx);
5083                 /* I_T nexus loss */
5084                 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5085                         send_notify_ack = 0;
5086                 break;
5087
5088         case IMM_NTFY_IOCB_OVERFLOW:
5089                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
5090                     "qla_target(%d): Cannot provide requested "
5091                     "capability (IOCB overflowed the immediate notify "
5092                     "resource count)\n", vha->vp_idx);
5093                 break;
5094
5095         case IMM_NTFY_ABORT_TASK:
5096                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
5097                     "qla_target(%d): Abort Task (S %08x I %#x -> "
5098                     "L %#x)\n", vha->vp_idx,
5099                     le16_to_cpu(iocb->u.isp2x.seq_id),
5100                     GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
5101                     le16_to_cpu(iocb->u.isp2x.lun));
5102                 if (qlt_abort_task(vha, iocb) == 0)
5103                         send_notify_ack = 0;
5104                 break;
5105
5106         case IMM_NTFY_RESOURCE:
5107                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
5108                     "qla_target(%d): Out of resources, host %ld\n",
5109                     vha->vp_idx, vha->host_no);
5110                 break;
5111
5112         case IMM_NTFY_MSG_RX:
5113                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
5114                     "qla_target(%d): Immediate notify task %x\n",
5115                     vha->vp_idx, iocb->u.isp2x.task_flags);
5116                 if (qlt_handle_task_mgmt(vha, iocb) == 0)
5117                         send_notify_ack = 0;
5118                 break;
5119
5120         case IMM_NTFY_ELS:
5121                 if (qlt_24xx_handle_els(vha, iocb) == 0)
5122                         send_notify_ack = 0;
5123                 break;
5124         default:
5125                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
5126                     "qla_target(%d): Received unknown immediate "
5127                     "notify status %x\n", vha->vp_idx, status);
5128                 break;
5129         }
5130
5131         if (send_notify_ack)
5132                 qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
5133                     0, 0);
5134 }
5135
5136 /*
5137  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
5138  * This function sends busy to ISP 2xxx or 24xx.
5139  */
5140 static int __qlt_send_busy(struct qla_qpair *qpair,
5141         struct atio_from_isp *atio, uint16_t status)
5142 {
5143         struct scsi_qla_host *vha = qpair->vha;
5144         struct ctio7_to_24xx *ctio24;
5145         struct qla_hw_data *ha = vha->hw;
5146         request_t *pkt;
5147         struct fc_port *sess = NULL;
5148         unsigned long flags;
5149         u16 temp;
5150
5151         spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5152         sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
5153             atio->u.isp24.fcp_hdr.s_id);
5154         spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5155         if (!sess) {
5156                 qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
5157                 return 0;
5158         }
5159         /* Sending marker isn't necessary, since we called from ISR */
5160
5161         pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
5162         if (!pkt) {
5163                 ql_dbg(ql_dbg_io, vha, 0x3063,
5164                     "qla_target(%d): %s failed: unable to allocate "
5165                     "request packet", vha->vp_idx, __func__);
5166                 return -ENOMEM;
5167         }
5168
5169         qpair->tgt_counters.num_q_full_sent++;
5170         pkt->entry_count = 1;
5171         pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
5172
5173         ctio24 = (struct ctio7_to_24xx *)pkt;
5174         ctio24->entry_type = CTIO_TYPE7;
5175         ctio24->nport_handle = sess->loop_id;
5176         ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
5177         ctio24->vp_index = vha->vp_idx;
5178         ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
5179         ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
5180         ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
5181         ctio24->exchange_addr = atio->u.isp24.exchange_addr;
5182         temp = (atio->u.isp24.attr << 9) |
5183                 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
5184                 CTIO7_FLAGS_DONT_RET_CTIO;
5185         ctio24->u.status1.flags = cpu_to_le16(temp);
5186         /*
5187          * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
5188          * if the explicit conformation is used.
5189          */
5190         ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
5191         ctio24->u.status1.scsi_status = cpu_to_le16(status);
5192         /* Memory Barrier */
5193         wmb();
5194         if (qpair->reqq_start_iocbs)
5195                 qpair->reqq_start_iocbs(qpair);
5196         else
5197                 qla2x00_start_iocbs(vha, qpair->req);
5198         return 0;
5199 }
5200
5201 /*
5202  * This routine is used to allocate a command for either a QFull condition
5203  * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
5204  * out previously.
5205  */
5206 static void
5207 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5208         struct atio_from_isp *atio, uint16_t status, int qfull)
5209 {
5210         struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5211         struct qla_hw_data *ha = vha->hw;
5212         struct fc_port *sess;
5213         struct se_session *se_sess;
5214         struct qla_tgt_cmd *cmd;
5215         int tag;
5216         unsigned long flags;
5217
5218         if (unlikely(tgt->tgt_stop)) {
5219                 ql_dbg(ql_dbg_io, vha, 0x300a,
5220                         "New command while device %p is shutting down\n", tgt);
5221                 return;
5222         }
5223
5224         if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5225                 vha->hw->tgt.num_qfull_cmds_dropped++;
5226                 if (vha->hw->tgt.num_qfull_cmds_dropped >
5227                         vha->qla_stats.stat_max_qfull_cmds_dropped)
5228                         vha->qla_stats.stat_max_qfull_cmds_dropped =
5229                                 vha->hw->tgt.num_qfull_cmds_dropped;
5230
5231                 ql_dbg(ql_dbg_io, vha, 0x3068,
5232                         "qla_target(%d): %s: QFull CMD dropped[%d]\n",
5233                         vha->vp_idx, __func__,
5234                         vha->hw->tgt.num_qfull_cmds_dropped);
5235
5236                 qlt_chk_exch_leak_thresh_hold(vha);
5237                 return;
5238         }
5239
5240         sess = ha->tgt.tgt_ops->find_sess_by_s_id
5241                 (vha, atio->u.isp24.fcp_hdr.s_id);
5242         if (!sess)
5243                 return;
5244
5245         se_sess = sess->se_sess;
5246
5247         tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
5248         if (tag < 0)
5249                 return;
5250
5251         cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
5252         if (!cmd) {
5253                 ql_dbg(ql_dbg_io, vha, 0x3009,
5254                         "qla_target(%d): %s: Allocation of cmd failed\n",
5255                         vha->vp_idx, __func__);
5256
5257                 vha->hw->tgt.num_qfull_cmds_dropped++;
5258                 if (vha->hw->tgt.num_qfull_cmds_dropped >
5259                         vha->qla_stats.stat_max_qfull_cmds_dropped)
5260                         vha->qla_stats.stat_max_qfull_cmds_dropped =
5261                                 vha->hw->tgt.num_qfull_cmds_dropped;
5262
5263                 qlt_chk_exch_leak_thresh_hold(vha);
5264                 return;
5265         }
5266
5267         memset(cmd, 0, sizeof(struct qla_tgt_cmd));
5268
5269         qlt_incr_num_pend_cmds(vha);
5270         INIT_LIST_HEAD(&cmd->cmd_list);
5271         memcpy(&cmd->atio, atio, sizeof(*atio));
5272
5273         cmd->tgt = vha->vha_tgt.qla_tgt;
5274         cmd->vha = vha;
5275         cmd->reset_count = ha->base_qpair->chip_reset;
5276         cmd->q_full = 1;
5277         cmd->qpair = ha->base_qpair;
5278
5279         if (qfull) {
5280                 cmd->q_full = 1;
5281                 /* NOTE: borrowing the state field to carry the status */
5282                 cmd->state = status;
5283         } else
5284                 cmd->term_exchg = 1;
5285
5286         spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5287         list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5288
5289         vha->hw->tgt.num_qfull_cmds_alloc++;
5290         if (vha->hw->tgt.num_qfull_cmds_alloc >
5291                 vha->qla_stats.stat_max_qfull_cmds_alloc)
5292                 vha->qla_stats.stat_max_qfull_cmds_alloc =
5293                         vha->hw->tgt.num_qfull_cmds_alloc;
5294         spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5295 }
5296
/*
 * Drain ha->tgt.q_full_list: for each queued command send either the
 * stashed busy status or a terminate-exchange, then free the command.
 *
 * Returns the last send result; -ENOMEM means IOCB space ran out and the
 * unsent remainder was put back on the qfull list for a later retry.
 */
int
qlt_free_qfull_cmds(struct qla_qpair *qpair)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_tgt_cmd *cmd, *tcmd;
	struct list_head free_list, q_full_list;
	int rc = 0;

	/* Lockless fast path; re-checked under the lock below. */
	if (list_empty(&ha->tgt.q_full_list))
		return 0;

	INIT_LIST_HEAD(&free_list);
	INIT_LIST_HEAD(&q_full_list);

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
		return 0;
	}

	/* Take the whole list private so q_full_lock can be dropped. */
	list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
		if (cmd->q_full)
			/* cmd->state is a borrowed field to hold status */
			rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
		else if (cmd->term_exchg)
			rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);

		/* Out of IOCB space: stop; the rest is requeued below. */
		if (rc == -ENOMEM)
			break;

		if (cmd->q_full)
			ql_dbg(ql_dbg_io, vha, 0x3006,
			    "%s: busy sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else if (cmd->term_exchg)
			ql_dbg(ql_dbg_io, vha, 0x3007,
			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else
			ql_dbg(ql_dbg_io, vha, 0x3008,
			    "%s: Unexpected cmd in QFull list %p\n", __func__,
			    cmd);

		/* Move the sent command over to the private free list. */
		list_del(&cmd->cmd_list);
		list_add_tail(&cmd->cmd_list, &free_list);

		/* piggy back on hardware_lock for protection */
		vha->hw->tgt.num_qfull_cmds_alloc--;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	cmd = NULL;

	/* Free the sent commands outside of any lock. */
	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
		list_del(&cmd->cmd_list);
		/* This cmd was never sent to TCM.  There is no need
		 * to schedule free or call free_cmd
		 */
		qlt_free_cmd(cmd);
	}

	/* Put anything we could not send back on the qfull list. */
	if (!list_empty(&q_full_list)) {
		spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
		list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
	}

	return rc;
}
5372
5373 static void
5374 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
5375     uint16_t status)
5376 {
5377         int rc = 0;
5378         struct scsi_qla_host *vha = qpair->vha;
5379
5380         rc = __qlt_send_busy(qpair, atio, status);
5381         if (rc == -ENOMEM)
5382                 qlt_alloc_qfull_cmd(vha, atio, status, 1);
5383 }
5384
5385 static int
5386 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
5387         struct atio_from_isp *atio, uint8_t ha_locked)
5388 {
5389         struct qla_hw_data *ha = vha->hw;
5390         unsigned long flags;
5391
5392         if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5393                 return 0;
5394
5395         if (!ha_locked)
5396                 spin_lock_irqsave(&ha->hardware_lock, flags);
5397         qlt_send_busy(qpair, atio, qla_sam_status);
5398         if (!ha_locked)
5399                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5400
5401         return 1;
5402 }
5403
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
/*
 * Dispatch one entry from the ATIO ring: new SCSI commands and task
 * management functions (ATIO_TYPE7) plus immediate notifies.
 * @ha_locked tells whether the caller already holds ha->hardware_lock;
 * paths that need it take it only when @ha_locked is 0.
 */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;
	unsigned long flags = 0;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0x3064,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	/* Balanced by the decrement at the end (or on the early return). */
	tgt->atio_irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		/* No exchange address: firmware cannot complete this I/O. */
		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_io, vha, 0x3065,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
			break;
		}

		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
			/* Regular command: enforce queue-full threshold first. */
			rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
			    atio, ha_locked);
			if (rc != 0) {
				tgt->atio_irq_cmd_count--;
				return;
			}
			rc = qlt_handle_cmd_for_atio(vha, atio);
		} else {
			rc = qlt_handle_task_mgmt(vha, atio);
		}
		if (unlikely(rc != 0)) {
			/* Could not queue the command; inform the initiator. */
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(ha->base_qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    qla_sam_status);
				break;
			}
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");

		/* Immediate notify handling requires the hardware lock. */
		if (!ha_locked)
			spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->atio_irq_cmd_count--;
}
5518
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
/*
 * Dispatch a target-mode entry from the response ring: CTIO completions,
 * 2xxx-style ATIOs, immediate notifies, notify/ABTS acknowledgements.
 */
static void qlt_response_pkt(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
		    vha->vp_idx, pkt->entry_type, vha->hw);
		return;
	}

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	switch (pkt->entry_type) {
	case CTIO_CRC2:
	case CTIO_TYPE7:
	{
		/* 24xx-and-later CTIO completion. */
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		/* 2xxx-style incoming command delivered via response ring. */
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
		if (atio->u.isp2x.status !=
		    cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}

		/* Caller holds hardware_lock, hence ha_locked = 1. */
		rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
		if (rc != 0)
			return;

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			/* Could not queue the command; inform the initiator. */
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(rsp->qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(rsp->qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(rsp->qpair, atio,
				    qla_sam_status);
				break;
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		/* 2xxx-style CTIO completion. */
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		/* Completion of a NOTIFY ACK we queued earlier. */
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		/* Completion of an ABTS response we queued earlier. */
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
				(struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: aborted
					 * exchange not terminated, i.e.
					 * response for the aborted command was
					 * sent between the abort request was
					 * received and processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitely
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

}
5701
5702 /*
5703  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
5704  */
5705 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
5706         uint16_t *mailbox)
5707 {
5708         struct qla_hw_data *ha = vha->hw;
5709         struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5710         int login_code;
5711
5712         if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
5713                 return;
5714
5715         if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
5716             IS_QLA2100(ha))
5717                 return;
5718         /*
5719          * In tgt_stop mode we also should allow all requests to pass.
5720          * Otherwise, some commands can stuck.
5721          */
5722
5723
5724         switch (code) {
5725         case MBA_RESET:                 /* Reset */
5726         case MBA_SYSTEM_ERR:            /* System Error */
5727         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
5728         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
5729                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
5730                     "qla_target(%d): System error async event %#x "
5731                     "occurred", vha->vp_idx, code);
5732                 break;
5733         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up. */
5734                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5735                 break;
5736
5737         case MBA_LOOP_UP:
5738         {
5739                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
5740                     "qla_target(%d): Async LOOP_UP occurred "
5741                     "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
5742                     le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5743                     le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5744                 if (tgt->link_reinit_iocb_pending) {
5745                         qlt_send_notify_ack(ha->base_qpair,
5746                             (void *)&tgt->link_reinit_iocb,
5747                             0, 0, 0, 0, 0, 0);
5748                         tgt->link_reinit_iocb_pending = 0;
5749                 }
5750                 break;
5751         }
5752
5753         case MBA_LIP_OCCURRED:
5754         case MBA_LOOP_DOWN:
5755         case MBA_LIP_RESET:
5756         case MBA_RSCN_UPDATE:
5757                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
5758                     "qla_target(%d): Async event %#x occurred "
5759                     "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5760                     le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5761                     le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5762                 break;
5763
5764         case MBA_REJECTED_FCP_CMD:
5765                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
5766                     "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
5767                     vha->vp_idx,
5768                     le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5769                     le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5770
5771                 if (le16_to_cpu(mailbox[3]) == 1) {
5772                         /* exchange starvation. */
5773                         vha->hw->exch_starvation++;
5774                         if (vha->hw->exch_starvation > 5) {
5775                                 ql_log(ql_log_warn, vha, 0xd03a,
5776                                     "Exchange starvation-. Resetting RISC\n");
5777
5778                                 vha->hw->exch_starvation = 0;
5779                                 if (IS_P3P_TYPE(vha->hw))
5780                                         set_bit(FCOE_CTX_RESET_NEEDED,
5781                                             &vha->dpc_flags);
5782                                 else
5783                                         set_bit(ISP_ABORT_NEEDED,
5784                                             &vha->dpc_flags);
5785                                 qla2xxx_wake_dpc(vha);
5786                         }
5787                 }
5788                 break;
5789
5790         case MBA_PORT_UPDATE:
5791                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
5792                     "qla_target(%d): Port update async event %#x "
5793                     "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
5794                     "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5795                     le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5796                     le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5797
5798                 login_code = le16_to_cpu(mailbox[2]);
5799                 if (login_code == 0x4) {
5800                         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
5801                             "Async MB 2: Got PLOGI Complete\n");
5802                         vha->hw->exch_starvation = 0;
5803                 } else if (login_code == 0x7)
5804                         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
5805                             "Async MB 2: Port Logged Out\n");
5806                 break;
5807         default:
5808                 break;
5809         }
5810
5811 }
5812
5813 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
5814         uint16_t loop_id)
5815 {
5816         fc_port_t *fcport, *tfcp, *del;
5817         int rc;
5818         unsigned long flags;
5819         u8 newfcport = 0;
5820
5821         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5822         if (!fcport) {
5823                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
5824                     "qla_target(%d): Allocation of tmp FC port failed",
5825                     vha->vp_idx);
5826                 return NULL;
5827         }
5828
5829         fcport->loop_id = loop_id;
5830
5831         rc = qla24xx_gpdb_wait(vha, fcport, 0);
5832         if (rc != QLA_SUCCESS) {
5833                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
5834                     "qla_target(%d): Failed to retrieve fcport "
5835                     "information -- get_port_database() returned %x "
5836                     "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
5837                 kfree(fcport);
5838                 return NULL;
5839         }
5840
5841         del = NULL;
5842         spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5843         tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
5844
5845         if (tfcp) {
5846                 tfcp->d_id = fcport->d_id;
5847                 tfcp->port_type = fcport->port_type;
5848                 tfcp->supported_classes = fcport->supported_classes;
5849                 tfcp->flags |= fcport->flags;
5850                 tfcp->scan_state = QLA_FCPORT_FOUND;
5851
5852                 del = fcport;
5853                 fcport = tfcp;
5854         } else {
5855                 if (vha->hw->current_topology == ISP_CFG_F)
5856                         fcport->flags |= FCF_FABRIC_DEVICE;
5857
5858                 list_add_tail(&fcport->list, &vha->vp_fcports);
5859                 if (!IS_SW_RESV_ADDR(fcport->d_id))
5860                    vha->fcport_count++;
5861                 fcport->login_gen++;
5862                 fcport->disc_state = DSC_LOGIN_COMPLETE;
5863                 fcport->login_succ = 1;
5864                 newfcport = 1;
5865         }
5866
5867         fcport->deleted = 0;
5868         spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5869
5870         switch (vha->host->active_mode) {
5871         case MODE_INITIATOR:
5872         case MODE_DUAL:
5873                 if (newfcport) {
5874                         if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
5875                                 ql_dbg(ql_dbg_disc, vha, 0x20fe,
5876                                    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
5877                                    __func__, __LINE__, fcport->port_name, vha->fcport_count);
5878                                 qla24xx_post_upd_fcport_work(vha, fcport);
5879                         } else {
5880                                 ql_dbg(ql_dbg_disc, vha, 0x20ff,
5881                                    "%s %d %8phC post gpsc fcp_cnt %d\n",
5882                                    __func__, __LINE__, fcport->port_name, vha->fcport_count);
5883                                 qla24xx_post_gpsc_work(vha, fcport);
5884                         }
5885                 }
5886                 break;
5887
5888         case MODE_TARGET:
5889         default:
5890                 break;
5891         }
5892         if (del)
5893                 qla2x00_free_fcport(del);
5894
5895         return fcport;
5896 }
5897
/*
 * Create a target session for the initiator identified by the 3-byte
 * big-endian @s_id: resolve s_id -> loop_id, pull the port database
 * entry, and build the session.  Retries from scratch if a global reset
 * happens during discovery.  Returns the new session or NULL.
 *
 * Takes vha_tgt.tgt_mutex itself (see mutex_lock below), so it must NOT
 * be entered with that mutex already held, and it may sleep.
 */
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct fc_port *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

	/* FF.FC.xx is a well-known (domain controller) address. */
	if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) {
		/*
		 * This is Domain Controller, so it should be
		 * OK to drop SCSI commands from it.
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
		    "Unable to find initiator with S_ID %x:%x:%x",
		    s_id[0], s_id[1], s_id[2]);
		return NULL;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

retry:
	/* Snapshot the reset counter to detect a reset racing with us. */
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		ql_log(ql_log_info, vha, 0xf071,
		    "qla_target(%d): Unable to find "
		    "initiator with S_ID %x:%x:%x",
		    vha->vp_idx, s_id[0], s_id[1],
		    s_id[2]);

		/* Unknown initiator: explicitly log it out. */
		if (rc == -ENOENT) {
			qlt_port_logo_t logo;
			sid_to_portid(s_id, &logo.id);
			logo.cmd_count = 1;
			qlt_send_first_logo(vha, &logo);
		}

		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		return NULL;
	}

	/* A global reset invalidates what we just learned; start over. */
	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	return sess;
}
5967
5968 static void qlt_abort_work(struct qla_tgt *tgt,
5969         struct qla_tgt_sess_work_param *prm)
5970 {
5971         struct scsi_qla_host *vha = tgt->vha;
5972         struct qla_hw_data *ha = vha->hw;
5973         struct fc_port *sess = NULL;
5974         unsigned long flags = 0, flags2 = 0;
5975         uint32_t be_s_id;
5976         uint8_t s_id[3];
5977         int rc;
5978
5979         spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
5980
5981         if (tgt->tgt_stop)
5982                 goto out_term2;
5983
5984         s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
5985         s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
5986         s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
5987
5988         sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
5989             (unsigned char *)&be_s_id);
5990         if (!sess) {
5991                 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5992
5993                 sess = qlt_make_local_sess(vha, s_id);
5994                 /* sess has got an extra creation ref */
5995
5996                 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
5997                 if (!sess)
5998                         goto out_term2;
5999         } else {
6000                 if (sess->deleted) {
6001                         sess = NULL;
6002                         goto out_term2;
6003                 }
6004
6005                 if (!kref_get_unless_zero(&sess->sess_kref)) {
6006                         ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
6007                             "%s: kref_get fail %8phC \n",
6008                              __func__, sess->port_name);
6009                         sess = NULL;
6010                         goto out_term2;
6011                 }
6012         }
6013
6014         rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
6015         ha->tgt.tgt_ops->put_sess(sess);
6016         spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6017
6018         if (rc != 0)
6019                 goto out_term;
6020         return;
6021
6022 out_term2:
6023         if (sess)
6024                 ha->tgt.tgt_ops->put_sess(sess);
6025         spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6026
6027 out_term:
6028         spin_lock_irqsave(&ha->hardware_lock, flags);
6029         qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
6030             FCP_TMF_REJECTED, false);
6031         spin_unlock_irqrestore(&ha->hardware_lock, flags);
6032 }
6033
/*
 * Deferred (workqueue) handler for a task management function whose
 * session was not found in interrupt context: look up -- or create --
 * the session for the S_ID in the TM IOCB and issue the TMF; on any
 * failure terminate the exchange.
 *
 * Runs from qlt_sess_work_fn(); may sleep via qlt_make_local_sess().
 */
static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	u64 unpacked_lun;
	int fn;
	void *iocb;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		/* Drop the lock: session creation can sleep. */
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted) {
			sess = NULL;
			goto out_term2;
		}

		/* Pin the session for the duration of the TMF. */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
			    "%s: kref_get fail %8phC\n",
			     __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
	}

	iocb = a;
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	/*
	 * NOTE(review): put_sess() is invoked with sess_lock still held
	 * here and in qlt_abort_work(); verify the put path never sleeps
	 * under this spinlock.
	 */
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
	/* Could not issue the TMF: terminate the exchange. */
	qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
}
6099
/*
 * Workqueue handler that drains tgt->sess_works_list, dispatching each
 * queued item to the abort or task-management handler based on
 * prm->type.
 *
 * Locking: sess_work_lock protects the list; it is dropped around the
 * per-item handlers, which take their own locks.
 */
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}
6140
6141 /* Must be called under tgt_host_action_mutex */
6142 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
6143 {
6144         struct qla_tgt *tgt;
6145         int rc, i;
6146         struct qla_qpair_hint *h;
6147
6148         if (!QLA_TGT_MODE_ENABLED())
6149                 return 0;
6150
6151         if (!IS_TGT_MODE_CAPABLE(ha)) {
6152                 ql_log(ql_log_warn, base_vha, 0xe070,
6153                     "This adapter does not support target mode.\n");
6154                 return 0;
6155         }
6156
6157         ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
6158             "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
6159
6160         BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
6161
6162         tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
6163         if (!tgt) {
6164                 ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
6165                     "Unable to allocate struct qla_tgt\n");
6166                 return -ENOMEM;
6167         }
6168
6169         tgt->qphints = kzalloc((ha->max_qpairs + 1) *
6170             sizeof(struct qla_qpair_hint), GFP_KERNEL);
6171         if (!tgt->qphints) {
6172                 kfree(tgt);
6173                 ql_log(ql_log_warn, base_vha, 0x0197,
6174                     "Unable to allocate qpair hints.\n");
6175                 return -ENOMEM;
6176         }
6177
6178         if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
6179                 base_vha->host->hostt->supported_mode |= MODE_TARGET;
6180
6181         rc = btree_init64(&tgt->lun_qpair_map);
6182         if (rc) {
6183                 kfree(tgt->qphints);
6184                 kfree(tgt);
6185                 ql_log(ql_log_info, base_vha, 0x0198,
6186                         "Unable to initialize lun_qpair_map btree\n");
6187                 return -EIO;
6188         }
6189         h = &tgt->qphints[0];
6190         h->qpair = ha->base_qpair;
6191         INIT_LIST_HEAD(&h->hint_elem);
6192         h->cpuid = ha->base_qpair->cpuid;
6193         list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);
6194
6195         for (i = 0; i < ha->max_qpairs; i++) {
6196                 unsigned long flags;
6197
6198                 struct qla_qpair *qpair = ha->queue_pair_map[i];
6199                 h = &tgt->qphints[i + 1];
6200                 INIT_LIST_HEAD(&h->hint_elem);
6201                 if (qpair) {
6202                         h->qpair = qpair;
6203                         spin_lock_irqsave(qpair->qp_lock_ptr, flags);
6204                         list_add_tail(&h->hint_elem, &qpair->hints_list);
6205                         spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
6206                         h->cpuid = qpair->cpuid;
6207                 }
6208         }
6209
6210         tgt->ha = ha;
6211         tgt->vha = base_vha;
6212         init_waitqueue_head(&tgt->waitQ);
6213         INIT_LIST_HEAD(&tgt->del_sess_list);
6214         spin_lock_init(&tgt->sess_work_lock);
6215         INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
6216         INIT_LIST_HEAD(&tgt->sess_works_list);
6217         atomic_set(&tgt->tgt_global_resets_count, 0);
6218
6219         base_vha->vha_tgt.qla_tgt = tgt;
6220
6221         ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
6222                 "qla_target(%d): using 64 Bit PCI addressing",
6223                 base_vha->vp_idx);
6224         /* 3 is reserved */
6225         tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
6226
6227         mutex_lock(&qla_tgt_mutex);
6228         list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
6229         mutex_unlock(&qla_tgt_mutex);
6230
6231         if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
6232                 ha->tgt.tgt_ops->add_target(base_vha);
6233
6234         return 0;
6235 }
6236
6237 /* Must be called under tgt_host_action_mutex */
6238 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6239 {
6240         if (!vha->vha_tgt.qla_tgt)
6241                 return 0;
6242
6243         if (vha->fc_vport) {
6244                 qlt_release(vha->vha_tgt.qla_tgt);
6245                 return 0;
6246         }
6247
6248         /* free left over qfull cmds */
6249         qlt_init_term_exchange(vha);
6250
6251         ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6252             vha->host_no, ha);
6253         qlt_release(vha->vha_tgt.qla_tgt);
6254
6255         return 0;
6256 }
6257
/*
 * Tear down the per-adapter host map: remove every vha entry from the
 * host_map btree, then destroy the tree itself.
 */
void qlt_remove_target_resources(struct qla_hw_data *ha)
{
	struct scsi_qla_host *node;
	u32 key = 0;

	/* _safe variant: entries are removed while iterating */
	btree_for_each_safe32(&ha->tgt.host_map, key, node)
		btree_remove32(&ha->tgt.host_map, key);

	btree_destroy32(&ha->tgt.host_map);
}
6268
6269 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
6270         unsigned char *b)
6271 {
6272         int i;
6273
6274         pr_debug("qla2xxx HW vha->node_name: ");
6275         for (i = 0; i < WWN_SIZE; i++)
6276                 pr_debug("%02x ", vha->node_name[i]);
6277         pr_debug("\n");
6278         pr_debug("qla2xxx HW vha->port_name: ");
6279         for (i = 0; i < WWN_SIZE; i++)
6280                 pr_debug("%02x ", vha->port_name[i]);
6281         pr_debug("\n");
6282
6283         pr_debug("qla2xxx passed configfs WWPN: ");
6284         put_unaligned_be64(wwpn, b);
6285         for (i = 0; i < WWN_SIZE; i++)
6286                 pr_debug("%02x ", b[i]);
6287         pr_debug("\n");
6288 }
6289
/**
 * qla_tgt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: physical port WWPN matched against each host's port_name
 * @npiv_wwpn: NPIV WWPN passed through to @callback (0 for physical port)
 * @npiv_wwnn: NPIV WWNN passed through to @callback (0 for physical port)
 * @callback:  lport initialization callback for tcm_qla2xxx code
 *
 * Walks qla_tgt_glist for a target-capable host whose port name matches
 * @phys_wwpn, takes a scsi_host reference and invokes @callback.
 * Returns the callback's result, or -ENODEV if no host matched.  The
 * scsi_host reference is kept on success and dropped again on failure.
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		/* active_mode and tgt_stop are checked under the HW lock */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
				 host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		/* Not the host we are looking for: drop the ref and move on */
		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);
6362
6363 /**
6364  * qla_tgt_lport_deregister - Degister lport
6365  *
6366  * @vha:  Registered scsi_qla_host pointer
6367  */
6368 void qlt_lport_deregister(struct scsi_qla_host *vha)
6369 {
6370         struct qla_hw_data *ha = vha->hw;
6371         struct Scsi_Host *sh = vha->host;
6372         /*
6373          * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
6374          */
6375         vha->vha_tgt.target_lport_ptr = NULL;
6376         ha->tgt.tgt_ops = NULL;
6377         /*
6378          * Release the Scsi_Host reference for the underlying qla2xxx host
6379          */
6380         scsi_host_put(sh);
6381 }
6382 EXPORT_SYMBOL(qlt_lport_deregister);
6383
6384 /* Must be called under HW lock */
6385 static void qlt_set_mode(struct scsi_qla_host *vha)
6386 {
6387         switch (ql2x_ini_mode) {
6388         case QLA2XXX_INI_MODE_DISABLED:
6389         case QLA2XXX_INI_MODE_EXCLUSIVE:
6390                 vha->host->active_mode = MODE_TARGET;
6391                 break;
6392         case QLA2XXX_INI_MODE_ENABLED:
6393                 vha->host->active_mode = MODE_UNKNOWN;
6394                 break;
6395         case QLA2XXX_INI_MODE_DUAL:
6396                 vha->host->active_mode = MODE_DUAL;
6397                 break;
6398         default:
6399                 break;
6400         }
6401 }
6402
6403 /* Must be called under HW lock */
6404 static void qlt_clear_mode(struct scsi_qla_host *vha)
6405 {
6406         switch (ql2x_ini_mode) {
6407         case QLA2XXX_INI_MODE_DISABLED:
6408                 vha->host->active_mode = MODE_UNKNOWN;
6409                 break;
6410         case QLA2XXX_INI_MODE_EXCLUSIVE:
6411                 vha->host->active_mode = MODE_INITIATOR;
6412                 break;
6413         case QLA2XXX_INI_MODE_ENABLED:
6414         case QLA2XXX_INI_MODE_DUAL:
6415                 vha->host->active_mode = MODE_INITIATOR;
6416                 break;
6417         default:
6418                 break;
6419         }
6420 }
6421
/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	/* Switch active_mode to target under the HW lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/*
	 * A virtual port is bounced via disable/enable; the physical port
	 * needs a full ISP abort, requested through the DPC thread.
	 */
	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
}
EXPORT_SYMBOL(qlt_enable_vha);
6458
/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	/* Take target out of active_mode under the HW lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Reset the chip via the DPC thread so the new mode takes effect */
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}
6486
/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	/* New vport starts without target in its active_mode */
	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}
6512
6513 u8
6514 qlt_rff_id(struct scsi_qla_host *vha)
6515 {
6516         u8 fc4_feature = 0;
6517         /*
6518          * FC-4 Feature bit 0 indicates target functionality to the name server.
6519          */
6520         if (qla_tgt_mode_enabled(vha)) {
6521                 fc4_feature = BIT_0;
6522         } else if (qla_ini_mode_enabled(vha)) {
6523                 fc4_feature = BIT_1;
6524         } else if (qla_dual_mode_enabled(vha))
6525                 fc4_feature = BIT_0 | BIT_1;
6526
6527         return fc4_feature;
6528 }
6529
6530 /*
6531  * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
6532  * @ha: HA context
6533  *
6534  * Beginning of ATIO ring has initialization control block already built
6535  * by nvram config routine.
6536  *
6537  * Returns 0 on success.
6538  */
6539 void
6540 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6541 {
6542         struct qla_hw_data *ha = vha->hw;
6543         uint16_t cnt;
6544         struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
6545
6546         if (qla_ini_mode_enabled(vha))
6547                 return;
6548
6549         for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
6550                 pkt->u.raw.signature = ATIO_PROCESSED;
6551                 pkt++;
6552         }
6553
6554 }
6555
/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 * @ha_locked: non-zero when the caller already holds the hardware lock
 *
 * Walks the ATIO ring from the current ring pointer, dispatching each
 * packet to all vports (or terminating it if the FCP header is
 * corrupted), marking consumed entries with ATIO_PROCESSED and finally
 * updating the hardware OUT index.
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	/* Firmware not up yet: the ring contents are not valid */
	if (!ha->flags.fw_started)
		return;

	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
			/*
			 * This packet is corrupted. The header + payload
			 * can not be trusted. There is no point in passing
			 * it further up.
			 */
			ql_log(ql_log_warn, vha, 0xd03c,
			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
			    pkt->u.isp24.fcp_hdr.s_id,
			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
			    le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);

			adjust_corrupted_atio(pkt);
			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
			    ha_locked, 0);
		} else {
			qlt_24xx_atio_pkt_all_vps(vha,
			    (struct atio_from_isp *)pkt, ha_locked);
		}

		/* Consume entry_count slots, wrapping at the ring end */
		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
6612
/*
 * Reset the ATIO queue pointers and program the ATIO interrupt routing
 * (MSI-X vector or INTx handshake) in the init control block.
 */
void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_msix_entry *msix = &ha->msix_entries[2];
	struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	/* Zero the ATIO queue in/out pointer registers */
	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (ha->flags.msix_enabled) {
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
			if (IS_QLA2071(ha)) {
				/* 4 ports Baker: Enable Interrupt Handshake */
				icb->msix_atio = 0;
				icb->firmware_options_2 |= BIT_26;
			} else {
				icb->msix_atio = cpu_to_le16(msix->entry);
				icb->firmware_options_2 &= ~BIT_26;
			}
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "Registering ICB vector 0x%x for atio que.\n",
			    msix->entry);
		}
	} else {
		/* INTx|MSI */
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
			icb->msix_atio = 0;
			icb->firmware_options_2 |= BIT_26;
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "%s: Use INTx for ATIOQ.\n", __func__);
		}
	}
}
6651
/*
 * Apply target/dual-mode NVRAM overrides for ISP24xx-family adapters.
 * The original NVRAM values are saved once (saved_set) so they can be
 * restored when target mode is later turned off.
 */
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);

		/*
		 * clear BIT 15 explicitly as we have seen at least
		 * a couple of instances where this was set and this
		 * was causing the firmware to not be initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		if (IS_QLA25XX(ha)) {
			/* Change Loop-prefer to Pt-Pt */
			tmp = ~(BIT_4|BIT_5|BIT_6);
			nv->firmware_options_2 &= cpu_to_le32(tmp);
			tmp = P2P << 4;
			nv->firmware_options_2 |= cpu_to_le32(tmp);
		}
	} else {
		/* Target mode off: restore the NVRAM values saved above */
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* Advertise class 2 + 3 or class 3 only, per qpair config */
	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}
6742
6743 void
6744 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6745         struct init_cb_24xx *icb)
6746 {
6747         struct qla_hw_data *ha = vha->hw;
6748
6749         if (!QLA_TGT_MODE_ENABLED())
6750                 return;
6751
6752         if (ha->tgt.node_name_set) {
6753                 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6754                 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6755         }
6756
6757         /* disable ZIO at start time. */
6758         if (!vha->flags.init_done) {
6759                 uint32_t tmp;
6760                 tmp = le32_to_cpu(icb->firmware_options_2);
6761                 tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
6762                 icb->firmware_options_2 = cpu_to_le32(tmp);
6763         }
6764 }
6765
/*
 * Apply target/dual-mode NVRAM overrides for ISP81xx-family adapters.
 * Mirrors qlt_24xx_config_nvram_stage1(), with the Loop-prefer to
 * Pt-Pt change applied unconditionally.
 */
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		/*
		 * clear BIT 15 explicitly as we have seen at
		 * least a couple of instances where this was set
		 * and this was causing the firmware to not be
		 * initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		/* Change Loop-prefer to Pt-Pt */
		tmp = ~(BIT_4|BIT_5|BIT_6);
		nv->firmware_options_2 &= cpu_to_le32(tmp);
		tmp = P2P << 4;
		nv->firmware_options_2 |= cpu_to_le32(tmp);
	} else {
		/* Target mode off: restore the NVRAM values saved above */
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* Advertise class 2 + 3 or class 3 only, per qpair config */
	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}
6853
6854 void
6855 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
6856         struct init_cb_81xx *icb)
6857 {
6858         struct qla_hw_data *ha = vha->hw;
6859
6860         if (!QLA_TGT_MODE_ENABLED())
6861                 return;
6862
6863         if (ha->tgt.node_name_set) {
6864                 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6865                 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6866         }
6867
6868         /* disable ZIO at start time. */
6869         if (!vha->flags.init_done) {
6870                 uint32_t tmp;
6871                 tmp = le32_to_cpu(icb->firmware_options_2);
6872                 tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
6873                 icb->firmware_options_2 = cpu_to_le32(tmp);
6874         }
6875
6876 }
6877
6878 void
6879 qlt_83xx_iospace_config(struct qla_hw_data *ha)
6880 {
6881         if (!QLA_TGT_MODE_ENABLED())
6882                 return;
6883
6884         ha->msix_count += 1; /* For ATIO Q */
6885 }
6886
6887
6888 void
6889 qlt_modify_vp_config(struct scsi_qla_host *vha,
6890         struct vp_config_entry_24xx *vpmod)
6891 {
6892         /* enable target mode.  Bit5 = 1 => disable */
6893         if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
6894                 vpmod->options_idx1 &= ~BIT_5;
6895
6896         /* Disable ini mode, if requested.  bit4 = 1 => disable */
6897         if (qla_tgt_mode_enabled(vha))
6898                 vpmod->options_idx1 &= ~BIT_4;
6899 }
6900
/*
 * Early (stage 1) target-mode setup for the base vport: wires up the
 * ATIO queue registers, initializes target-mode locks, lists and the
 * port-id btree, and registers the base vport in the VP index map.
 * No-op when the driver was built/loaded without target mode.
 */
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	/*
	 * 83xx/27xx parts — and any adapter running with MSI-X disabled —
	 * expose the ATIO queue through the multiqueue I/O base; all
	 * others use the legacy ISP24xx register layout.
	 */
	if  ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);

	/* List + deferred worker for ATIOs without a known session. */
	INIT_LIST_HEAD(&base_vha->unknown_atio_list);
	INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
	    qlt_unknown_atio_work_fn);

	qlt_clear_mode(base_vha);

	/* btree mapping 24-bit port ids to vports; failure is non-fatal. */
	rc = btree_init32(&ha->tgt.host_map);
	if (rc)
		ql_log(ql_log_info, base_vha, 0xd03d,
		    "Unable to initialize ha->host_map btree\n");

	qlt_update_vp_map(base_vha, SET_VP_IDX);
}
6933
6934 irqreturn_t
6935 qla83xx_msix_atio_q(int irq, void *dev_id)
6936 {
6937         struct rsp_que *rsp;
6938         scsi_qla_host_t *vha;
6939         struct qla_hw_data *ha;
6940         unsigned long flags;
6941
6942         rsp = (struct rsp_que *) dev_id;
6943         ha = rsp->hw;
6944         vha = pci_get_drvdata(ha->pdev);
6945
6946         spin_lock_irqsave(&ha->tgt.atio_lock, flags);
6947
6948         qlt_24xx_process_atio_queue(vha, 0);
6949
6950         spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
6951
6952         return IRQ_HANDLED;
6953 }
6954
6955 static void
6956 qlt_handle_abts_recv_work(struct work_struct *work)
6957 {
6958         struct qla_tgt_sess_op *op = container_of(work,
6959                 struct qla_tgt_sess_op, work);
6960         scsi_qla_host_t *vha = op->vha;
6961         struct qla_hw_data *ha = vha->hw;
6962         unsigned long flags;
6963
6964         if (qla2x00_reset_active(vha) ||
6965             (op->chip_reset != ha->base_qpair->chip_reset))
6966                 return;
6967
6968         spin_lock_irqsave(&ha->tgt.atio_lock, flags);
6969         qlt_24xx_process_atio_queue(vha, 0);
6970         spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
6971
6972         spin_lock_irqsave(&ha->hardware_lock, flags);
6973         qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
6974         spin_unlock_irqrestore(&ha->hardware_lock, flags);
6975
6976         kfree(op);
6977 }
6978
6979 void
6980 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
6981     response_t *pkt)
6982 {
6983         struct qla_tgt_sess_op *op;
6984
6985         op = kzalloc(sizeof(*op), GFP_ATOMIC);
6986
6987         if (!op) {
6988                 /* do not reach for ATIO queue here.  This is best effort err
6989                  * recovery at this point.
6990                  */
6991                 qlt_response_pkt_all_vps(vha, rsp, pkt);
6992                 return;
6993         }
6994
6995         memcpy(&op->atio, pkt, sizeof(*pkt));
6996         op->vha = vha;
6997         op->chip_reset = vha->hw->base_qpair->chip_reset;
6998         op->rsp = rsp;
6999         INIT_WORK(&op->work, qlt_handle_abts_recv_work);
7000         queue_work(qla_tgt_wq, &op->work);
7001         return;
7002 }
7003
7004 int
7005 qlt_mem_alloc(struct qla_hw_data *ha)
7006 {
7007         if (!QLA_TGT_MODE_ENABLED())
7008                 return 0;
7009
7010         ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
7011             MAX_MULTI_ID_FABRIC, GFP_KERNEL);
7012         if (!ha->tgt.tgt_vp_map)
7013                 return -ENOMEM;
7014
7015         ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
7016             (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
7017             &ha->tgt.atio_dma, GFP_KERNEL);
7018         if (!ha->tgt.atio_ring) {
7019                 kfree(ha->tgt.tgt_vp_map);
7020                 return -ENOMEM;
7021         }
7022         return 0;
7023 }
7024
7025 void
7026 qlt_mem_free(struct qla_hw_data *ha)
7027 {
7028         if (!QLA_TGT_MODE_ENABLED())
7029                 return;
7030
7031         if (ha->tgt.atio_ring) {
7032                 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
7033                     sizeof(struct atio_from_isp), ha->tgt.atio_ring,
7034                     ha->tgt.atio_dma);
7035         }
7036         kfree(ha->tgt.tgt_vp_map);
7037 }
7038
7039 /* vport_slock to be held by the caller */
7040 void
7041 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
7042 {
7043         void *slot;
7044         u32 key;
7045         int rc;
7046
7047         if (!QLA_TGT_MODE_ENABLED())
7048                 return;
7049
7050         key = vha->d_id.b24;
7051
7052         switch (cmd) {
7053         case SET_VP_IDX:
7054                 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
7055                 break;
7056         case SET_AL_PA:
7057                 slot = btree_lookup32(&vha->hw->tgt.host_map, key);
7058                 if (!slot) {
7059                         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
7060                             "Save vha in host_map %p %06x\n", vha, key);
7061                         rc = btree_insert32(&vha->hw->tgt.host_map,
7062                                 key, vha, GFP_ATOMIC);
7063                         if (rc)
7064                                 ql_log(ql_log_info, vha, 0xd03e,
7065                                     "Unable to insert s_id into host_map: %06x\n",
7066                                     key);
7067                         return;
7068                 }
7069                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
7070                     "replace existing vha in host_map %p %06x\n", vha, key);
7071                 btree_update32(&vha->hw->tgt.host_map, key, vha);
7072                 break;
7073         case RESET_VP_IDX:
7074                 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
7075                 break;
7076         case RESET_AL_PA:
7077                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
7078                    "clear vha in host_map %p %06x\n", vha, key);
7079                 slot = btree_lookup32(&vha->hw->tgt.host_map, key);
7080                 if (slot)
7081                         btree_remove32(&vha->hw->tgt.host_map, key);
7082                 vha->d_id.b24 = 0;
7083                 break;
7084         }
7085 }
7086
7087 void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
7088 {
7089
7090         if (!vha->d_id.b24) {
7091                 vha->d_id = id;
7092                 qlt_update_vp_map(vha, SET_AL_PA);
7093         } else if (vha->d_id.b24 != id.b24) {
7094                 qlt_update_vp_map(vha, RESET_AL_PA);
7095                 vha->d_id = id;
7096                 qlt_update_vp_map(vha, SET_AL_PA);
7097         }
7098 }
7099
7100 static int __init qlt_parse_ini_mode(void)
7101 {
7102         if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
7103                 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
7104         else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
7105                 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
7106         else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
7107                 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
7108         else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
7109                 ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
7110         else
7111                 return false;
7112
7113         return true;
7114 }
7115
/*
 * Module-init hook for the target-mode infrastructure.  Parses the
 * qlini_mode parameter, then sets up the caches, mempool and workqueue
 * used by target mode.  Returns 1 when initiator mode is disabled,
 * 0 on plain success, or a negative errno.  Resources are unwound in
 * reverse order of acquisition on failure.
 */
int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	/* Nothing else to do when target mode is compiled out/disabled. */
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);

	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	/* Pre-reserved pool so TMF handling can't fail on allocation. */
	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}
7178
/*
 * Module-exit hook: tear down the target-mode resources created by
 * qlt_init(), in reverse order of their creation.
 */
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}