scsi: qla2xxx: remove double assignment in qla2x00_update_fcport
drivers/scsi/qla2xxx/qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
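
/*
 * Worked example for the arithmetic above: dsds = 10 consumes the 3
 * DSDs in the Command Type 2 IOCB plus one Continuation Type 0 IOCB
 * for the remaining 7, so qla2x00_calc_iocbs_32(10) == 2.  With
 * dsds = 11 the remainder (11 - 3) % 7 == 1 forces a second
 * continuation entry, for 3 IOCBs in total.
 */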

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
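
/*
 * Example: dsds = 12 fills the 2 DSDs in the Command Type 3 IOCB and
 * two Continuation Type 1 IOCBs of 5 DSDs each, so
 * qla2x00_calc_iocbs_64(12) == 3; dsds = 13 leaves a remainder and
 * needs a fourth entry.
 */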

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
                           CONTINUE_A64_TYPE, &cont_pkt->entry_type);

        return (cont_pkt);
}
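
/*
 * Both helpers above share a convention: the request-queue slot is
 * claimed by advancing ring_index/ring_ptr *before* the entry is
 * written, wrapping to the start of the ring at the end.  Callers
 * such as qla2x00_start_scsi() therefore point ring_ptr at the
 * command IOCB themselves and perform the final advance only after
 * the last entry of the command has been built.
 */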

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}
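
/*
 * The translation above boils down to a small table: STRIP operations
 * become PO_MODE_DIF_REMOVE, INSERT operations become
 * PO_MODE_DIF_INSERT, and PASS operations keep the protection data
 * end to end -- verified with an IP-checksum guard
 * (PO_MODE_DIF_TCP_CKSUM) when the host advertises SHOST_DIX_GUARD_IP,
 * or passed through unchanged (PO_MODE_DIF_PASS).
 */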

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        struct dsd32    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
        cur_dsd = cmd_pkt->dsd32;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd32(&cur_dsd, sg);
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
        cur_dsd = cmd_pkt->dsd64;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number*/
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}
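
/*
 * A note on the free-slot math above: the driver caches the number of
 * free request-ring entries in req->cnt and re-reads the hardware out
 * pointer only when the cache looks too small.  For example, with
 * req->length = 128, ring_index = 120 and an out pointer of 10, the
 * ring holds 128 - (120 - 10) = 18 free entries.  The "req_cnt + 2"
 * test keeps a little headroom so the in pointer never catches up
 * with the out pointer.
 */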

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;
        struct req_que *req = qpair->req;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
        ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        struct dsd64 *cur_dsd = NULL, *next_dsd;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct  scatterlist *cur_seg;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cmd_pkt->fcp_dsd.address);
                        cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
                } else {
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd++;
                }
                cur_dsd = next_dsd;
                while (avail_dsds) {
                        append_dsd64(&cur_dsd, cur_seg);
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}
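
/*
 * Unlike the Type 2/3 builders earlier in this file, Command Type 6
 * embeds no data segments in the IOCB itself.  The loop above strings
 * together pre-allocated DSD lists from ha->gbl_dsd_list: the first
 * list is referenced from the command packet (fcp_dsd), each full
 * list chains to the next through its final descriptor slot, and the
 * last list is null-terminated.
 */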

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
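
/*
 * Worked example, assuming QLA_DSDS_PER_IOCB is 37 as defined in
 * qla_def.h: dsds = 74 packs exactly into two DSD lists, while
 * dsds = 80 leaves a remainder of 6, so qla24xx_calc_dsd_lists(80)
 * returns 3.
 */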


/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
{
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = &cmd_pkt->dsd;

        /* Load data segments */

        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask*/
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}
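
/*
 * Summary of the tag setup above, by T10 DIF protection type: Types
 * 0, 1 and 2 seed the 32-bit reference tag from the command's LBA;
 * Types 1 and 2 additionally clear the application tag and its mask;
 * Type 3 carries only the 16-bit guard, so all ref-tag mask bytes are
 * zeroed.  The ref-tag mask bytes are set to 0xff (check all four
 * bytes) only when HBA error checking is enabled for the command.
 */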

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
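
/*
 * qla24xx_get_one_block_sg() carves the data scatterlist into
 * protection-interval (blk_sz) sized DMA chunks.  Each call returns
 * one chunk in sgx->dma_addr/dma_len; *partial is set when a
 * scatterlist element ends mid-interval, so the caller below keeps
 * appending descriptors until a full interval has been emitted and
 * only then interleaves the matching 8-byte DIF tuple.
 */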

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        struct dsd64 *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }


                        /* add new list to cmd iocb or last list */
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd = next_dsd;
                }
                put_unaligned_le64(sle_dma, &cur_dsd->address);
                cur_dsd->length = cpu_to_le32(sle_dma_len);
                cur_dsd++;
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}
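
/*
 * The "(avail_dsds + 1) * 12" above is the byte length of one DSD
 * list: each struct dsd64 descriptor occupies 12 bytes (a 64-bit
 * address plus a 32-bit length), with one extra slot reserved for the
 * entry that either chains to the next list or null-terminates the
 * walk.
 */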

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        struct dsd64 *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }


        for_each_sg(sgl, sg, tot_dsds, i) {
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd = next_dsd;
                }
                append_dsd64(&cur_dsd, sg);
                avail_dsds--;

        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
        struct scatterlist *sg, *sgl;
        struct crc_context *difctx = NULL;
        struct scsi_qla_host *vha;
        uint dsd_list_len;
        uint avail_dsds = 0;
        uint used_dsds = tot_dsds;
        bool dif_local_dma_alloc = false;
        bool direction_to_device = false;
        int i;

        if (sp) {
                struct scsi_cmnd *cmd = GET_CMD_SP(sp);

                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
                difctx = sp->u.scmd.ctx;
                direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
                  "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
                        __func__, cmd, difctx, sp);
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
                difctx = tc->ctx;
                direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
            "%s: enter (write=%u)\n", __func__, direction_to_device);

        /* if initiator doing write or target doing read */
        if (direction_to_device) {
                for_each_sg(sgl, sg, tot_dsds, i) {
                        u64 sle_phys = sg_phys(sg);

                        /* If SGE addr + len flips bits in upper 32-bits */
                        if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
                                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
                                    "%s: page boundary crossing (phys=%llx len=%x)\n",
                                    __func__, sle_phys, sg->length);

                                if (difctx) {
                                        ha->dif_bundle_crossed_pages++;
                                        dif_local_dma_alloc = true;
                                } else {
                                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                                            vha, 0xe022,
                                            "%s: difctx pointer is NULL\n",
                                            __func__);
                                }
                                break;
                        }
                }
                ha->dif_bundle_writes++;
        } else {
                ha->dif_bundle_reads++;
        }

        if (ql2xdifbundlinginternalbuffers)
                dif_local_dma_alloc = direction_to_device;

        if (dif_local_dma_alloc) {
                u32 track_difbundl_buf = 0;
                u32 ldma_sg_len = 0;
                u8 ldma_needed = 1;

                difctx->no_dif_bundl = 0;
                difctx->dif_bundl_len = 0;

                /* Track DSD buffers */
                INIT_LIST_HEAD(&difctx->ldif_dsd_list);
                /* Track local DMA buffers */
                INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

                for_each_sg(sgl, sg, tot_dsds, i) {
                        u32 sglen = sg_dma_len(sg);

                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
                            "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
                            __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
                            difctx->dif_bundl_len, ldma_needed);

                        while (sglen) {
                                u32 xfrlen = 0;

                                if (ldma_needed) {
                                        /*
                                         * Allocate list item to store
                                         * the DMA buffers
                                         */
                                        dsd_ptr = kzalloc(sizeof(*dsd_ptr),
                                            GFP_ATOMIC);
                                        if (!dsd_ptr) {
                                                ql_dbg(ql_dbg_tgt, vha, 0xe024,
                                                    "%s: failed alloc dsd_ptr\n",
                                                    __func__);
                                                return 1;
                                        }
                                        ha->dif_bundle_kallocs++;

                                        /* allocate dma buffer */
                                        dsd_ptr->dsd_addr = dma_pool_alloc
                                                (ha->dif_bundl_pool, GFP_ATOMIC,
                                                 &dsd_ptr->dsd_list_dma);
                                        if (!dsd_ptr->dsd_addr) {
                                                ql_dbg(ql_dbg_tgt, vha, 0xe024,
                                                    "%s: failed alloc ->dsd_ptr\n",
                                                    __func__);
                                                /*
                                                 * need to cleanup only this
                                                 * dsd_ptr rest will be done
                                                 * by sp_free_dma()
                                                 */
                                                kfree(dsd_ptr);
                                                ha->dif_bundle_kallocs--;
                                                return 1;
                                        }
                                        ha->dif_bundle_dma_allocs++;
                                        ldma_needed = 0;
                                        difctx->no_dif_bundl++;
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dma_hndl_list);
                                }

                                /* xfrlen is min of dma pool size and sglen */
                                xfrlen = (sglen >
                                   (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
                                    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
                                    sglen;

                                /* replace with local allocated dma buffer */
                                sg_pcopy_to_buffer(sgl, sg_nents(sgl),
                                    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
                                    difctx->dif_bundl_len);
                                difctx->dif_bundl_len += xfrlen;
                                sglen -= xfrlen;
                                ldma_sg_len += xfrlen;
                                if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
                                    sg_is_last(sg)) {
                                        ldma_needed = 1;
                                        ldma_sg_len = 0;
                                }
                        }
                }

                track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
                    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
                    difctx->dif_bundl_len, difctx->no_dif_bundl,
                    track_difbundl_buf);

                if (sp)
                        sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
                else
                        tc->prot_flags = DIF_BUNDL_DMA_VALID;

                list_for_each_entry_safe(dif_dsd, nxt_dsd,
                    &difctx->ldif_dma_hndl_list, list) {
                        u32 sglen = (difctx->dif_bundl_len >
                            DIF_BUNDLING_DMA_POOL_SIZE) ?
                            DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

                        BUG_ON(track_difbundl_buf == 0);

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
                                    0xe024,
                                    "%s: adding continuation iocb's\n",
                                    __func__);
                                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                    QLA_DSDS_PER_IOCB : used_dsds;
                                dsd_list_len = (avail_dsds + 1) * 12;
                                used_dsds -= avail_dsds;

                                /* allocate tracking DS */
                                dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
                                if (!dsd_ptr) {
                                        ql_dbg(ql_dbg_tgt, vha, 0xe026,
                                            "%s: failed alloc dsd_ptr\n",
                                            __func__);
                                        return 1;
                                }
                                ha->dif_bundle_kallocs++;

                                difctx->no_ldif_dsd++;
                                /* allocate new list */
                                dsd_ptr->dsd_addr =
                                    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                        &dsd_ptr->dsd_list_dma);
                                if (!dsd_ptr->dsd_addr) {
                                        ql_dbg(ql_dbg_tgt, vha, 0xe026,
                                            "%s: failed alloc ->dsd_addr\n",
                                            __func__);
                                        /*
                                         * need to cleanup only this dsd_ptr
                                         *  rest will be done by sp_free_dma()
                                         */
                                        kfree(dsd_ptr);
                                        ha->dif_bundle_kallocs--;
                                        return 1;
                                }
                                ha->dif_bundle_dma_allocs++;

                                if (sp) {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dsd_list);
                                        sp->flags |= SRB_CRC_CTX_DSD_VALID;
                                } else {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dsd_list);
                                        tc->ctx_dsd_alloced = 1;
                                }

                                /* add new list to cmd iocb or last list */
                                put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                                   &cur_dsd->address);
                                cur_dsd->length = cpu_to_le32(dsd_list_len);
                                cur_dsd = dsd_ptr->dsd_addr;
                        }
                        put_unaligned_le64(dif_dsd->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(sglen);
                        cur_dsd++;
                        avail_dsds--;
                        difctx->dif_bundl_len -= sglen;
                        track_difbundl_buf--;
                }

                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
                    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
                        difctx->no_ldif_dsd, difctx->no_dif_bundl);
        } else {
                for_each_sg(sgl, sg, tot_dsds, i) {
                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                    QLA_DSDS_PER_IOCB : used_dsds;
                                dsd_list_len = (avail_dsds + 1) * 12;
                                used_dsds -= avail_dsds;

                                /* allocate tracking DS */
                                dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
                                if (!dsd_ptr) {
                                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                                            vha, 0xe027,
                                            "%s: failed alloc dsd_dma...\n",
                                            __func__);
                                        return 1;
                                }

                                /* allocate new list */
                                dsd_ptr->dsd_addr =
                                    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                        &dsd_ptr->dsd_list_dma);
                                if (!dsd_ptr->dsd_addr) {
                                        /* need to cleanup only this dsd_ptr */
                                        /* rest will be done by sp_free_dma() */
                                        kfree(dsd_ptr);
                                        return 1;
                                }

                                if (sp) {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->dsd_list);
                                        sp->flags |= SRB_CRC_CTX_DSD_VALID;
                                } else {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->dsd_list);
                                        tc->ctx_dsd_alloced = 1;
                                }

                                /* add new list to cmd iocb or last list */
                                put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                                   &cur_dsd->address);
                                cur_dsd->length = cpu_to_le32(dsd_list_len);
                                cur_dsd = dsd_ptr->dsd_addr;
                        }
                        append_dsd64(&cur_dsd, sg);
                        avail_dsds--;
                }
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}
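
/*
 * qla24xx_walk_and_build_prot_sglist() takes one of two paths.  On
 * writes whose protection SGEs straddle a 4 GB DMA boundary (and on
 * every write when the ql2xdifbundlinginternalbuffers module
 * parameter is set), the DIF data is copied into driver-owned bounce
 * buffers from ha->dif_bundl_pool and those buffers are described to
 * the firmware instead; otherwise the caller's protection scatterlist
 * is mapped directly, with continuation DSD lists allocated on demand
 * exactly as in the data-segment walkers above.
 */
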
1367 /**
1368  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1369  *                                                      Type 6 IOCB types.
1370  *
1371  * @sp: SRB command to process
1372  * @cmd_pkt: Command type 3 IOCB
1373  * @tot_dsds: Total number of segments to transfer
1374  * @tot_prot_dsds: Total number of segments with protection information
1375  * @fw_prot_opts: Protection options to be passed to firmware
1376  */
1377 static inline int
1378 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1379     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1380 {
1381         struct dsd64            *cur_dsd;
1382         uint32_t                *fcp_dl;
1383         scsi_qla_host_t         *vha;
1384         struct scsi_cmnd        *cmd;
1385         uint32_t                total_bytes = 0;
1386         uint32_t                data_bytes;
1387         uint32_t                dif_bytes;
1388         uint8_t                 bundling = 1;
1389         uint16_t                blk_size;
1390         struct crc_context      *crc_ctx_pkt = NULL;
1391         struct qla_hw_data      *ha;
1392         uint8_t                 additional_fcpcdb_len;
1393         uint16_t                fcp_cmnd_len;
1394         struct fcp_cmnd         *fcp_cmnd;
1395         dma_addr_t              crc_ctx_dma;
1396
1397         cmd = GET_CMD_SP(sp);
1398
1399         /* Update entry type to indicate Command Type CRC_2 IOCB */
1400         put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
1401
1402         vha = sp->vha;
1403         ha = vha->hw;
1404
1405         /* No data transfer */
1406         data_bytes = scsi_bufflen(cmd);
1407         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1408                 cmd_pkt->byte_count = cpu_to_le32(0);
1409                 return QLA_SUCCESS;
1410         }
1411
1412         cmd_pkt->vp_index = sp->vha->vp_idx;
1413
1414         /* Set transfer direction */
1415         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1416                 cmd_pkt->control_flags =
1417                     cpu_to_le16(CF_WRITE_DATA);
1418         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1419                 cmd_pkt->control_flags =
1420                     cpu_to_le16(CF_READ_DATA);
1421         }
1422
1423         if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1424             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1425             (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1426             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1427                 bundling = 0;
1428
1429         /* Allocate CRC context from global pool */
1430         crc_ctx_pkt = sp->u.scmd.ctx =
1431             dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1432
1433         if (!crc_ctx_pkt)
1434                 goto crc_queuing_error;
1435
1436         crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1437
1438         sp->flags |= SRB_CRC_CTX_DMA_VALID;
1439
1440         /* Set handle */
1441         crc_ctx_pkt->handle = cmd_pkt->handle;
1442
1443         INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1444
1445         qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1446             &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1447
1448         put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
1449         cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1450
1451         /* Determine SCSI command length -- align to 4 byte boundary */
1452         if (cmd->cmd_len > 16) {
1453                 additional_fcpcdb_len = cmd->cmd_len - 16;
1454                 if ((cmd->cmd_len % 4) != 0) {
1455                         /* SCSI cmd > 16 bytes must be multiple of 4 */
1456                         goto crc_queuing_error;
1457                 }
1458                 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1459         } else {
1460                 additional_fcpcdb_len = 0;
1461                 fcp_cmnd_len = 12 + 16 + 4;
1462         }
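
        /*
         * Worked example for the length math above: the FCP_CMND payload
         * carries an 8-byte LUN plus 4 bytes of task fields (12 bytes)
         * ahead of the CDB, and a 4-byte FCP_DL after it.  A 32-byte CDB
         * therefore gives fcp_cmnd_len = 12 + 32 + 4 = 48, while any CDB
         * of 16 bytes or less uses the fixed 12 + 16 + 4 = 32.
         */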
1463
1464         fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1465
1466         fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1467         if (cmd->sc_data_direction == DMA_TO_DEVICE)
1468                 fcp_cmnd->additional_cdb_len |= 1;
1469         else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1470                 fcp_cmnd->additional_cdb_len |= 2;
1471
1472         int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1473         memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1474         cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1475         put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
1476                            &cmd_pkt->fcp_cmnd_dseg_address);
1477         fcp_cmnd->task_management = 0;
1478         fcp_cmnd->task_attribute = TSK_SIMPLE;
1479
1480         cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1481
1482         /* Compute dif len and adjust data len to include protection */
1483         dif_bytes = 0;
1484         blk_size = cmd->device->sector_size;
1485         dif_bytes = (data_bytes / blk_size) * 8;
1486
1487         switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1488         case SCSI_PROT_READ_INSERT:
1489         case SCSI_PROT_WRITE_STRIP:
1490                 total_bytes = data_bytes;
1491                 data_bytes += dif_bytes;
1492                 break;
1493
1494         case SCSI_PROT_READ_STRIP:
1495         case SCSI_PROT_WRITE_INSERT:
1496         case SCSI_PROT_READ_PASS:
1497         case SCSI_PROT_WRITE_PASS:
1498                 total_bytes = data_bytes + dif_bytes;
1499                 break;
1500         default:
1501                 BUG();
1502         }
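
        /*
         * Example with 512-byte sectors and a 256 KiB transfer:
         * dif_bytes = (262144 / 512) * 8 = 4096, one 8-byte DIF tuple per
         * block.  For READ_INSERT/WRITE_STRIP the fabric never carries the
         * protection data, so total_bytes stays 262144 while the host DMA
         * count (data_bytes) grows to 266240; for the pass-through and
         * firmware insert/strip cases the protection travels on the wire
         * and total_bytes = 266240.
         */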
1503
1504         if (!qla2x00_hba_err_chk_enabled(sp))
1505                 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1506         /* HBA error checking enabled */
1507         else if (IS_PI_UNINIT_CAPABLE(ha)) {
1508                 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1509                     || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1510                         SCSI_PROT_DIF_TYPE2))
1511                         fw_prot_opts |= BIT_10;
1512                 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1513                     SCSI_PROT_DIF_TYPE3)
1514                         fw_prot_opts |= BIT_11;
1515         }
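
        /*
         * Summary of the protection-option bits chosen above: 0x10 turns
         * off guard-tag checking when the HBA is not performing error
         * checks, and on PI_UNINIT-capable adapters BIT_10 (DIF Types 1/2)
         * or BIT_11 (DIF Type 3) is added, presumably so the firmware
         * tolerates uninitialized protection fields for those formats.
         */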
1516
1517         if (!bundling) {
1518                 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd;
1519         } else {
1520                 /*
1521                  * Configure bundling if protection data must be
1522                  * fetched with interleaving PCI accesses
1523                  */
1524                 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1525                 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1526                 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1527                                                         tot_prot_dsds);
1528                 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd;
1529         }
1530
1531         /* Finish the common fields of CRC pkt */
1532         crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1533         crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1534         crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1535         crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1536         /* Fibre channel byte count */
1537         cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1538         fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1539             additional_fcpcdb_len);
1540         *fcp_dl = htonl(total_bytes);
1541
1542         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1543                 cmd_pkt->byte_count = cpu_to_le32(0);
1544                 return QLA_SUCCESS;
1545         }
1546         /* Walks data segments */
1547
1548         cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1549
1550         if (!bundling && tot_prot_dsds) {
1551                 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1552                         cur_dsd, tot_dsds, NULL))
1553                         goto crc_queuing_error;
1554         } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1555                         (tot_dsds - tot_prot_dsds), NULL))
1556                 goto crc_queuing_error;
1557
1558         if (bundling && tot_prot_dsds) {
1559                 /* Walks dif segments */
1560                 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1561                 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1562                 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1563                                 tot_prot_dsds, NULL))
1564                         goto crc_queuing_error;
1565         }
1566         return QLA_SUCCESS;
1567
1568 crc_queuing_error:
1569         /* Cleanup will be performed by the caller */
1570
1571         return QLA_FUNCTION_FAILED;
1572 }
1573
1574 /**
1575  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1576  * @sp: command to send to the ISP
1577  *
1578  * Returns non-zero if a failure occurred, else zero.
1579  */
1580 int
1581 qla24xx_start_scsi(srb_t *sp)
1582 {
1583         int             nseg;
1584         unsigned long   flags;
1585         uint32_t        *clr_ptr;
1586         uint32_t        index;
1587         uint32_t        handle;
1588         struct cmd_type_7 *cmd_pkt;
1589         uint16_t        cnt;
1590         uint16_t        req_cnt;
1591         uint16_t        tot_dsds;
1592         struct req_que *req = NULL;
1593         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1594         struct scsi_qla_host *vha = sp->vha;
1595         struct qla_hw_data *ha = vha->hw;
1596
1597         /* Setup device pointers. */
1598         req = vha->req;
1599
1600         /* So we know we haven't pci_map'ed anything yet */
1601         tot_dsds = 0;
1602
1603         /* Send marker if required */
1604         if (vha->marker_needed != 0) {
1605                 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1606                     QLA_SUCCESS)
1607                         return QLA_FUNCTION_FAILED;
1608                 vha->marker_needed = 0;
1609         }
1610
1611         /* Acquire ring specific lock */
1612         spin_lock_irqsave(&ha->hardware_lock, flags);
1613
1614         /* Check for room in outstanding command list. */
1615         handle = req->current_outstanding_cmd;
1616         for (index = 1; index < req->num_outstanding_cmds; index++) {
1617                 handle++;
1618                 if (handle == req->num_outstanding_cmds)
1619                         handle = 1;
1620                 if (!req->outstanding_cmds[handle])
1621                         break;
1622         }
1623         if (index == req->num_outstanding_cmds)
1624                 goto queuing_error;
1625
1626         /* Map the sg table so we have an accurate count of sg entries needed */
1627         if (scsi_sg_count(cmd)) {
1628                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1629                     scsi_sg_count(cmd), cmd->sc_data_direction);
1630                 if (unlikely(!nseg))
1631                         goto queuing_error;
1632         } else
1633                 nseg = 0;
1634
1635         tot_dsds = nseg;
1636         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1637         if (req->cnt < (req_cnt + 2)) {
1638                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1639                     RD_REG_DWORD_RELAXED(req->req_q_out);
1640                 if (req->ring_index < cnt)
1641                         req->cnt = cnt - req->ring_index;
1642                 else
1643                         req->cnt = req->length -
1644                                 (req->ring_index - cnt);
1645                 if (req->cnt < (req_cnt + 2))
1646                         goto queuing_error;
1647         }
1648
1649         /* Build command packet. */
1650         req->current_outstanding_cmd = handle;
1651         req->outstanding_cmds[handle] = sp;
1652         sp->handle = handle;
1653         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1654         req->cnt -= req_cnt;
1655
1656         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1657         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1658
1659         /* Zero out remaining portion of packet. */
1660         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1661         clr_ptr = (uint32_t *)cmd_pkt + 2;
1662         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1663         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1664
1665         /* Set NPORT-ID and LUN number*/
1666         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1667         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1668         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1669         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1670         cmd_pkt->vp_index = sp->vha->vp_idx;
1671
1672         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1673         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1674
1675         cmd_pkt->task = TSK_SIMPLE;
1676
1677         /* Load SCSI command packet. */
1678         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1679         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1680
1681         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1682
1683         /* Build IOCB segments */
1684         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1685
1686         /* Set total data segment count. */
1687         cmd_pkt->entry_count = (uint8_t)req_cnt;
1688         wmb();
1689         /* Adjust ring index. */
1690         req->ring_index++;
1691         if (req->ring_index == req->length) {
1692                 req->ring_index = 0;
1693                 req->ring_ptr = req->ring;
1694         } else
1695                 req->ring_ptr++;
1696
1697         sp->flags |= SRB_DMA_VALID;
1698
1699         /* Set chip new ring index. */
1700         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1701
1702         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1703         return QLA_SUCCESS;
1704
1705 queuing_error:
1706         if (tot_dsds)
1707                 scsi_dma_unmap(cmd);
1708
1709         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1710
1711         return QLA_FUNCTION_FAILED;
1712 }
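
/*
 * The ring-space check in qla24xx_start_scsi() (and its variants below)
 * restated as a standalone sketch, assuming kernel u16 arithmetic: the
 * request ring is circular, ring_index is the producer index and "out"
 * is the last index the chip consumed, so the free-entry count wraps
 * around the ring length.  E.g. length = 2048, ring_index = 2040,
 * out = 8 gives 2048 - (2040 - 8) = 16 free entries.
 */
static inline uint16_t sketch_ring_free(uint16_t ring_index, uint16_t out,
					uint16_t length)
{
	if (ring_index < out)
		return out - ring_index;		/* no wrap */
	return length - (ring_index - out);		/* wrapped */
}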
1713
1714 /**
1715  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1716  * @sp: command to send to the ISP
1717  *
1718  * Returns non-zero if a failure occurred, else zero.
1719  */
1720 int
1721 qla24xx_dif_start_scsi(srb_t *sp)
1722 {
1723         int                     nseg;
1724         unsigned long           flags;
1725         uint32_t                *clr_ptr;
1726         uint32_t                index;
1727         uint32_t                handle;
1728         uint16_t                cnt;
1729         uint16_t                req_cnt = 0;
1730         uint16_t                tot_dsds;
1731         uint16_t                tot_prot_dsds;
1732         uint16_t                fw_prot_opts = 0;
1733         struct req_que          *req = NULL;
1734         struct rsp_que          *rsp = NULL;
1735         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1736         struct scsi_qla_host    *vha = sp->vha;
1737         struct qla_hw_data      *ha = vha->hw;
1738         struct cmd_type_crc_2   *cmd_pkt;
1739         uint32_t                status = 0;
1740
1741 #define QDSS_GOT_Q_SPACE        BIT_0
1742
1743         /* Only process protection I/O or CDBs longer than 16 bytes here */
1744         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1745                 if (cmd->cmd_len <= 16)
1746                         return qla24xx_start_scsi(sp);
1747         }
1748
1749         /* Setup device pointers. */
1750         req = vha->req;
1751         rsp = req->rsp;
1752
1753         /* So we know we haven't pci_map'ed anything yet */
1754         tot_dsds = 0;
1755
1756         /* Send marker if required */
1757         if (vha->marker_needed != 0) {
1758                 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1759                     QLA_SUCCESS)
1760                         return QLA_FUNCTION_FAILED;
1761                 vha->marker_needed = 0;
1762         }
1763
1764         /* Acquire ring specific lock */
1765         spin_lock_irqsave(&ha->hardware_lock, flags);
1766
1767         /* Check for room in outstanding command list. */
1768         handle = req->current_outstanding_cmd;
1769         for (index = 1; index < req->num_outstanding_cmds; index++) {
1770                 handle++;
1771                 if (handle == req->num_outstanding_cmds)
1772                         handle = 1;
1773                 if (!req->outstanding_cmds[handle])
1774                         break;
1775         }
1776
1777         if (index == req->num_outstanding_cmds)
1778                 goto queuing_error;
1779
1780         /* Compute number of required data segments */
1781         /* Map the sg table so we have an accurate count of sg entries needed */
1782         if (scsi_sg_count(cmd)) {
1783                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1784                     scsi_sg_count(cmd), cmd->sc_data_direction);
1785                 if (unlikely(!nseg))
1786                         goto queuing_error;
1787                 else
1788                         sp->flags |= SRB_DMA_VALID;
1789
1790                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1791                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1792                         struct qla2_sgx sgx;
1793                         uint32_t        partial;
1794
1795                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1796                         sgx.tot_bytes = scsi_bufflen(cmd);
1797                         sgx.cur_sg = scsi_sglist(cmd);
1798                         sgx.sp = sp;
1799
1800                         nseg = 0;
1801                         while (qla24xx_get_one_block_sg(
1802                             cmd->device->sector_size, &sgx, &partial))
1803                                 nseg++;
1804                 }
1805         } else
1806                 nseg = 0;
1807
1808         /* number of required data segments */
1809         tot_dsds = nseg;
1810
1811         /* Compute number of required protection segments */
1812         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1813                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1814                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1815                 if (unlikely(!nseg))
1816                         goto queuing_error;
1817                 else
1818                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1819
1820                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1821                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1822                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1823                 }
1824         } else {
1825                 nseg = 0;
1826         }
1827
1828         req_cnt = 1;
1829         /* Total Data and protection sg segment(s) */
1830         tot_prot_dsds = nseg;
1831         tot_dsds += nseg;
1832         if (req->cnt < (req_cnt + 2)) {
1833                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1834                     RD_REG_DWORD_RELAXED(req->req_q_out);
1835                 if (req->ring_index < cnt)
1836                         req->cnt = cnt - req->ring_index;
1837                 else
1838                         req->cnt = req->length -
1839                                 (req->ring_index - cnt);
1840                 if (req->cnt < (req_cnt + 2))
1841                         goto queuing_error;
1842         }
1843
1844         status |= QDSS_GOT_Q_SPACE;
1845
1846         /* Build header part of command packet (excluding the OPCODE). */
1847         req->current_outstanding_cmd = handle;
1848         req->outstanding_cmds[handle] = sp;
1849         sp->handle = handle;
1850         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1851         req->cnt -= req_cnt;
1852
1853         /* Fill-in common area */
1854         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1855         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1856
1857         clr_ptr = (uint32_t *)cmd_pkt + 2;
1858         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1859
1860         /* Set NPORT-ID and LUN number*/
1861         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1862         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1863         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1864         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1865
1866         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1867         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1868
1869         /* Total Data and protection segment(s) */
1870         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1871
1872         /* Build IOCB segments and adjust for data protection segments */
1873         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1874             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1875                 QLA_SUCCESS)
1876                 goto queuing_error;
1877
1878         cmd_pkt->entry_count = (uint8_t)req_cnt;
1879         /* Specify response queue number where completion should happen */
1880         cmd_pkt->entry_status = (uint8_t) rsp->id;
1881         cmd_pkt->timeout = cpu_to_le16(0);
1882         wmb();
1883
1884         /* Adjust ring index. */
1885         req->ring_index++;
1886         if (req->ring_index == req->length) {
1887                 req->ring_index = 0;
1888                 req->ring_ptr = req->ring;
1889         } else
1890                 req->ring_ptr++;
1891
1892         /* Set chip new ring index. */
1893         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1894
1895         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1896
1897         return QLA_SUCCESS;
1898
1899 queuing_error:
1900         if (status & QDSS_GOT_Q_SPACE) {
1901                 req->outstanding_cmds[handle] = NULL;
1902                 req->cnt += req_cnt;
1903         }
1904         /* Cleanup will be performed by the caller (queuecommand) */
1905
1906         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1907         return QLA_FUNCTION_FAILED;
1908 }
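
/*
 * The outstanding-command scan shared by the start_scsi variants,
 * restated as a sketch (names hypothetical): handles are 1-based, slot 0
 * is reserved, and the search resumes just past the last handle issued,
 * so allocation cycles through the array rather than always probing from
 * the front.
 */
static uint32_t sketch_find_free_handle(void **outstanding, uint32_t num,
					uint32_t last)
{
	uint32_t handle = last, index;

	for (index = 1; index < num; index++) {
		if (++handle == num)
			handle = 1;		/* wrap, skipping slot 0 */
		if (!outstanding[handle])
			return handle;		/* free slot found */
	}
	return 0;				/* handle array exhausted */
}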
1909
1910 /**
1911  * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1912  * @sp: command to send to the ISP
1913  *
1914  * Returns non-zero if a failure occurred, else zero.
1915  */
1916 static int
1917 qla2xxx_start_scsi_mq(srb_t *sp)
1918 {
1919         int             nseg;
1920         unsigned long   flags;
1921         uint32_t        *clr_ptr;
1922         uint32_t        index;
1923         uint32_t        handle;
1924         struct cmd_type_7 *cmd_pkt;
1925         uint16_t        cnt;
1926         uint16_t        req_cnt;
1927         uint16_t        tot_dsds;
1928         struct req_que *req = NULL;
1929         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1930         struct scsi_qla_host *vha = sp->fcport->vha;
1931         struct qla_hw_data *ha = vha->hw;
1932         struct qla_qpair *qpair = sp->qpair;
1933
1934         /* Acquire qpair specific lock */
1935         spin_lock_irqsave(&qpair->qp_lock, flags);
1936
1937         /* Setup qpair pointers */
1938         req = qpair->req;
1939
1940         /* So we know we haven't pci_map'ed anything yet */
1941         tot_dsds = 0;
1942
1943         /* Send marker if required */
1944         if (vha->marker_needed != 0) {
1945                 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1946                     QLA_SUCCESS) {
1947                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1948                         return QLA_FUNCTION_FAILED;
1949                 }
1950                 vha->marker_needed = 0;
1951         }
1952
1953         /* Check for room in outstanding command list. */
1954         handle = req->current_outstanding_cmd;
1955         for (index = 1; index < req->num_outstanding_cmds; index++) {
1956                 handle++;
1957                 if (handle == req->num_outstanding_cmds)
1958                         handle = 1;
1959                 if (!req->outstanding_cmds[handle])
1960                         break;
1961         }
1962         if (index == req->num_outstanding_cmds)
1963                 goto queuing_error;
1964
1965         /* Map the sg table so we have an accurate count of sg entries needed */
1966         if (scsi_sg_count(cmd)) {
1967                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1968                     scsi_sg_count(cmd), cmd->sc_data_direction);
1969                 if (unlikely(!nseg))
1970                         goto queuing_error;
1971         } else
1972                 nseg = 0;
1973
1974         tot_dsds = nseg;
1975         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1976         if (req->cnt < (req_cnt + 2)) {
1977                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1978                     RD_REG_DWORD_RELAXED(req->req_q_out);
1979                 if (req->ring_index < cnt)
1980                         req->cnt = cnt - req->ring_index;
1981                 else
1982                         req->cnt = req->length -
1983                                 (req->ring_index - cnt);
1984                 if (req->cnt < (req_cnt + 2))
1985                         goto queuing_error;
1986         }
1987
1988         /* Build command packet. */
1989         req->current_outstanding_cmd = handle;
1990         req->outstanding_cmds[handle] = sp;
1991         sp->handle = handle;
1992         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1993         req->cnt -= req_cnt;
1994
1995         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1996         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1997
1998         /* Zero out remaining portion of packet. */
1999         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2000         clr_ptr = (uint32_t *)cmd_pkt + 2;
2001         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2002         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2003
2004         /* Set NPORT-ID and LUN number*/
2005         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2006         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2007         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2008         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2009         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2010
2011         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2012         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2013
2014         cmd_pkt->task = TSK_SIMPLE;
2015
2016         /* Load SCSI command packet. */
2017         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2018         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2019
2020         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2021
2022         /* Build IOCB segments */
2023         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2024
2025         /* Set total data segment count. */
2026         cmd_pkt->entry_count = (uint8_t)req_cnt;
2027         wmb();
2028         /* Adjust ring index. */
2029         req->ring_index++;
2030         if (req->ring_index == req->length) {
2031                 req->ring_index = 0;
2032                 req->ring_ptr = req->ring;
2033         } else
2034                 req->ring_ptr++;
2035
2036         sp->flags |= SRB_DMA_VALID;
2037
2038         /* Set chip new ring index. */
2039         WRT_REG_DWORD(req->req_q_in, req->ring_index);
2040
2041         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2042         return QLA_SUCCESS;
2043
2044 queuing_error:
2045         if (tot_dsds)
2046                 scsi_dma_unmap(cmd);
2047
2048         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2049
2050         return QLA_FUNCTION_FAILED;
2051 }
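
/*
 * Note the locking difference from qla24xx_start_scsi(): the MQ variants
 * serialize on the per-queue-pair qp_lock rather than the adapter-wide
 * hardware_lock, which is what allows several queue pairs to submit
 * IOCBs concurrently.
 */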
2052
2053
2054 /**
2055  * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2056  * @sp: command to send to the ISP
2057  *
2058  * Returns non-zero if a failure occurred, else zero.
2059  */
2060 int
2061 qla2xxx_dif_start_scsi_mq(srb_t *sp)
2062 {
2063         int                     nseg;
2064         unsigned long           flags;
2065         uint32_t                *clr_ptr;
2066         uint32_t                index;
2067         uint32_t                handle;
2068         uint16_t                cnt;
2069         uint16_t                req_cnt = 0;
2070         uint16_t                tot_dsds;
2071         uint16_t                tot_prot_dsds;
2072         uint16_t                fw_prot_opts = 0;
2073         struct req_que          *req = NULL;
2074         struct rsp_que          *rsp = NULL;
2075         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
2076         struct scsi_qla_host    *vha = sp->fcport->vha;
2077         struct qla_hw_data      *ha = vha->hw;
2078         struct cmd_type_crc_2   *cmd_pkt;
2079         uint32_t                status = 0;
2080         struct qla_qpair        *qpair = sp->qpair;
2081
2082 #define QDSS_GOT_Q_SPACE        BIT_0
2083
2084         /* Check for host side state */
2085         if (!qpair->online) {
2086                 cmd->result = DID_NO_CONNECT << 16;
2087                 return QLA_INTERFACE_ERROR;
2088         }
2089
2090         if (!qpair->difdix_supported &&
2091                 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2092                 cmd->result = DID_NO_CONNECT << 16;
2093                 return QLA_INTERFACE_ERROR;
2094         }
2095
2096         /* Only process protection I/O or CDBs longer than 16 bytes here */
2097         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2098                 if (cmd->cmd_len <= 16)
2099                         return qla2xxx_start_scsi_mq(sp);
2100         }
2101
2102         spin_lock_irqsave(&qpair->qp_lock, flags);
2103
2104         /* Setup qpair pointers */
2105         rsp = qpair->rsp;
2106         req = qpair->req;
2107
2108         /* So we know we haven't pci_map'ed anything yet */
2109         tot_dsds = 0;
2110
2111         /* Send marker if required */
2112         if (vha->marker_needed != 0) {
2113                 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2114                     QLA_SUCCESS) {
2115                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2116                         return QLA_FUNCTION_FAILED;
2117                 }
2118                 vha->marker_needed = 0;
2119         }
2120
2121         /* Check for room in outstanding command list. */
2122         handle = req->current_outstanding_cmd;
2123         for (index = 1; index < req->num_outstanding_cmds; index++) {
2124                 handle++;
2125                 if (handle == req->num_outstanding_cmds)
2126                         handle = 1;
2127                 if (!req->outstanding_cmds[handle])
2128                         break;
2129         }
2130
2131         if (index == req->num_outstanding_cmds)
2132                 goto queuing_error;
2133
2134         /* Compute number of required data segments */
2135         /* Map the sg table so we have an accurate count of sg entries needed */
2136         if (scsi_sg_count(cmd)) {
2137                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2138                     scsi_sg_count(cmd), cmd->sc_data_direction);
2139                 if (unlikely(!nseg))
2140                         goto queuing_error;
2141                 else
2142                         sp->flags |= SRB_DMA_VALID;
2143
2144                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2145                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2146                         struct qla2_sgx sgx;
2147                         uint32_t        partial;
2148
2149                         memset(&sgx, 0, sizeof(struct qla2_sgx));
2150                         sgx.tot_bytes = scsi_bufflen(cmd);
2151                         sgx.cur_sg = scsi_sglist(cmd);
2152                         sgx.sp = sp;
2153
2154                         nseg = 0;
2155                         while (qla24xx_get_one_block_sg(
2156                             cmd->device->sector_size, &sgx, &partial))
2157                                 nseg++;
2158                 }
2159         } else
2160                 nseg = 0;
2161
2162         /* number of required data segments */
2163         tot_dsds = nseg;
2164
2165         /* Compute number of required protection segments */
2166         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2167                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2168                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2169                 if (unlikely(!nseg))
2170                         goto queuing_error;
2171                 else
2172                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
2173
2174                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2175                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2176                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2177                 }
2178         } else {
2179                 nseg = 0;
2180         }
2181
2182         req_cnt = 1;
2183         /* Total Data and protection sg segment(s) */
2184         tot_prot_dsds = nseg;
2185         tot_dsds += nseg;
2186         if (req->cnt < (req_cnt + 2)) {
2187                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2188                     RD_REG_DWORD_RELAXED(req->req_q_out);
2189                 if (req->ring_index < cnt)
2190                         req->cnt = cnt - req->ring_index;
2191                 else
2192                         req->cnt = req->length -
2193                                 (req->ring_index - cnt);
2194                 if (req->cnt < (req_cnt + 2))
2195                         goto queuing_error;
2196         }
2197
2198         status |= QDSS_GOT_Q_SPACE;
2199
2200         /* Build header part of command packet (excluding the OPCODE). */
2201         req->current_outstanding_cmd = handle;
2202         req->outstanding_cmds[handle] = sp;
2203         sp->handle = handle;
2204         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2205         req->cnt -= req_cnt;
2206
2207         /* Fill-in common area */
2208         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2209         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2210
2211         clr_ptr = (uint32_t *)cmd_pkt + 2;
2212         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2213
2214         /* Set NPORT-ID and LUN number*/
2215         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2216         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2217         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2218         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2219
2220         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2221         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2222
2223         /* Total Data and protection segment(s) */
2224         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2225
2226         /* Build IOCB segments and adjust for data protection segments */
2227         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2228             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2229                 QLA_SUCCESS)
2230                 goto queuing_error;
2231
2232         cmd_pkt->entry_count = (uint8_t)req_cnt;
2233         cmd_pkt->timeout = cpu_to_le16(0);
2234         wmb();
2235
2236         /* Adjust ring index. */
2237         req->ring_index++;
2238         if (req->ring_index == req->length) {
2239                 req->ring_index = 0;
2240                 req->ring_ptr = req->ring;
2241         } else
2242                 req->ring_ptr++;
2243
2244         /* Set chip new ring index. */
2245         WRT_REG_DWORD(req->req_q_in, req->ring_index);
2246
2247         /* Manage unprocessed RIO/ZIO commands in response queue. */
2248         if (vha->flags.process_response_queue &&
2249             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2250                 qla24xx_process_response_queue(vha, rsp);
2251
2252         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2253
2254         return QLA_SUCCESS;
2255
2256 queuing_error:
2257         if (status & QDSS_GOT_Q_SPACE) {
2258                 req->outstanding_cmds[handle] = NULL;
2259                 req->cnt += req_cnt;
2260         }
2261         /* Cleanup will be performed by the caller (queuecommand) */
2262
2263         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2264         return QLA_FUNCTION_FAILED;
2265 }
2266
2267 /* Generic Control-SRB manipulation functions. */
2268
2269 /* hardware_lock assumed to be held. */
2270
2271 void *
2272 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2273 {
2274         scsi_qla_host_t *vha = qpair->vha;
2275         struct qla_hw_data *ha = vha->hw;
2276         struct req_que *req = qpair->req;
2277         device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2278         uint32_t index, handle;
2279         request_t *pkt;
2280         uint16_t cnt, req_cnt;
2281
2282         pkt = NULL;
2283         req_cnt = 1;
2284         handle = 0;
2285
2286         if (sp && (sp->type != SRB_SCSI_CMD)) {
2287                 /* Adjust entry-counts as needed. */
2288                 req_cnt = sp->iocbs;
2289         }
2290
2291         /* Check for room on request queue. */
2292         if (req->cnt < req_cnt + 2) {
2293                 if (qpair->use_shadow_reg)
2294                         cnt = *req->out_ptr;
2295                 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2296                     IS_QLA28XX(ha))
2297                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2298                 else if (IS_P3P_TYPE(ha))
2299                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2300                 else if (IS_FWI2_CAPABLE(ha))
2301                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2302                 else if (IS_QLAFX00(ha))
2303                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2304                 else
2305                         cnt = qla2x00_debounce_register(
2306                             ISP_REQ_Q_OUT(ha, &reg->isp));
2307
2308                 if  (req->ring_index < cnt)
2309                         req->cnt = cnt - req->ring_index;
2310                 else
2311                         req->cnt = req->length -
2312                             (req->ring_index - cnt);
2313         }
2314         if (req->cnt < req_cnt + 2)
2315                 goto queuing_error;
2316
2317         if (sp) {
2318                 /* Check for room in outstanding command list. */
2319                 handle = req->current_outstanding_cmd;
2320                 for (index = 1; index < req->num_outstanding_cmds; index++) {
2321                         handle++;
2322                         if (handle == req->num_outstanding_cmds)
2323                                 handle = 1;
2324                         if (!req->outstanding_cmds[handle])
2325                                 break;
2326                 }
2327                 if (index == req->num_outstanding_cmds) {
2328                         ql_log(ql_log_warn, vha, 0x700b,
2329                             "No room on outstanding cmd array.\n");
2330                         goto queuing_error;
2331                 }
2332
2333                 /* Prep command array. */
2334                 req->current_outstanding_cmd = handle;
2335                 req->outstanding_cmds[handle] = sp;
2336                 sp->handle = handle;
2337         }
2338
2339         /* Prep packet */
2340         req->cnt -= req_cnt;
2341         pkt = req->ring_ptr;
2342         memset(pkt, 0, REQUEST_ENTRY_SIZE);
2343         if (IS_QLAFX00(ha)) {
2344                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2345                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2346         } else {
2347                 pkt->entry_count = req_cnt;
2348                 pkt->handle = handle;
2349         }
2350
2351         return pkt;
2352
2353 queuing_error:
2354         qpair->tgt_counters.num_alloc_iocb_failed++;
2355         return pkt;
2356 }
2357
2358 void *
2359 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2360 {
2361         scsi_qla_host_t *vha = qpair->vha;
2362
2363         if (qla2x00_reset_active(vha))
2364                 return NULL;
2365
2366         return __qla2x00_alloc_iocbs(qpair, sp);
2367 }
2368
2369 void *
2370 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2371 {
2372         return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2373 }
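
/*
 * A hedged usage sketch for the allocators above (the sketch_* name is
 * hypothetical): callers hold the appropriate queue lock, allocate a
 * zeroed packet, treat NULL as "no ring space or no free handle", and
 * then fill in the IOCB-type-specific fields.
 */
static request_t *sketch_issue_logio(struct qla_qpair *qpair, srb_t *sp)
{
	request_t *pkt = __qla2x00_alloc_iocbs(qpair, sp);
	struct logio_entry_24xx *logio = (void *)pkt;

	if (!pkt)
		return NULL;	/* ring or outstanding-cmd array full */
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	return pkt;
}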
2374
2375 static void
2376 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2377 {
2378         struct srb_iocb *lio = &sp->u.iocb_cmd;
2379
2380         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2381         logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2382         if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2383                 logio->control_flags |= LCF_NVME_PRLI;
2384                 if (sp->vha->flags.nvme_first_burst)
2385                         logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
2386         }
2387
2388         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2389         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2390         logio->port_id[1] = sp->fcport->d_id.b.area;
2391         logio->port_id[2] = sp->fcport->d_id.b.domain;
2392         logio->vp_index = sp->vha->vp_idx;
2393 }
2394
2395 static void
2396 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2397 {
2398         struct srb_iocb *lio = &sp->u.iocb_cmd;
2399
2400         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2401         if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2402                 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2403         } else {
2404                 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2405                 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2406                         logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2407                 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2408                         logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2409         }
2410         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2411         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2412         logio->port_id[1] = sp->fcport->d_id.b.area;
2413         logio->port_id[2] = sp->fcport->d_id.b.domain;
2414         logio->vp_index = sp->vha->vp_idx;
2415 }
2416
2417 static void
2418 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2419 {
2420         struct qla_hw_data *ha = sp->vha->hw;
2421         struct srb_iocb *lio = &sp->u.iocb_cmd;
2422         uint16_t opts;
2423
2424         mbx->entry_type = MBX_IOCB_TYPE;
2425         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2426         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2427         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2428         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2429         if (HAS_EXTENDED_IDS(ha)) {
2430                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2431                 mbx->mb10 = cpu_to_le16(opts);
2432         } else {
2433                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2434         }
2435         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2436         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2437             sp->fcport->d_id.b.al_pa);
2438         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2439 }
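
/*
 * Example encoding for the legacy (non-extended-ID) case above: loop_id
 * 0x7f with the conditional-PLOGI option (BIT_0) packs into mb1 as
 * (0x7f << 8) | 0x01 = 0x7f01; with extended IDs the loop ID and option
 * bits travel separately in mb1 and mb10.
 */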
2440
2441 static void
2442 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2443 {
2444         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2445         logio->control_flags =
2446             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2447         if (!sp->fcport->keep_nport_handle)
2448                 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2449         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2450         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2451         logio->port_id[1] = sp->fcport->d_id.b.area;
2452         logio->port_id[2] = sp->fcport->d_id.b.domain;
2453         logio->vp_index = sp->vha->vp_idx;
2454 }
2455
2456 static void
2457 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2458 {
2459         struct qla_hw_data *ha = sp->vha->hw;
2460
2461         mbx->entry_type = MBX_IOCB_TYPE;
2462         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2463         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2464         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2465             cpu_to_le16(sp->fcport->loop_id) :
2466             cpu_to_le16(sp->fcport->loop_id << 8);
2467         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2468         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2469             sp->fcport->d_id.b.al_pa);
2470         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2471         /* Implicit: mbx->mb10 = 0. */
2472 }
2473
2474 static void
2475 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2476 {
2477         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2478         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2479         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2480         logio->vp_index = sp->vha->vp_idx;
2481 }
2482
2483 static void
2484 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2485 {
2486         struct qla_hw_data *ha = sp->vha->hw;
2487
2488         mbx->entry_type = MBX_IOCB_TYPE;
2489         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2490         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2491         if (HAS_EXTENDED_IDS(ha)) {
2492                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2493                 mbx->mb10 = cpu_to_le16(BIT_0);
2494         } else {
2495                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2496         }
2497         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2498         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2499         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2500         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2501         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2502 }
2503
2504 static void
2505 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2506 {
2507         uint32_t flags;
2508         uint64_t lun;
2509         struct fc_port *fcport = sp->fcport;
2510         scsi_qla_host_t *vha = fcport->vha;
2511         struct qla_hw_data *ha = vha->hw;
2512         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2513         struct req_que *req = vha->req;
2514
2515         flags = iocb->u.tmf.flags;
2516         lun = iocb->u.tmf.lun;
2517
2518         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2519         tsk->entry_count = 1;
2520         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2521         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2522         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2523         tsk->control_flags = cpu_to_le32(flags);
2524         tsk->port_id[0] = fcport->d_id.b.al_pa;
2525         tsk->port_id[1] = fcport->d_id.b.area;
2526         tsk->port_id[2] = fcport->d_id.b.domain;
2527         tsk->vp_index = fcport->vha->vp_idx;
2528
2529         if (flags == TCF_LUN_RESET) {
2530                 int_to_scsilun(lun, &tsk->lun);
2531                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2532                         sizeof(tsk->lun));
2533         }
2534 }
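
/*
 * The task-management timeout above assumes ha->r_a_tov is tracked in
 * 100 ms units, so r_a_tov / 10 * 2 expresses two R_A_TOV periods in
 * seconds (e.g. r_a_tov = 100, i.e. 10 s, yields a 20 s IOCB timeout).
 */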
2535
2536 void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2537 {
2538         timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2539         sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2540         sp->free = qla2x00_sp_free;
2541         if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2542                 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2543         add_timer(&sp->u.iocb_cmd.timer);
2544 }
2545
2546 static void
2547 qla2x00_els_dcmd_sp_free(void *data)
2548 {
2549         srb_t *sp = data;
2550         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2551
2552         kfree(sp->fcport);
2553
2554         if (elsio->u.els_logo.els_logo_pyld)
2555                 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2556                     elsio->u.els_logo.els_logo_pyld,
2557                     elsio->u.els_logo.els_logo_pyld_dma);
2558
2559         del_timer(&elsio->timer);
2560         qla2x00_rel_sp(sp);
2561 }
2562
2563 static void
2564 qla2x00_els_dcmd_iocb_timeout(void *data)
2565 {
2566         srb_t *sp = data;
2567         fc_port_t *fcport = sp->fcport;
2568         struct scsi_qla_host *vha = sp->vha;
2569         struct srb_iocb *lio = &sp->u.iocb_cmd;
2570
2571         ql_dbg(ql_dbg_io, vha, 0x3069,
2572             "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2573             sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2574             fcport->d_id.b.al_pa);
2575
2576         complete(&lio->u.els_logo.comp);
2577 }
2578
2579 static void
2580 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2581 {
2582         srb_t *sp = ptr;
2583         fc_port_t *fcport = sp->fcport;
2584         struct srb_iocb *lio = &sp->u.iocb_cmd;
2585         struct scsi_qla_host *vha = sp->vha;
2586
2587         ql_dbg(ql_dbg_io, vha, 0x3072,
2588             "%s hdl=%x, portid=%02x%02x%02x done\n",
2589             sp->name, sp->handle, fcport->d_id.b.domain,
2590             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2591
2592         complete(&lio->u.els_logo.comp);
2593 }
2594
2595 int
2596 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2597     port_id_t remote_did)
2598 {
2599         srb_t *sp;
2600         fc_port_t *fcport = NULL;
2601         struct srb_iocb *elsio = NULL;
2602         struct qla_hw_data *ha = vha->hw;
2603         struct els_logo_payload logo_pyld;
2604         int rval = QLA_SUCCESS;
2605
2606         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2607         if (!fcport) {
2608                ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2609                return -ENOMEM;
2610         }
2611
2612         /* Alloc SRB structure */
2613         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2614         if (!sp) {
2615                 kfree(fcport);
2616                 ql_log(ql_log_info, vha, 0x70e6,
2617                  "SRB allocation failed\n");
2618                 return -ENOMEM;
2619         }
2620
2621         elsio = &sp->u.iocb_cmd;
2622         fcport->loop_id = 0xFFFF;
2623         fcport->d_id.b.domain = remote_did.b.domain;
2624         fcport->d_id.b.area = remote_did.b.area;
2625         fcport->d_id.b.al_pa = remote_did.b.al_pa;
2626
2627         ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2628             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2629
2630         sp->type = SRB_ELS_DCMD;
2631         sp->name = "ELS_DCMD";
2632         sp->fcport = fcport;
2633         elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2634         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2635         init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2636         sp->done = qla2x00_els_dcmd_sp_done;
2637         sp->free = qla2x00_els_dcmd_sp_free;
2638
2639         elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2640                             DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2641                             GFP_KERNEL);
2642
2643         if (!elsio->u.els_logo.els_logo_pyld) {
2644                 sp->free(sp);
2645                 return QLA_FUNCTION_FAILED;
2646         }
2647
2648         memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2649
2650         elsio->u.els_logo.els_cmd = els_opcode;
2651         logo_pyld.opcode = els_opcode;
2652         logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2653         logo_pyld.s_id[1] = vha->d_id.b.area;
2654         logo_pyld.s_id[2] = vha->d_id.b.domain;
2655         host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2656         memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2657
2658         memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2659             sizeof(struct els_logo_payload));
2660
2661         rval = qla2x00_start_sp(sp);
2662         if (rval != QLA_SUCCESS) {
2663                 sp->free(sp);
2664                 return QLA_FUNCTION_FAILED;
2665         }
2666
2667         ql_dbg(ql_dbg_io, vha, 0x3074,
2668             "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2669             sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2670             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2671
2672         wait_for_completion(&elsio->u.els_logo.comp);
2673
2674         sp->free(sp);
2675         return rval;
2676 }
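
/*
 * qla24xx_els_dcmd_iocb() is synchronous by construction: it allocates a
 * temporary fc_port for the destination N_Port ID, builds the LOGO
 * payload with the local S_ID and WWPN, starts the SRB, and then blocks
 * on els_logo.comp until either the done callback or the timeout handler
 * completes it.
 */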
2677
2678 static void
2679 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2680 {
2681         scsi_qla_host_t *vha = sp->vha;
2682         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2683
2684         els_iocb->entry_type = ELS_IOCB_TYPE;
2685         els_iocb->entry_count = 1;
2686         els_iocb->sys_define = 0;
2687         els_iocb->entry_status = 0;
2688         els_iocb->handle = sp->handle;
2689         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2690         els_iocb->tx_dsd_count = 1;
2691         els_iocb->vp_index = vha->vp_idx;
2692         els_iocb->sof_type = EST_SOFI3;
2693         els_iocb->rx_dsd_count = 0;
2694         els_iocb->opcode = elsio->u.els_logo.els_cmd;
2695
2696         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2697         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2698         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2699         els_iocb->s_id[0] = vha->d_id.b.al_pa;
2700         els_iocb->s_id[1] = vha->d_id.b.area;
2701         els_iocb->s_id[2] = vha->d_id.b.domain;
2702         els_iocb->control_flags = 0;
2703
2704         if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2705                 els_iocb->tx_byte_count = els_iocb->tx_len =
2706                         sizeof(struct els_plogi_payload);
2707                 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2708                                    &els_iocb->tx_address);
2709                 els_iocb->rx_dsd_count = 1;
2710                 els_iocb->rx_byte_count = els_iocb->rx_len =
2711                         sizeof(struct els_plogi_payload);
2712                 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2713                                    &els_iocb->rx_address);
2714
2715                 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2716                     "PLOGI ELS IOCB:\n");
2717                 ql_dump_buffer(ql_log_info, vha, 0x0109,
2718                     (uint8_t *)els_iocb, 0x70);
2719         } else {
2720                 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2721                 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2722                                    &els_iocb->tx_address);
2723                 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2724
2725                 els_iocb->rx_byte_count = 0;
2726                 els_iocb->rx_address = 0;
2727                 els_iocb->rx_len = 0;
2728         }
2729
2730         sp->vha->qla_stats.control_requests++;
2731 }
2732
2733 static void
2734 qla2x00_els_dcmd2_iocb_timeout(void *data)
2735 {
2736         srb_t *sp = data;
2737         fc_port_t *fcport = sp->fcport;
2738         struct scsi_qla_host *vha = sp->vha;
2739         struct qla_hw_data *ha = vha->hw;
2740         unsigned long flags = 0;
2741         int res;
2742
2743         ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2744             "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2745             sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2746
2747         /* Abort the exchange */
2748         spin_lock_irqsave(&ha->hardware_lock, flags);
2749         res = ha->isp_ops->abort_command(sp);
2750         ql_dbg(ql_dbg_io, vha, 0x3070,
2751             "mbx abort_command %s\n",
2752             (res == QLA_SUCCESS) ? "successful" : "failed");
2753         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2754
2755         sp->done(sp, QLA_FUNCTION_TIMEOUT);
2756 }
2757
2758 static void
2759 qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2760 {
2761         srb_t *sp = ptr;
2762         fc_port_t *fcport = sp->fcport;
2763         struct srb_iocb *lio = &sp->u.iocb_cmd;
2764         struct scsi_qla_host *vha = sp->vha;
2765         struct event_arg ea;
2766         struct qla_work_evt *e;
2767
2768         ql_dbg(ql_dbg_disc, vha, 0x3072,
2769             "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2770             sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2771
2772         fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2773         del_timer(&sp->u.iocb_cmd.timer);
2774
2775         if (sp->flags & SRB_WAKEUP_ON_COMP)
2776                 complete(&lio->u.els_plogi.comp);
2777         else {
2778                 if (res) {
2779                         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2780                 } else {
2781                         memset(&ea, 0, sizeof(ea));
2782                         ea.fcport = fcport;
2783                         ea.rc = res;
2784                         ea.event = FCME_ELS_PLOGI_DONE;
2785                         qla2x00_fcport_event_handler(vha, &ea);
2786                 }
2787
2788                 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2789                 if (!e) {
2790                         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2791
2792                         if (elsio->u.els_plogi.els_plogi_pyld)
2793                                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2794                                     elsio->u.els_plogi.tx_size,
2795                                     elsio->u.els_plogi.els_plogi_pyld,
2796                                     elsio->u.els_plogi.els_plogi_pyld_dma);
2797
2798                         if (elsio->u.els_plogi.els_resp_pyld)
2799                                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2800                                     elsio->u.els_plogi.rx_size,
2801                                     elsio->u.els_plogi.els_resp_pyld,
2802                                     elsio->u.els_plogi.els_resp_pyld_dma);
2803                         sp->free(sp);
2804                         return;
2805                 }
2806                 e->u.iosb.sp = sp;
2807                 qla2x00_post_work(vha, e);
2808         }
2809 }
2810
2811 int
2812 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2813     fc_port_t *fcport, bool wait)
2814 {
2815         srb_t *sp;
2816         struct srb_iocb *elsio = NULL;
2817         struct qla_hw_data *ha = vha->hw;
2818         int rval = QLA_SUCCESS;
2819         void    *ptr, *resp_ptr;
2820
2821         /* Alloc SRB structure */
2822         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2823         if (!sp) {
2824                 ql_log(ql_log_info, vha, 0x70e6,
2825                     "SRB allocation failed\n");
2826                 return -ENOMEM;
2827         }
2828
2829         elsio = &sp->u.iocb_cmd;
2830         ql_dbg(ql_dbg_io, vha, 0x3073,
2831             "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2832
2833         fcport->flags |= FCF_ASYNC_SENT;
2834         sp->type = SRB_ELS_DCMD;
2835         sp->name = "ELS_DCMD";
2836         sp->fcport = fcport;
2837
2838         elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2839         init_completion(&elsio->u.els_plogi.comp);
2840         if (wait)
2841                 sp->flags = SRB_WAKEUP_ON_COMP;
2842
2843         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2844
2845         sp->done = qla2x00_els_dcmd2_sp_done;
2846         elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2847
2848         ptr = elsio->u.els_plogi.els_plogi_pyld =
2849             dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2850                 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2851
2852         if (!elsio->u.els_plogi.els_plogi_pyld) {
2853                 rval = QLA_FUNCTION_FAILED;
2854                 goto out;
2855         }
2856
2857         resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2858             dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2859                 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2860
2861         if (!elsio->u.els_plogi.els_resp_pyld) {
2862                 rval = QLA_FUNCTION_FAILED;
2863                 goto out;
2864         }
2865
2866         ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2867
2868         memset(ptr, 0, sizeof(struct els_plogi_payload));
2869         memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2870         memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2871             &ha->plogi_els_payld.data,
2872             sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2873
2874         elsio->u.els_plogi.els_cmd = els_opcode;
2875         elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2876
2877         ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2878         ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2879             (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2880
2881         rval = qla2x00_start_sp(sp);
2882         if (rval != QLA_SUCCESS) {
2883                 rval = QLA_FUNCTION_FAILED;
2884         } else {
2885                 ql_dbg(ql_dbg_disc, vha, 0x3074,
2886                     "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2887                     sp->name, sp->handle, fcport->loop_id,
2888                     fcport->d_id.b24, vha->d_id.b24);
2889         }
2890
2891         if (wait) {
2892                 wait_for_completion(&elsio->u.els_plogi.comp);
2893
2894                 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2895                         rval = QLA_FUNCTION_FAILED;
2896         } else {
2897                 goto done;
2898         }
2899
2900 out:
2901         fcport->flags &= ~(FCF_ASYNC_SENT);
2902         if (elsio->u.els_plogi.els_plogi_pyld)
2903                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2904                     elsio->u.els_plogi.tx_size,
2905                     elsio->u.els_plogi.els_plogi_pyld,
2906                     elsio->u.els_plogi.els_plogi_pyld_dma);
2907
2908         if (elsio->u.els_plogi.els_resp_pyld)
2909                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2910                     elsio->u.els_plogi.rx_size,
2911                     elsio->u.els_plogi.els_resp_pyld,
2912                     elsio->u.els_plogi.els_resp_pyld_dma);
2913
2914         sp->free(sp);
2915 done:
2916         return rval;
2917 }
2918
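/*
 * Format an ELS pass-through IOCB from a BSG job.  The transmit DSD maps
 * the request payload (the ELS command to send) and the receive DSD maps
 * the reply payload; only one DSD is filled in for each direction, so
 * single-segment BSG payloads are assumed here.
 */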
2919 static void
2920 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2921 {
2922         struct bsg_job *bsg_job = sp->u.bsg_job;
2923         struct fc_bsg_request *bsg_request = bsg_job->request;
2924
2925         els_iocb->entry_type = ELS_IOCB_TYPE;
2926         els_iocb->entry_count = 1;
2927         els_iocb->sys_define = 0;
2928         els_iocb->entry_status = 0;
2929         els_iocb->handle = sp->handle;
2930         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2931         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2932         els_iocb->vp_index = sp->vha->vp_idx;
2933         els_iocb->sof_type = EST_SOFI3;
2934         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2935
2936         els_iocb->opcode =
2937             sp->type == SRB_ELS_CMD_RPT ?
2938             bsg_request->rqst_data.r_els.els_code :
2939             bsg_request->rqst_data.h_els.command_code;
2940         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2941         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2942         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2943         els_iocb->control_flags = 0;
2944         els_iocb->rx_byte_count =
2945             cpu_to_le32(bsg_job->reply_payload.payload_len);
2946         els_iocb->tx_byte_count =
2947             cpu_to_le32(bsg_job->request_payload.payload_len);
2948
2949         put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
2950                            &els_iocb->tx_address);
2951         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2952             (bsg_job->request_payload.sg_list));
2953
2954         put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
2955                            &els_iocb->rx_address);
2956         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2957             (bsg_job->reply_payload.sg_list));
2958
2959         sp->vha->qla_stats.control_requests++;
2960 }
2961
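/*
 * Format a CT pass-through IOCB (pre-24xx MS IOCB).  The MS IOCB embeds
 * one request DSD and one response DSD; response scatter/gather entries
 * beyond the first spill over into Continuation Type 1 IOCBs of five
 * DSDs each.  A seven-segment reply, for example, uses the embedded
 * rsp_dsd plus two continuation entries (5 + 1 DSDs), so entry_count
 * ends up as 3.
 */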
2962 static void
2963 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2964 {
2965         uint16_t        avail_dsds;
2966         struct dsd64    *cur_dsd;
2967         struct scatterlist *sg;
2968         int index;
2969         uint16_t tot_dsds;
2970         scsi_qla_host_t *vha = sp->vha;
2971         struct qla_hw_data *ha = vha->hw;
2972         struct bsg_job *bsg_job = sp->u.bsg_job;
2973         int entry_count = 1;
2974
2975         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2976         ct_iocb->entry_type = CT_IOCB_TYPE;
2977         ct_iocb->entry_status = 0;
2978         ct_iocb->handle1 = sp->handle;
2979         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2980         ct_iocb->status = cpu_to_le16(0);
2981         ct_iocb->control_flags = cpu_to_le16(0);
2982         ct_iocb->timeout = 0;
2983         ct_iocb->cmd_dsd_count =
2984             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2985         ct_iocb->total_dsd_count =
2986             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2987         ct_iocb->req_bytecount =
2988             cpu_to_le32(bsg_job->request_payload.payload_len);
2989         ct_iocb->rsp_bytecount =
2990             cpu_to_le32(bsg_job->reply_payload.payload_len);
2991
2992         put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
2993                            &ct_iocb->req_dsd.address);
2994         ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
2995
2996         put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
2997                            &ct_iocb->rsp_dsd.address);
2998         ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
2999
3000         avail_dsds = 1;
3001         cur_dsd = &ct_iocb->rsp_dsd;
3002         index = 0;
3003         tot_dsds = bsg_job->reply_payload.sg_cnt;
3004
3005         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3006                 cont_a64_entry_t *cont_pkt;
3007
3008                 /* Allocate additional continuation packets? */
3009                 if (avail_dsds == 0) {
3010                         /*
3011                          * Five DSDs are available in the Cont.
3012                          * Type 1 IOCB.
3013                          */
3014                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3015                             vha->hw->req_q_map[0]);
3016                         cur_dsd = cont_pkt->dsd;
3017                         avail_dsds = 5;
3018                         entry_count++;
3019                 }
3020
3021                 append_dsd64(&cur_dsd, sg);
3022                 avail_dsds--;
3023         }
3024         ct_iocb->entry_count = entry_count;
3025
3026         sp->vha->qla_stats.control_requests++;
3027 }
3028
3029 static void
3030 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3031 {
3032         uint16_t        avail_dsds;
3033         struct dsd64    *cur_dsd;
3034         struct scatterlist *sg;
3035         int index;
3036         uint16_t cmd_dsds, rsp_dsds;
3037         scsi_qla_host_t *vha = sp->vha;
3038         struct qla_hw_data *ha = vha->hw;
3039         struct bsg_job *bsg_job = sp->u.bsg_job;
3040         int entry_count = 1;
3041         cont_a64_entry_t *cont_pkt = NULL;
3042
3043         ct_iocb->entry_type = CT_IOCB_TYPE;
3044         ct_iocb->entry_status = 0;
3045         ct_iocb->sys_define = 0;
3046         ct_iocb->handle = sp->handle;
3047
3048         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3049         ct_iocb->vp_index = sp->vha->vp_idx;
3050         ct_iocb->comp_status = cpu_to_le16(0);
3051
3052         cmd_dsds = bsg_job->request_payload.sg_cnt;
3053         rsp_dsds = bsg_job->reply_payload.sg_cnt;
3054
3055         ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3056         ct_iocb->timeout = 0;
3057         ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3058         ct_iocb->cmd_byte_count =
3059             cpu_to_le32(bsg_job->request_payload.payload_len);
3060
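        /* The 24xx CT IOCB embeds two DSDs, shared between command and
         * response segments; anything beyond that spills into
         * Continuation Type 1 IOCBs of five DSDs each.
         */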
3061         avail_dsds = 2;
3062         cur_dsd = ct_iocb->dsd;
3063         index = 0;
3064
3065         for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3066                 /* Allocate additional continuation packets? */
3067                 if (avail_dsds == 0) {
3068                         /*
3069                          * Five DSDs are available in the Cont.
3070                          * Type 1 IOCB.
3071                          */
3072                         cont_pkt = qla2x00_prep_cont_type1_iocb(
3073                             vha, ha->req_q_map[0]);
3074                         cur_dsd = cont_pkt->dsd;
3075                         avail_dsds = 5;
3076                         entry_count++;
3077                 }
3078
3079                 append_dsd64(&cur_dsd, sg);
3080                 avail_dsds--;
3081         }
3082
3083         index = 0;
3084
3085         for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3086                 /* Allocate additional continuation packets? */
3087                 if (avail_dsds == 0) {
3088                         /*
3089                          * Five DSDs are available in the Cont.
3090                          * Type 1 IOCB.
3091                          */
3092                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3093                             ha->req_q_map[0]);
3094                         cur_dsd = cont_pkt->dsd;
3095                         avail_dsds = 5;
3096                         entry_count++;
3097                 }
3098
3099                 append_dsd64(&cur_dsd, sg);
3100                 avail_dsds--;
3101         }
3102         ct_iocb->entry_count = entry_count;
3103 }
3104
3105 /**
3106  * qla82xx_start_scsi() - Send a SCSI command to the ISP
3107  * @sp: command to send to the ISP
3108  *
3109  * Returns non-zero if a failure occurred, else zero.
3110  */
3111 int
3112 qla82xx_start_scsi(srb_t *sp)
3113 {
3114         int             nseg;
3115         unsigned long   flags;
3116         struct scsi_cmnd *cmd;
3117         uint32_t        *clr_ptr;
3118         uint32_t        index;
3119         uint32_t        handle;
3120         uint16_t        cnt;
3121         uint16_t        req_cnt;
3122         uint16_t        tot_dsds;
3123         struct device_reg_82xx __iomem *reg;
3124         uint32_t dbval;
3125         uint32_t *fcp_dl;
3126         uint8_t additional_cdb_len;
3127         struct ct6_dsd *ctx;
3128         struct scsi_qla_host *vha = sp->vha;
3129         struct qla_hw_data *ha = vha->hw;
3130         struct req_que *req = NULL;
3131         struct rsp_que *rsp = NULL;
3132
3133         /* Setup device pointers. */
3134         reg = &ha->iobase->isp82;
3135         cmd = GET_CMD_SP(sp);
3136         req = vha->req;
3137         rsp = ha->rsp_q_map[0];
3138
3139         /* So we know we haven't pci_map'ed anything yet */
3140         tot_dsds = 0;
3141
3142         dbval = 0x04 | (ha->portnum << 5);
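        /* Doorbell value: request type in the low bits plus the port
         * number; the request queue id and ring index are OR'ed in just
         * before the doorbell is written at the end of this function.
         */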
3143
3144         /* Send marker if required */
3145         if (vha->marker_needed != 0) {
3146                 if (qla2x00_marker(vha, ha->base_qpair,
3147                         0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3148                         ql_log(ql_log_warn, vha, 0x300c,
3149                             "qla2x00_marker failed for cmd=%p.\n", cmd);
3150                         return QLA_FUNCTION_FAILED;
3151                 }
3152                 vha->marker_needed = 0;
3153         }
3154
3155         /* Acquire ring specific lock */
3156         spin_lock_irqsave(&ha->hardware_lock, flags);
3157
3158         /* Check for room in outstanding command list. */
3159         handle = req->current_outstanding_cmd;
3160         for (index = 1; index < req->num_outstanding_cmds; index++) {
3161                 handle++;
3162                 if (handle == req->num_outstanding_cmds)
3163                         handle = 1;
3164                 if (!req->outstanding_cmds[handle])
3165                         break;
3166         }
3167         if (index == req->num_outstanding_cmds)
3168                 goto queuing_error;
3169
3170         /* Map the sg table so we have an accurate count of sg entries needed */
3171         if (scsi_sg_count(cmd)) {
3172                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3173                     scsi_sg_count(cmd), cmd->sc_data_direction);
3174                 if (unlikely(!nseg))
3175                         goto queuing_error;
3176         } else
3177                 nseg = 0;
3178
3179         tot_dsds = nseg;
3180
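        /* ql2xshiftctondsd is the crossover between the two command
         * formats: larger s/g lists use a Command Type 6 IOCB, which
         * references externally allocated DSD lists, while smaller ones
         * use a Command Type 7 IOCB with the DSDs placed on the request
         * ring itself.
         */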
3181         if (tot_dsds > ql2xshiftctondsd) {
3182                 struct cmd_type_6 *cmd_pkt;
3183                 uint16_t more_dsd_lists = 0;
3184                 struct dsd_dma *dsd_ptr;
3185                 uint16_t i;
3186
3187                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3188                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3189                         ql_dbg(ql_dbg_io, vha, 0x300d,
3190                             "Num of DSD list %d is more than %d for cmd=%p.\n",
3191                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3192                             cmd);
3193                         goto queuing_error;
3194                 }
3195
3196                 if (more_dsd_lists <= ha->gbl_dsd_avail)
3197                         goto sufficient_dsds;
3198                 else
3199                         more_dsd_lists -= ha->gbl_dsd_avail;
3200
3201                 for (i = 0; i < more_dsd_lists; i++) {
3202                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3203                         if (!dsd_ptr) {
3204                                 ql_log(ql_log_fatal, vha, 0x300e,
3205                                     "Failed to allocate memory for dsd_dma "
3206                                     "for cmd=%p.\n", cmd);
3207                                 goto queuing_error;
3208                         }
3209
3210                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3211                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3212                         if (!dsd_ptr->dsd_addr) {
3213                                 kfree(dsd_ptr);
3214                                 ql_log(ql_log_fatal, vha, 0x300f,
3215                                     "Failed to allocate memory for dsd_addr "
3216                                     "for cmd=%p.\n", cmd);
3217                                 goto queuing_error;
3218                         }
3219                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3220                         ha->gbl_dsd_avail++;
3221                 }
3222
3223 sufficient_dsds:
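                /* The Type 6 command itself occupies a single ring
                 * entry; its DSDs live in the external lists chained
                 * above.
                 */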
3224                 req_cnt = 1;
3225
3226                 if (req->cnt < (req_cnt + 2)) {
3227                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3228                                 &reg->req_q_out[0]);
3229                         if (req->ring_index < cnt)
3230                                 req->cnt = cnt - req->ring_index;
3231                         else
3232                                 req->cnt = req->length -
3233                                         (req->ring_index - cnt);
3234                         if (req->cnt < (req_cnt + 2))
3235                                 goto queuing_error;
3236                 }
3237
3238                 ctx = sp->u.scmd.ctx =
3239                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3240                 if (!ctx) {
3241                         ql_log(ql_log_fatal, vha, 0x3010,
3242                             "Failed to allocate ctx for cmd=%p.\n", cmd);
3243                         goto queuing_error;
3244                 }
3245
3246                 memset(ctx, 0, sizeof(struct ct6_dsd));
3247                 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3248                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3249                 if (!ctx->fcp_cmnd) {
3250                         ql_log(ql_log_fatal, vha, 0x3011,
3251                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3252                         goto queuing_error;
3253                 }
3254
3255                 /* Initialize the DSD list and dma handle */
3256                 INIT_LIST_HEAD(&ctx->dsd_list);
3257                 ctx->dsd_use_cnt = 0;
3258
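                /* FCP_CMND length arithmetic: 12 fixed header bytes (the
                 * 8-byte LUN plus CRN/task/flag bytes) precede the CDB,
                 * and the 4-byte FCP_DL count follows it.
                 */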
3259                 if (cmd->cmd_len > 16) {
3260                         additional_cdb_len = cmd->cmd_len - 16;
3261                         if ((cmd->cmd_len % 4) != 0) {
3262                                 /* SCSI command bigger than 16 bytes must be
3263                                  * multiple of 4
3264                                  */
3265                                 ql_log(ql_log_warn, vha, 0x3012,
3266                                     "scsi cmd len %d not multiple of 4 "
3267                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
3268                                 goto queuing_error_fcp_cmnd;
3269                         }
3270                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3271                 } else {
3272                         additional_cdb_len = 0;
3273                         ctx->fcp_cmnd_len = 12 + 16 + 4;
3274                 }
3275
3276                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3277                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3278
3279                 /* Zero out remaining portion of packet. */
3280                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
3281                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3282                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3283                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3284
3285                 /* Set NPORT-ID and LUN number*/
3286                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3287                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3288                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3289                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3290                 cmd_pkt->vp_index = sp->vha->vp_idx;
3291
3292                 /* Build IOCB segments */
3293                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3294                         goto queuing_error_fcp_cmnd;
3295
3296                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3297                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3298
3299                 /* build FCP_CMND IU */
3300                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3301                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3302
3303                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3304                         ctx->fcp_cmnd->additional_cdb_len |= 1;
3305                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3306                         ctx->fcp_cmnd->additional_cdb_len |= 2;
3307
3308                 /* Populate the FCP_PRIO. */
3309                 if (ha->flags.fcp_prio_enabled)
3310                         ctx->fcp_cmnd->task_attribute |=
3311                             sp->fcport->fcp_prio << 3;
3312
3313                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3314
3315                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3316                     additional_cdb_len);
3317                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3318
3319                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3320                 put_unaligned_le64(ctx->fcp_cmnd_dma,
3321                                    &cmd_pkt->fcp_cmnd_dseg_address);
3322
3323                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3324                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3325                 /* Set total data segment count. */
3326                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3327                 /* Specify response queue number where
3328                  * completion should happen.
3329                  */
3330                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3331         } else {
3332                 struct cmd_type_7 *cmd_pkt;
3333
3334                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3335                 if (req->cnt < (req_cnt + 2)) {
3336                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3337                             &reg->req_q_out[0]);
3338                         if (req->ring_index < cnt)
3339                                 req->cnt = cnt - req->ring_index;
3340                         else
3341                                 req->cnt = req->length -
3342                                         (req->ring_index - cnt);
3343                 }
3344                 if (req->cnt < (req_cnt + 2))
3345                         goto queuing_error;
3346
3347                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3348                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3349
3350                 /* Zero out remaining portion of packet. */
3351                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3352                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3353                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3354                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3355
3356                 /* Set NPORT-ID and LUN number*/
3357                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3358                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3359                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3360                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3361                 cmd_pkt->vp_index = sp->vha->vp_idx;
3362
3363                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3364                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3365                     sizeof(cmd_pkt->lun));
3366
3367                 /* Populate the FCP_PRIO. */
3368                 if (ha->flags.fcp_prio_enabled)
3369                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3370
3371                 /* Load SCSI command packet. */
3372                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3373                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3374
3375                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3376
3377                 /* Build IOCB segments */
3378                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3379
3380                 /* Set total data segment count. */
3381                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3382                 /* Specify response queue number where
3383                  * completion should happen.
3384                  */
3385                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3386
3387         }
3388         /* Build command packet. */
3389         req->current_outstanding_cmd = handle;
3390         req->outstanding_cmds[handle] = sp;
3391         sp->handle = handle;
3392         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3393         req->cnt -= req_cnt;
3394         wmb();
3395
3396         /* Adjust ring index. */
3397         req->ring_index++;
3398         if (req->ring_index == req->length) {
3399                 req->ring_index = 0;
3400                 req->ring_ptr = req->ring;
3401         } else
3402                 req->ring_ptr++;
3403
3404         sp->flags |= SRB_DMA_VALID;
3405
3406         /* Set chip new ring index. */
3407         /* write, read and verify logic */
3408         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
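        /* Either route the doorbell write through qla82xx_wr_32()
         * (ql2xdbwr set) or write it directly and read it back until the
         * chip is seen to have latched the new value.
         */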
3409         if (ql2xdbwr)
3410                 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3411         else {
3412                 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3413                 wmb();
3414                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3415                         WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3416                         wmb();
3417                 }
3418         }
3419
3420         /* Manage unprocessed RIO/ZIO commands in response queue. */
3421         if (vha->flags.process_response_queue &&
3422             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3423                 qla24xx_process_response_queue(vha, rsp);
3424
3425         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3426         return QLA_SUCCESS;
3427
3428 queuing_error_fcp_cmnd:
3429         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3430 queuing_error:
3431         if (tot_dsds)
3432                 scsi_dma_unmap(cmd);
3433
3434         if (sp->u.scmd.ctx) {
3435                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3436                 sp->u.scmd.ctx = NULL;
3437         }
3438         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3439
3440         return QLA_FUNCTION_FAILED;
3441 }
3442
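/*
 * Format an abort IOCB.  handle_to_abort combines the victim command's
 * request-queue number with its handle, mirroring the MAKE_HANDLE()
 * tagging used when that command was submitted.
 */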
3443 static void
3444 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3445 {
3446         struct srb_iocb *aio = &sp->u.iocb_cmd;
3447         scsi_qla_host_t *vha = sp->vha;
3448         struct req_que *req = sp->qpair->req;
3449
3450         memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3451         abt_iocb->entry_type = ABORT_IOCB_TYPE;
3452         abt_iocb->entry_count = 1;
3453         abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3454         if (sp->fcport) {
3455                 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3456                 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3457                 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3458                 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3459         }
3460         abt_iocb->handle_to_abort =
3461             cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3462                                     aio->u.abt.cmd_hndl));
3463         abt_iocb->vp_index = vha->vp_idx;
3464         abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3465         /* Send the command to the firmware */
3466         wmb();
3467 }
3468
3469 static void
3470 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3471 {
3472         int i, sz;
3473
3474         mbx->entry_type = MBX_IOCB_TYPE;
3475         mbx->handle = sp->handle;
3476         sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3477
3478         for (i = 0; i < sz; i++)
3479                 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3480 }
3481
3482 static void
3483 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3484 {
3485         sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3486         qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3487         ct_pkt->handle = sp->handle;
3488 }
3489
3490 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3491         struct nack_to_isp *nack)
3492 {
3493         struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3494
3495         nack->entry_type = NOTIFY_ACK_TYPE;
3496         nack->entry_count = 1;
3497         nack->ox_id = ntfy->ox_id;
3498
3499         nack->u.isp24.handle = sp->handle;
3500         nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3501         if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3502                 nack->u.isp24.flags = ntfy->u.isp24.flags &
3503                         cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3504         }
3505         nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3506         nack->u.isp24.status = ntfy->u.isp24.status;
3507         nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3508         nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3509         nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3510         nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3511         nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3512         nack->u.isp24.srr_flags = 0;
3513         nack->u.isp24.srr_reject_code = 0;
3514         nack->u.isp24.srr_reject_code_expl = 0;
3515         nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3516 }
3517
3518 /*
3519  * Build NVME LS request
3520  */
3521 static int
3522 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3523 {
3524         struct srb_iocb *nvme;
3525         int     rval = QLA_SUCCESS;
3526
3527         nvme = &sp->u.iocb_cmd;
3528         cmd_pkt->entry_type = PT_LS4_REQUEST;
3529         cmd_pkt->entry_count = 1;
3530         cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
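        /* This side originates the LS exchange; the two inline DSDs
         * filled in below carry the command payload and the response
         * buffer respectively.
         */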
3531
3532         cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3533         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3534         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3535
3536         cmd_pkt->tx_dseg_count = 1;
3537         cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3538         cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
3539         put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3540
3541         cmd_pkt->rx_dseg_count = 1;
3542         cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3543         cmd_pkt->dsd[1].length  = nvme->u.nvme.rsp_len;
3544         put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3545
3546         return rval;
3547 }
3548
3549 static void
3550 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3551 {
3552         int map, pos;
3553
3554         vce->entry_type = VP_CTRL_IOCB_TYPE;
3555         vce->handle = sp->handle;
3556         vce->entry_count = 1;
3557         vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3558         vce->vp_count = cpu_to_le16(1);
3559
3560         /*
3561          * The index map in firmware starts with 1; decrement the index.
3562          * This is OK as we never use index 0.
3563          */
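        /* e.g. vp_index 10 sets vp_idx_map[1] bit 1, since
         * (10 - 1) / 8 = 1 and (10 - 1) % 8 = 1.
         */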
3564         map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3565         pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3566         vce->vp_idx_map[map] |= 1 << pos;
3567 }
3568
3569 static void
3570 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3571 {
3572         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3573         logio->control_flags =
3574             cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3575
3576         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3577         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3578         logio->port_id[1] = sp->fcport->d_id.b.area;
3579         logio->port_id[2] = sp->fcport->d_id.b.domain;
3580         logio->vp_index = sp->fcport->vha->vp_idx;
3581 }
3582
3583 int
3584 qla2x00_start_sp(srb_t *sp)
3585 {
3586         int rval = QLA_SUCCESS;
3587         scsi_qla_host_t *vha = sp->vha;
3588         struct qla_hw_data *ha = vha->hw;
3589         struct qla_qpair *qp = sp->qpair;
3590         void *pkt;
3591         unsigned long flags;
3592
3593         spin_lock_irqsave(qp->qp_lock_ptr, flags);
3594         pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3595         if (!pkt) {
3596                 rval = EAGAIN;
3597                 ql_log(ql_log_warn, vha, 0x700c,
3598                     "qla2x00_alloc_iocbs failed.\n");
3599                 goto done;
3600         }
3601
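        /*
         * Each case below only formats the IOCB in place; the ring index
         * is advanced for all of them by the common qla2x00_start_iocbs()
         * call at the end, still under the qpair lock taken above.
         */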
3602         switch (sp->type) {
3603         case SRB_LOGIN_CMD:
3604                 IS_FWI2_CAPABLE(ha) ?
3605                     qla24xx_login_iocb(sp, pkt) :
3606                     qla2x00_login_iocb(sp, pkt);
3607                 break;
3608         case SRB_PRLI_CMD:
3609                 qla24xx_prli_iocb(sp, pkt);
3610                 break;
3611         case SRB_LOGOUT_CMD:
3612                 IS_FWI2_CAPABLE(ha) ?
3613                     qla24xx_logout_iocb(sp, pkt) :
3614                     qla2x00_logout_iocb(sp, pkt);
3615                 break;
3616         case SRB_ELS_CMD_RPT:
3617         case SRB_ELS_CMD_HST:
3618                 qla24xx_els_iocb(sp, pkt);
3619                 break;
3620         case SRB_CT_CMD:
3621                 IS_FWI2_CAPABLE(ha) ?
3622                     qla24xx_ct_iocb(sp, pkt) :
3623                     qla2x00_ct_iocb(sp, pkt);
3624                 break;
3625         case SRB_ADISC_CMD:
3626                 IS_FWI2_CAPABLE(ha) ?
3627                     qla24xx_adisc_iocb(sp, pkt) :
3628                     qla2x00_adisc_iocb(sp, pkt);
3629                 break;
3630         case SRB_TM_CMD:
3631                 IS_QLAFX00(ha) ?
3632                     qlafx00_tm_iocb(sp, pkt) :
3633                     qla24xx_tm_iocb(sp, pkt);
3634                 break;
3635         case SRB_FXIOCB_DCMD:
3636         case SRB_FXIOCB_BCMD:
3637                 qlafx00_fxdisc_iocb(sp, pkt);
3638                 break;
3639         case SRB_NVME_LS:
3640                 qla_nvme_ls(sp, pkt);
3641                 break;
3642         case SRB_ABT_CMD:
3643                 IS_QLAFX00(ha) ?
3644                         qlafx00_abort_iocb(sp, pkt) :
3645                         qla24xx_abort_iocb(sp, pkt);
3646                 break;
3647         case SRB_ELS_DCMD:
3648                 qla24xx_els_logo_iocb(sp, pkt);
3649                 break;
3650         case SRB_CT_PTHRU_CMD:
3651                 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3652                 break;
3653         case SRB_MB_IOCB:
3654                 qla2x00_mb_iocb(sp, pkt);
3655                 break;
3656         case SRB_NACK_PLOGI:
3657         case SRB_NACK_PRLI:
3658         case SRB_NACK_LOGO:
3659                 qla2x00_send_notify_ack_iocb(sp, pkt);
3660                 break;
3661         case SRB_CTRL_VP:
3662                 qla25xx_ctrlvp_iocb(sp, pkt);
3663                 break;
3664         case SRB_PRLO_CMD:
3665                 qla24xx_prlo_iocb(sp, pkt);
3666                 break;
3667         default:
3668                 break;
3669         }
3670
3671         wmb();
3672         qla2x00_start_iocbs(vha, qp->req);
3673 done:
3674         spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3675         return rval;
3676 }
3677
3678 static void
3679 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3680                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3681 {
3682         uint16_t avail_dsds;
3683         struct dsd64 *cur_dsd;
3684         uint32_t req_data_len = 0;
3685         uint32_t rsp_data_len = 0;
3686         struct scatterlist *sg;
3687         int index;
3688         int entry_count = 1;
3689         struct bsg_job *bsg_job = sp->u.bsg_job;
3690
3691         /* Update entry type to indicate bidir command */
3692         put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3693
3694         /* Set the transfer direction; in this case set both flags.
3695          * Also set the BD_WRAP_BACK flag; the firmware will take care of
3696          * assigning DID=SID for outgoing pkts.
3697          */
3698         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3699         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3700         cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3701                                                         BD_WRAP_BACK);
3702
3703         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3704         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3705         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3706         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3707
3708         vha->bidi_stats.transfer_bytes += req_data_len;
3709         vha->bidi_stats.io_count++;
3710
3711         vha->qla_stats.output_bytes += req_data_len;
3712         vha->qla_stats.output_requests++;
3713
3714         /* Only one DSD is available in the bidirectional IOCB; the
3715          * remaining DSDs are bundled in continuation IOCBs.
3716          */
3717         avail_dsds = 1;
3718         cur_dsd = &cmd_pkt->fcp_dsd;
3719
3720         index = 0;
3721
3722         for_each_sg(bsg_job->request_payload.sg_list, sg,
3723                                 bsg_job->request_payload.sg_cnt, index) {
3724                 cont_a64_entry_t *cont_pkt;
3725
3726                 /* Allocate additional continuation packets */
3727                 if (avail_dsds == 0) {
3728                         /* Continuation Type 1 IOCB can accommodate
3729                          * 5 DSDs
3730                          */
3731                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3732                         cur_dsd = cont_pkt->dsd;
3733                         avail_dsds = 5;
3734                         entry_count++;
3735                 }
3736                 append_dsd64(&cur_dsd, sg);
3737                 avail_dsds--;
3738         }
3739         /* For a read request the DSDs always go to a continuation IOCB
3740          * and follow the write DSDs. If there is room on the current IOCB
3741          * they are added to that IOCB; otherwise a new continuation IOCB
3742          * is allocated.
3743          */
3744         for_each_sg(bsg_job->reply_payload.sg_list, sg,
3745                                 bsg_job->reply_payload.sg_cnt, index) {
3746                 cont_a64_entry_t *cont_pkt;
3747
3748                 /* Allocate additional continuation packets */
3749                 if (avail_dsds == 0) {
3750                         /* Continuation Type 1 IOCB can accommodate
3751                          * 5 DSDs
3752                          */
3753                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3754                         cur_dsd = cont_pkt->dsd;
3755                         avail_dsds = 5;
3756                         entry_count++;
3757                 }
3758                 append_dsd64(&cur_dsd, sg);
3759                 avail_dsds--;
3760         }
3761         /* This value should be the same as the number of IOCBs required for this cmd */
3762         cmd_pkt->entry_count = entry_count;
3763 }
3764
3765 int
3766 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3767 {
3768
3769         struct qla_hw_data *ha = vha->hw;
3770         unsigned long flags;
3771         uint32_t handle;
3772         uint32_t index;
3773         uint16_t req_cnt;
3774         uint16_t cnt;
3775         uint32_t *clr_ptr;
3776         struct cmd_bidir *cmd_pkt = NULL;
3777         struct rsp_que *rsp;
3778         struct req_que *req;
3779         int rval = EXT_STATUS_OK;
3780
3783         rsp = ha->rsp_q_map[0];
3784         req = vha->req;
3785
3786         /* Send marker if required */
3787         if (vha->marker_needed != 0) {
3788                 if (qla2x00_marker(vha, ha->base_qpair,
3789                         0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3790                         return EXT_STATUS_MAILBOX;
3791                 vha->marker_needed = 0;
3792         }
3793
3794         /* Acquire ring specific lock */
3795         spin_lock_irqsave(&ha->hardware_lock, flags);
3796
3797         /* Check for room in outstanding command list. */
3798         handle = req->current_outstanding_cmd;
3799         for (index = 1; index < req->num_outstanding_cmds; index++) {
3800                 handle++;
3801                 if (handle == req->num_outstanding_cmds)
3802                         handle = 1;
3803                 if (!req->outstanding_cmds[handle])
3804                         break;
3805         }
3806
3807         if (index == req->num_outstanding_cmds) {
3808                 rval = EXT_STATUS_BUSY;
3809                 goto queuing_error;
3810         }
3811
3812         /* Calculate number of IOCB required */
3813         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3814
3815         /* Check for room on request queue. */
3816         if (req->cnt < req_cnt + 2) {
3817                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3818                     RD_REG_DWORD_RELAXED(req->req_q_out);
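                /* Free slots = distance back to the consumer index just
                 * read, with wrap handled below; the "+ 2" in the checks
                 * presumably keeps a small reserve so the producer never
                 * completely fills the ring.
                 */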
3819                 if (req->ring_index < cnt)
3820                         req->cnt = cnt - req->ring_index;
3821                 else
3822                         req->cnt = req->length -
3823                                 (req->ring_index - cnt);
3824         }
3825         if (req->cnt < req_cnt + 2) {
3826                 rval = EXT_STATUS_BUSY;
3827                 goto queuing_error;
3828         }
3829
3830         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3831         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3832
3833         /* Zero out remaining portion of packet. */
3834         /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3835         clr_ptr = (uint32_t *)cmd_pkt + 2;
3836         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3837
3838         /* Set NPORT-ID  (of vha)*/
3839         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3840         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3841         cmd_pkt->port_id[1] = vha->d_id.b.area;
3842         cmd_pkt->port_id[2] = vha->d_id.b.domain;
3843
3844         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3845         cmd_pkt->entry_status = (uint8_t) rsp->id;
3846         /* Build command packet. */
3847         req->current_outstanding_cmd = handle;
3848         req->outstanding_cmds[handle] = sp;
3849         sp->handle = handle;
3850         req->cnt -= req_cnt;
3851
3852         /* Send the command to the firmware */
3853         wmb();
3854         qla2x00_start_iocbs(vha, req);
3855 queuing_error:
3856         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3857         return rval;
3858 }