// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

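/*
 * Example of the arithmetic above: a 24-segment transfer needs one
 * Command Type 2 IOCB for the first 3 DSDs plus (24 - 3) / 7 = 3
 * Continuation Type 0 IOCBs for the rest; (24 - 3) % 7 == 0, so no
 * extra entry is needed: 4 IOCBs total.
 */
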
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

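/*
 * Same idea for the 64-bit variant, with 2 DSDs in the command IOCB and
 * 5 per continuation: a 12-segment transfer needs 1 + (12 - 2) / 5 = 3
 * IOCBs, with no remainder term since (12 - 2) % 5 == 0.
 */
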
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
			   CONTINUE_A64_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

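/*
 * Note: both prep_cont helpers advance the request ring the same way:
 * ring_index is incremented and, on reaching req->length, the ring
 * pointer wraps back to req->ring. The caller has already reserved the
 * entry via the req->cnt accounting, so no free-space check is needed
 * here.
 */
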
inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK))
		*fw_prot_opts |= PO_DISABLE_GUARD_CHECK;

	return scsi_prot_sg_count(cmd);
}

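/*
 * The mapping above mirrors who owns the protection data: STRIP ops ask
 * the firmware to REMOVE the T10 PI before handing data to the host,
 * INSERT ops ask it to generate PI on the way to the wire, and PASS ops
 * carry host-supplied PI through unchanged (optionally verified with an
 * IP-style checksum instead of the T10 CRC guard).
 */
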
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd32 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
	cur_dsd = cmd_pkt->dsd32;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd32(&cur_dsd, sg);
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
	cur_dsd = cmd_pkt->dsd64;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}

/*
 * Find the first handle that is not in use, starting from
 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
 * associated with @req.
 */
uint32_t qla2xxx_get_next_handle(struct req_que *req)
{
	uint32_t index, handle = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			return handle;
	}

	return 0;
}

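/*
 * Handle 0 is reserved (it is what a zeroed outstanding_cmds slot and an
 * unused IOCB handle field look like), so the search wraps from
 * num_outstanding_cmds - 1 back to 1, never to 0, and a return value of
 * 0 means the array is full.
 */
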
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

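/*
 * Free-slot arithmetic used above (and in the 24xx variants): with
 * in == ring_index and out == the firmware's consumer index (cnt), free
 * space is out - in when out is ahead, else length - (in - out). The
 * req_cnt + 2 test keeps a small cushion so the producer index never
 * wraps onto the consumer index.
 */
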
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->isp24.req_q_in);
		} else {
			wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct req_que *req = qpair->req;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *)mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = make_handle(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

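/*
 * Locking contract: with ha_locked set, the caller already holds the
 * queue lock, so the lock-free __qla2x00_marker() is used; otherwise
 * qla2x00_marker() takes and releases qpair->qp_lock_ptr itself.
 */
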
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	struct dsd64 *cur_dsd = NULL, *next_dsd;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;
	struct qla_qpair *qpair = sp->qpair;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		qpair->counters.output_bytes += scsi_bufflen(cmd);
		qpair->counters.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += scsi_bufflen(cmd);
		qpair->counters.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = sp->u.scmd.ct6_ctx;

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
					   &cmd_pkt->fcp_dsd.address);
			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
		} else {
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
					   &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd++;
		}
		cur_dsd = next_dsd;
		while (avail_dsds) {
			append_dsd64(&cur_dsd, cur_seg);
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}

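/*
 * This is just a ceiling division: e.g. if QLA_DSDS_PER_IOCB is 37,
 * 40 descriptors need 40 / 37 = 1 full list plus one more for the
 * remainder, i.e. 2 DSD lists.
 */
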
/*
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct qla_qpair *qpair = sp->qpair;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		qpair->counters.output_bytes += scsi_bufflen(cmd);
		qpair->counters.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		qpair->counters.input_bytes += scsi_bufflen(cmd);
		qpair->counters.input_requests++;
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->dsd;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}

struct fw_dif_context {
	__le32 ref_tag;
	__le16 app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd));

	if (cmd->prot_flags & SCSI_PROT_REF_CHECK &&
	    qla2x00_hba_err_chk_enabled(sp)) {
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
	}

	pkt->app_tag = cpu_to_le16(0);
	pkt->app_tag_mask[0] = 0x0;
	pkt->app_tag_mask[1] = 0x0;
}

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}

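/*
 * Worked example: with blk_sz == 512 and two 768-byte SG elements, the
 * first call returns the first 512 bytes of element 0 (*partial == 0),
 * the second returns the trailing 256 bytes of element 0 (*partial == 1,
 * tot_partial == 256), and the third completes the block with 256 bytes
 * from element 1 (*partial == 0 again).
 */
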
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	struct dsd64 *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = scsi_prot_interval(cmd);

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int      = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg    = tc->sg;
		sg_prot	      = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
					      &sp->u.scmd.crc_ctx->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
					   &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		put_unaligned_le64(sle_dma, &cur_dsd->address);
		cur_dsd->length = cpu_to_le32(sle_dma_len);
		cur_dsd++;
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	struct dsd64 *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
					      &sp->u.scmd.crc_ctx->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
					   &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

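/*
 * DSD list sizing used in the walkers above: each descriptor is 12 bytes
 * (8-byte address plus 4-byte length), and each list is allocated with
 * one extra slot, (avail_dsds + 1) * 12, so the final entry can either
 * chain to the next list or hold the null terminator.
 */
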
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
	struct scatterlist *sg, *sgl;
	struct crc_context *difctx = NULL;
	struct scsi_qla_host *vha;
	uint dsd_list_len;
	uint avail_dsds = 0;
	uint used_dsds = tot_dsds;
	bool dif_local_dma_alloc = false;
	bool direction_to_device = false;
	int i;

	if (sp) {
		struct scsi_cmnd *cmd = GET_CMD_SP(sp);

		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
		difctx = sp->u.scmd.crc_ctx;
		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
		    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
			__func__, cmd, difctx, sp);
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
		difctx = tc->ctx;
		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
	    "%s: enter (write=%u)\n", __func__, direction_to_device);

	/* if initiator doing write or target doing read */
	if (direction_to_device) {
		for_each_sg(sgl, sg, tot_dsds, i) {
			u64 sle_phys = sg_phys(sg);

			/* If SGE addr + len flips bits in upper 32-bits */
			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
				    "%s: page boundary crossing (phys=%llx len=%x)\n",
				    __func__, sle_phys, sg->length);

				if (difctx) {
					ha->dif_bundle_crossed_pages++;
					dif_local_dma_alloc = true;
				} else {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe022,
					    "%s: difctx pointer is NULL\n",
					    __func__);
				}
				break;
			}
		}
		ha->dif_bundle_writes++;
	} else {
		ha->dif_bundle_reads++;
	}

	if (ql2xdifbundlinginternalbuffers)
		dif_local_dma_alloc = direction_to_device;

	if (dif_local_dma_alloc) {
		u32 track_difbundl_buf = 0;
		u32 ldma_sg_len = 0;
		u8 ldma_needed = 1;

		difctx->no_dif_bundl = 0;
		difctx->dif_bundl_len = 0;

		/* Track DSD buffers */
		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
		/* Track local DMA buffers */
		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

		for_each_sg(sgl, sg, tot_dsds, i) {
			u32 sglen = sg_dma_len(sg);

			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
			    __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
			    difctx->dif_bundl_len, ldma_needed);

			while (sglen) {
				u32 xfrlen = 0;

				if (ldma_needed) {
					/*
					 * Allocate list item to store
					 * the DMA buffers
					 */
					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
					    GFP_ATOMIC);
					if (!dsd_ptr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc dsd_ptr\n",
						    __func__);
						return 1;
					}
					ha->dif_bundle_kallocs++;

					/* allocate dma buffer */
					dsd_ptr->dsd_addr = dma_pool_alloc
						(ha->dif_bundl_pool, GFP_ATOMIC,
						 &dsd_ptr->dsd_list_dma);
					if (!dsd_ptr->dsd_addr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc ->dsd_ptr\n",
						    __func__);
						/*
						 * need to cleanup only this
						 * dsd_ptr rest will be done
						 * by sp_free_dma()
						 */
						kfree(dsd_ptr);
						ha->dif_bundle_kallocs--;
						return 1;
					}
					ha->dif_bundle_dma_allocs++;
					ldma_needed = 0;
					difctx->no_dif_bundl++;
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dma_hndl_list);
				}

				/* xfrlen is min of dma pool size and sglen */
				xfrlen = (sglen >
				   (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
				    sglen;

				/* replace with local allocated dma buffer */
				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
				    difctx->dif_bundl_len);
				difctx->dif_bundl_len += xfrlen;
				sglen -= xfrlen;
				ldma_sg_len += xfrlen;
				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
				    sg_is_last(sg)) {
					ldma_needed = 1;
					ldma_sg_len = 0;
				}
			}
		}

		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
		    difctx->dif_bundl_len, difctx->no_dif_bundl,
		    track_difbundl_buf);

		if (sp)
			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
		else
			tc->prot_flags = DIF_BUNDL_DMA_VALID;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			u32 sglen = (difctx->dif_bundl_len >
				     DIF_BUNDLING_DMA_POOL_SIZE) ?
			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

			BUG_ON(track_difbundl_buf == 0);

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
				    0xe024,
				    "%s: adding continuation iocb's\n",
				    __func__);
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc dsd_ptr\n",
					    __func__);
					return 1;
				}
				ha->dif_bundle_kallocs++;

				difctx->no_ldif_dsd++;
				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
						   &dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc ->dsd_addr\n",
					    __func__);
					/*
					 * need to cleanup only this dsd_ptr
					 * rest will be done by sp_free_dma()
					 */
					kfree(dsd_ptr);
					ha->dif_bundle_kallocs--;
					return 1;
				}
				ha->dif_bundle_dma_allocs++;

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
						   &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			put_unaligned_le64(dif_dsd->dsd_list_dma,
					   &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(sglen);
			cur_dsd++;
			avail_dsds--;
			difctx->dif_bundl_len -= sglen;
			track_difbundl_buf--;
		}

		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
			difctx->no_ldif_dsd, difctx->no_dif_bundl);
	} else {
		for_each_sg(sgl, sg, tot_dsds, i) {
			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe027,
					    "%s: failed alloc dsd_dma...\n",
					    __func__);
					return 1;
				}

				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
						   &dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					/* need to cleanup only this dsd_ptr */
					/* rest will be done by sp_free_dma() */
					kfree(dsd_ptr);
					return 1;
				}

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &sp->u.scmd.crc_ctx->dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &(tc->ctx->dsd_list));
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
						   &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			append_dsd64(&cur_dsd, sg);
			avail_dsds--;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

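/*
 * The dif_local_dma_alloc path above bounces protection data into
 * driver-owned DMA pool chunks: if a protection SGE would cross a 4GB
 * address boundary (the upper 32 bits of its start and end differ), or
 * if ql2xdifbundlinginternalbuffers forces it for writes, the PI bytes
 * are copied into DIF_BUNDLING_DMA_POOL_SIZE-sized buffers and those
 * buffers are what get described to the firmware.
 */
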
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	struct dsd64 *cur_dsd;
	__be32 *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.crc_ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
	cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
			   &cmd_pkt->fcp_cmnd_dseg_address);
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
							tot_prot_dsds);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
			cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
			(tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
				tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

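/*
 * Note on fcp_dl: the FCP_CMND carried in the CRC context ends with a
 * 4-byte big-endian data-length field immediately after the (possibly
 * extended) CDB, which is why *fcp_dl is written with htonl() rather
 * than a cpu_to_le*() helper like the rest of the IOCB fields.
 */
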
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	sp->iores.res_type = RESOURCE_INI;
	sp->iores.iocb_cnt = req_cnt;
	if (qla_get_iocbs(sp->qpair, &sp->iores))
		goto queuing_error;

	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->qpair->cmd_cnt++;
	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	qla_put_iocbs(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

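/*
 * Ring-space check used above: on ISPs with shadow registers the
 * firmware DMA-writes its consumer index into host memory
 * (*req->out_ptr), avoiding an expensive MMIO read of req_q_out; the
 * MMIO path also doubles as a surprise-removal check, since a dead PCI
 * device reads back as all-ones (ISP_REG16_DISCONNECT).
 */
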
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;

	sp->iores.res_type = RESOURCE_INI;
	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (qla_get_iocbs(sp->qpair, &sp->iores))
		goto queuing_error;

	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->qpair->cmd_cnt++;
	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	qla_put_iocbs(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_start_scsi_mq(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	req = qpair->req;
	rsp = qpair->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	sp->iores.res_type = RESOURCE_INI;
	sp->iores.iocb_cnt = req_cnt;
	if (qla_get_iocbs(sp->qpair, &sp->iores))
		goto queuing_error;

	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->qpair->cmd_cnt++;
	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	qla_put_iocbs(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2xxx_dif_start_scsi_mq(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;
	struct qla_qpair *qpair = sp->qpair;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Check for host side state */
	if (!qpair->online) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	if (!qpair->difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla2xxx_start_scsi_mq(sp);
	}

	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;

	sp->iores.res_type = RESOURCE_INI;
	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (qla_get_iocbs(sp->qpair, &sp->iores))
		goto queuing_error;

	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->qpair->cmd_cnt++;
	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	qla_put_iocbs(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/* Generic Control-SRB manipulation functions. */

/* hardware_lock assumed to be held. */

void *
__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = qpair->req;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
	uint32_t handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (sp && (sp->type != SRB_SCSI_CMD)) {
		/* Adjust entry-counts as needed. */
		req_cnt = sp->iocbs;
	}

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (qpair->use_shadow_reg)
			cnt = *req->out_ptr;
		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = rd_reg_dword(reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = rd_reg_dword(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
			qla_schedule_eeh_work(vha);
			return NULL;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2)
		goto queuing_error;

	if (sp) {
		handle = qla2xxx_get_next_handle(req);
		if (handle == 0) {
			ql_log(ql_log_warn, vha, 0x700b,
			    "No room on outstanding cmd array.\n");
			goto queuing_error;
		}

		/* Prep command array. */
		req->current_outstanding_cmd = handle;
		req->outstanding_cmds[handle] = sp;
		sp->handle = handle;
	}

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	if (IS_QLAFX00(ha)) {
		wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
		wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

	return pkt;

queuing_error:
	qpair->tgt_counters.num_alloc_iocb_failed++;
	return pkt;
}

void *
qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;

	if (qla2x00_reset_active(vha))
		return NULL;

	return __qla2x00_alloc_iocbs(qpair, sp);
}

void *
qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
{
	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
}

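/*
 * __qla2x00_alloc_iocbs() may be called with sp == NULL (e.g. from
 * __qla2x00_marker()) for IOCBs that never complete through the
 * outstanding_cmds array; in that case no handle is consumed and the
 * returned packet carries handle 0.
 */
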
static void
qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
		logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
		if (sp->vha->flags.nvme_first_burst)
			logio->io_parameter[0] =
				cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
		if (sp->vha->flags.nvme2_enabled) {
			/* Set service parameter BIT_7 for NVME CONF support */
			logio->io_parameter[0] |=
				cpu_to_le32(NVME_PRLI_SP_CONF);
			/* Set service parameter BIT_8 for SLER support */
			logio->io_parameter[0] |=
				cpu_to_le32(NVME_PRLI_SP_SLER);
			/* Set service parameter BIT_9 for PI control support */
			logio->io_parameter[0] |=
				cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
		}
	}

	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);

	if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
		logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
	} else {
		logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
		if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
			logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
		if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
			logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	}
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

2425 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2427 struct qla_hw_data *ha = sp->vha->hw;
2428 struct srb_iocb *lio = &sp->u.iocb_cmd;
2431 mbx->entry_type = MBX_IOCB_TYPE;
2432 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2433 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2434 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2435 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
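/*
* With extended IDs the loop ID gets its own mailbox register and the
* option bits travel in mb10; otherwise both are packed into mb1 with
* the loop ID in the upper byte.
*/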
2436 if (HAS_EXTENDED_IDS(ha)) {
2437 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2438 mbx->mb10 = cpu_to_le16(opts);
2440 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2442 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2443 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2444 sp->fcport->d_id.b.al_pa);
2445 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2449 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2451 u16 control_flags = LCF_COMMAND_LOGO;
2452 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2454 if (sp->fcport->explicit_logout) {
2455 control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2457 control_flags |= LCF_IMPL_LOGO;
2459 if (!sp->fcport->keep_nport_handle)
2460 control_flags |= LCF_FREE_NPORT;
2463 logio->control_flags = cpu_to_le16(control_flags);
2464 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2465 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2466 logio->port_id[1] = sp->fcport->d_id.b.area;
2467 logio->port_id[2] = sp->fcport->d_id.b.domain;
2468 logio->vp_index = sp->vha->vp_idx;
2472 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2474 struct qla_hw_data *ha = sp->vha->hw;
2476 mbx->entry_type = MBX_IOCB_TYPE;
2477 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2478 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2479 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2480 cpu_to_le16(sp->fcport->loop_id) :
2481 cpu_to_le16(sp->fcport->loop_id << 8);
2482 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2483 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2484 sp->fcport->d_id.b.al_pa);
2485 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2486 /* Implicit: mbx->mb10 = 0. */
2490 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2492 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2493 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2494 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2495 logio->vp_index = sp->vha->vp_idx;
2499 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2501 struct qla_hw_data *ha = sp->vha->hw;
2503 mbx->entry_type = MBX_IOCB_TYPE;
2504 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2505 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2506 if (HAS_EXTENDED_IDS(ha)) {
2507 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2508 mbx->mb10 = cpu_to_le16(BIT_0);
2510 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2512 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2513 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2514 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2515 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2516 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2520 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2524 struct fc_port *fcport = sp->fcport;
2525 scsi_qla_host_t *vha = fcport->vha;
2526 struct qla_hw_data *ha = vha->hw;
2527 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2528 struct req_que *req = vha->req;
2530 flags = iocb->u.tmf.flags;
2531 lun = iocb->u.tmf.lun;
2533 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2534 tsk->entry_count = 1;
2535 tsk->handle = make_handle(req->id, tsk->handle);
2536 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2537 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2538 tsk->control_flags = cpu_to_le32(flags);
2539 tsk->port_id[0] = fcport->d_id.b.al_pa;
2540 tsk->port_id[1] = fcport->d_id.b.area;
2541 tsk->port_id[2] = fcport->d_id.b.domain;
2542 tsk->vp_index = fcport->vha->vp_idx;
2544 if (flags == TCF_LUN_RESET) {
2545 int_to_scsilun(lun, &tsk->lun);
2546 host_to_fcp_swap((uint8_t *)&tsk->lun,
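/**
 * qla2x00_init_timer() - Arm the per-SRB command timer.
 * @sp: SRB to set up
 * @tmo: timeout in seconds
 *
 * The timer is only added by qla2x00_start_sp() once the IOCB has
 * actually been queued (see sp->start_timer).
 */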
2551 void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2553 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2554 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2555 sp->free = qla2x00_sp_free;
2556 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2557 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2558 sp->start_timer = 1;
2561 static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2563 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2567 if (elsio->u.els_logo.els_logo_pyld)
2568 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2569 elsio->u.els_logo.els_logo_pyld,
2570 elsio->u.els_logo.els_logo_pyld_dma);
2572 del_timer(&elsio->timer);
2577 qla2x00_els_dcmd_iocb_timeout(void *data)
2580 fc_port_t *fcport = sp->fcport;
2581 struct scsi_qla_host *vha = sp->vha;
2582 struct srb_iocb *lio = &sp->u.iocb_cmd;
2583 unsigned long flags = 0;
2586 ql_dbg(ql_dbg_io, vha, 0x3069,
2587 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2588 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2589 fcport->d_id.b.al_pa);
2591 /* Abort the exchange */
2592 res = qla24xx_async_abort_cmd(sp, false);
2594 ql_dbg(ql_dbg_io, vha, 0x3070,
2595 "mbx abort_command failed.\n");
2596 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2597 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2598 if (sp->qpair->req->outstanding_cmds[h] == sp) {
2599 sp->qpair->req->outstanding_cmds[h] = NULL;
2603 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2604 complete(&lio->u.els_logo.comp);
2606 ql_dbg(ql_dbg_io, vha, 0x3071,
2607 "mbx abort_command success.\n");
2611 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2613 fc_port_t *fcport = sp->fcport;
2614 struct srb_iocb *lio = &sp->u.iocb_cmd;
2615 struct scsi_qla_host *vha = sp->vha;
2617 ql_dbg(ql_dbg_io, vha, 0x3072,
2618 "%s hdl=%x, portid=%02x%02x%02x done\n",
2619 sp->name, sp->handle, fcport->d_id.b.domain,
2620 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2622 complete(&lio->u.els_logo.comp);
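/**
 * qla24xx_els_dcmd_iocb() - Issue a driver-generated ELS LOGO.
 * @vha: host the request is issued on
 * @els_opcode: ELS command opcode to send
 * @remote_did: destination N_Port ID
 *
 * Allocates a temporary fcport and SRB, builds the LOGO payload in
 * coherent DMA memory, starts the SRB and waits for the exchange to
 * complete.
 */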
2626 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2627 port_id_t remote_did)
2630 fc_port_t *fcport = NULL;
2631 struct srb_iocb *elsio = NULL;
2632 struct qla_hw_data *ha = vha->hw;
2633 struct els_logo_payload logo_pyld;
2634 int rval = QLA_SUCCESS;
2636 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2638 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2642 /* Alloc SRB structure */
2643 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2646 ql_log(ql_log_info, vha, 0x70e6,
2647 "SRB allocation failed\n");
2651 elsio = &sp->u.iocb_cmd;
2652 fcport->loop_id = 0xFFFF;
2653 fcport->d_id.b.domain = remote_did.b.domain;
2654 fcport->d_id.b.area = remote_did.b.area;
2655 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2657 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2658 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2660 sp->type = SRB_ELS_DCMD;
2661 sp->name = "ELS_DCMD";
2662 sp->fcport = fcport;
2663 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2664 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2665 init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2666 sp->done = qla2x00_els_dcmd_sp_done;
2667 sp->free = qla2x00_els_dcmd_sp_free;
2669 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2670 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2673 if (!elsio->u.els_logo.els_logo_pyld) {
2675 return QLA_FUNCTION_FAILED;
2678 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2680 elsio->u.els_logo.els_cmd = els_opcode;
2681 logo_pyld.opcode = els_opcode;
2682 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2683 logo_pyld.s_id[1] = vha->d_id.b.area;
2684 logo_pyld.s_id[2] = vha->d_id.b.domain;
2685 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2686 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2688 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2689 sizeof(struct els_logo_payload));
2690 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2691 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2692 elsio->u.els_logo.els_logo_pyld,
2693 sizeof(*elsio->u.els_logo.els_logo_pyld));
2695 rval = qla2x00_start_sp(sp);
2696 if (rval != QLA_SUCCESS) {
2698 return QLA_FUNCTION_FAILED;
2701 ql_dbg(ql_dbg_io, vha, 0x3074,
2702 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2703 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2704 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2706 wait_for_completion(&elsio->u.els_logo.comp);
2713 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2715 scsi_qla_host_t *vha = sp->vha;
2716 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2718 els_iocb->entry_type = ELS_IOCB_TYPE;
2719 els_iocb->entry_count = 1;
2720 els_iocb->sys_define = 0;
2721 els_iocb->entry_status = 0;
2722 els_iocb->handle = sp->handle;
2723 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2724 els_iocb->tx_dsd_count = cpu_to_le16(1);
2725 els_iocb->vp_index = vha->vp_idx;
2726 els_iocb->sof_type = EST_SOFI3;
2727 els_iocb->rx_dsd_count = 0;
2728 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2730 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
2731 els_iocb->d_id[1] = sp->fcport->d_id.b.area;
2732 els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
2733 /* For SID the byte order is different from that of DID */
2734 els_iocb->s_id[1] = vha->d_id.b.al_pa;
2735 els_iocb->s_id[2] = vha->d_id.b.area;
2736 els_iocb->s_id[0] = vha->d_id.b.domain;
2738 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2739 els_iocb->control_flags = 0;
2740 els_iocb->tx_byte_count = els_iocb->tx_len =
2741 cpu_to_le32(sizeof(struct els_plogi_payload));
2742 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2743 &els_iocb->tx_address);
2744 els_iocb->rx_dsd_count = cpu_to_le16(1);
2745 els_iocb->rx_byte_count = els_iocb->rx_len =
2746 cpu_to_le32(sizeof(struct els_plogi_payload));
2747 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2748 &els_iocb->rx_address);
2750 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2751 "PLOGI ELS IOCB:\n");
2752 ql_dump_buffer(ql_log_info, vha, 0x0109,
2753 (uint8_t *)els_iocb,
2756 els_iocb->control_flags = cpu_to_le16(1 << 13);
2757 els_iocb->tx_byte_count =
2758 cpu_to_le32(sizeof(struct els_logo_payload));
2759 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2760 &els_iocb->tx_address);
2761 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2763 els_iocb->rx_byte_count = 0;
2764 els_iocb->rx_address = 0;
2765 els_iocb->rx_len = 0;
2766 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2768 ql_dump_buffer(ql_log_info, vha, 0x010b,
2773 sp->vha->qla_stats.control_requests++;
2777 qla2x00_els_dcmd2_iocb_timeout(void *data)
2780 fc_port_t *fcport = sp->fcport;
2781 struct scsi_qla_host *vha = sp->vha;
2782 unsigned long flags = 0;
2785 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2786 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2787 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2789 /* Abort the exchange */
2790 res = qla24xx_async_abort_cmd(sp, false);
2791 ql_dbg(ql_dbg_io, vha, 0x3070,
2792 "mbx abort_command %s\n",
2793 (res == QLA_SUCCESS) ? "successful" : "failed");
2795 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2796 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2797 if (sp->qpair->req->outstanding_cmds[h] == sp) {
2798 sp->qpair->req->outstanding_cmds[h] = NULL;
2802 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2803 sp->done(sp, QLA_FUNCTION_TIMEOUT);
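/**
 * qla2x00_els_dcmd2_free() - Release PLOGI payload/response DMA buffers.
 * @vha: host owning the DMA device
 * @els_plogi: ELS PLOGI context whose buffers are freed
 */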
2807 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2809 if (els_plogi->els_plogi_pyld)
2810 dma_free_coherent(&vha->hw->pdev->dev,
2812 els_plogi->els_plogi_pyld,
2813 els_plogi->els_plogi_pyld_dma);
2815 if (els_plogi->els_resp_pyld)
2816 dma_free_coherent(&vha->hw->pdev->dev,
2818 els_plogi->els_resp_pyld,
2819 els_plogi->els_resp_pyld_dma);
2822 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2824 fc_port_t *fcport = sp->fcport;
2825 struct srb_iocb *lio = &sp->u.iocb_cmd;
2826 struct scsi_qla_host *vha = sp->vha;
2827 struct event_arg ea;
2828 struct qla_work_evt *e;
2829 struct fc_port *conflict_fcport;
2830 port_id_t cid; /* conflict Nport id */
2831 const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
2834 ql_dbg(ql_dbg_disc, vha, 0x3072,
2835 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2836 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2838 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2839 del_timer(&sp->u.iocb_cmd.timer);
2841 if (sp->flags & SRB_WAKEUP_ON_COMP)
2842 complete(&lio->u.els_plogi.comp);
2844 switch (le32_to_cpu(fw_status[0])) {
2845 case CS_DATA_UNDERRUN:
2847 memset(&ea, 0, sizeof(ea));
2850 qla_handle_els_plogi_done(vha, &ea);
2854 switch (le32_to_cpu(fw_status[1])) {
2855 case LSC_SCODE_PORTID_USED:
2856 lid = le32_to_cpu(fw_status[2]) & 0xffff;
2857 qlt_find_sess_invalidate_other(vha,
2858 wwn_to_u64(fcport->port_name),
2859 fcport->d_id, lid, &conflict_fcport);
2860 if (conflict_fcport) {
2862 * Another fcport shares the same
2863 * loop_id & nport id; conflict
2864 * fcport needs to finish cleanup
2865 * before this fcport can proceed
2868 conflict_fcport->conflict = fcport;
2869 fcport->login_pause = 1;
2870 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2871 "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
2874 fcport->d_id.b24, lid);
2876 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2877 "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2880 fcport->d_id.b24, lid);
2881 qla2x00_clear_loop_id(fcport);
2882 set_bit(lid, vha->hw->loop_id_map);
2883 fcport->loop_id = lid;
2884 fcport->keep_nport_handle = 0;
2885 qlt_schedule_sess_for_deletion(fcport);
2889 case LSC_SCODE_NPORT_USED:
2890 cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
2892 cid.b.area = (le32_to_cpu(fw_status[2]) >> 8)
2894 cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff;
2897 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2898 "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2899 __func__, __LINE__, fcport->port_name,
2900 fcport->loop_id, cid.b24);
2901 set_bit(fcport->loop_id,
2902 vha->hw->loop_id_map);
2903 fcport->loop_id = FC_NO_LOOP_ID;
2904 qla24xx_post_gnl_work(vha, fcport);
2907 case LSC_SCODE_NOXCB:
2908 vha->hw->exch_starvation++;
2909 if (vha->hw->exch_starvation > 5) {
2910 ql_log(ql_log_warn, vha, 0xd046,
2911 "Exchange starvation. Resetting RISC\n");
2912 vha->hw->exch_starvation = 0;
2913 set_bit(ISP_ABORT_NEEDED,
2915 qla2xxx_wake_dpc(vha);
2919 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2920 "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2921 __func__, sp->fcport->port_name,
2922 fw_status[0], fw_status[1], fw_status[2]);
2924 fcport->flags &= ~FCF_ASYNC_SENT;
2925 qla2x00_set_fcport_disc_state(fcport,
2927 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2933 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2934 "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
2935 __func__, sp->fcport->port_name,
2936 fw_status[0], fw_status[1], fw_status[2]);
2938 sp->fcport->flags &= ~FCF_ASYNC_SENT;
2939 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
2940 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2944 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2946 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2948 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
2953 qla2x00_post_work(vha, e);
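/**
 * qla24xx_els_dcmd2_iocb() - Issue a driver-generated ELS PLOGI.
 * @vha: host the request is issued on
 * @els_opcode: ELS command opcode to send
 * @fcport: remote port to log in to
 * @wait: block until the PLOGI exchange completes
 */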
2958 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2959 fc_port_t *fcport, bool wait)
2962 struct srb_iocb *elsio = NULL;
2963 struct qla_hw_data *ha = vha->hw;
2964 int rval = QLA_SUCCESS;
2965 void *ptr, *resp_ptr;
2967 /* Alloc SRB structure */
2968 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2970 ql_log(ql_log_info, vha, 0x70e6,
2971 "SRB allocation failed\n");
2972 fcport->flags &= ~FCF_ASYNC_ACTIVE;
2976 fcport->flags |= FCF_ASYNC_SENT;
2977 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
2978 elsio = &sp->u.iocb_cmd;
2979 ql_dbg(ql_dbg_io, vha, 0x3073,
2980 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2982 sp->type = SRB_ELS_DCMD;
2983 sp->name = "ELS_DCMD";
2984 sp->fcport = fcport;
2986 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2988 sp->flags = SRB_WAKEUP_ON_COMP;
2990 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2992 sp->done = qla2x00_els_dcmd2_sp_done;
2993 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2995 ptr = elsio->u.els_plogi.els_plogi_pyld =
2996 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
2997 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2999 if (!elsio->u.els_plogi.els_plogi_pyld) {
3000 rval = QLA_FUNCTION_FAILED;
3004 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
3005 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
3006 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
3008 if (!elsio->u.els_plogi.els_resp_pyld) {
3009 rval = QLA_FUNCTION_FAILED;
3013 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
3015 memset(ptr, 0, sizeof(struct els_plogi_payload));
3016 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
3017 memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
3018 &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);
3020 elsio->u.els_plogi.els_cmd = els_opcode;
3021 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
3023 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
3024 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
3025 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
3026 sizeof(*elsio->u.els_plogi.els_plogi_pyld));
3028 init_completion(&elsio->u.els_plogi.comp);
3029 rval = qla2x00_start_sp(sp);
3030 if (rval != QLA_SUCCESS) {
3031 rval = QLA_FUNCTION_FAILED;
3033 ql_dbg(ql_dbg_disc, vha, 0x3074,
3034 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
3035 sp->name, sp->handle, fcport->loop_id,
3036 fcport->d_id.b24, vha->d_id.b24);
3040 wait_for_completion(&elsio->u.els_plogi.comp);
3042 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
3043 rval = QLA_FUNCTION_FAILED;
3049 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3050 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
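/**
 * qla24xx_els_iocb() - Build an ELS pass-through IOCB from a bsg_job.
 * @sp: SRB wrapping the bsg_job
 * @els_iocb: IOCB to fill in
 */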
3057 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
3059 struct bsg_job *bsg_job = sp->u.bsg_job;
3060 struct fc_bsg_request *bsg_request = bsg_job->request;
3062 els_iocb->entry_type = ELS_IOCB_TYPE;
3063 els_iocb->entry_count = 1;
3064 els_iocb->sys_define = 0;
3065 els_iocb->entry_status = 0;
3066 els_iocb->handle = sp->handle;
3067 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3068 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3069 els_iocb->vp_index = sp->vha->vp_idx;
3070 els_iocb->sof_type = EST_SOFI3;
3071 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3074 sp->type == SRB_ELS_CMD_RPT ?
3075 bsg_request->rqst_data.r_els.els_code :
3076 bsg_request->rqst_data.h_els.command_code;
3077 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
3078 els_iocb->d_id[1] = sp->fcport->d_id.b.area;
3079 els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
3080 els_iocb->control_flags = 0;
3081 els_iocb->rx_byte_count =
3082 cpu_to_le32(bsg_job->reply_payload.payload_len);
3083 els_iocb->tx_byte_count =
3084 cpu_to_le32(bsg_job->request_payload.payload_len);
3086 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3087 &els_iocb->tx_address);
3088 els_iocb->tx_len = cpu_to_le32(sg_dma_len
3089 (bsg_job->request_payload.sg_list));
3091 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3092 &els_iocb->rx_address);
3093 els_iocb->rx_len = cpu_to_le32(sg_dma_len
3094 (bsg_job->reply_payload.sg_list));
3096 sp->vha->qla_stats.control_requests++;
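/**
 * qla2x00_ct_iocb() - Build a CT pass-through IOCB (pre-FWI2 ISPs).
 * @sp: SRB wrapping the bsg_job
 * @ct_iocb: MS IOCB to fill in
 *
 * The command and first response segment live in the IOCB itself; any
 * further response segments spill into Continuation Type 1 IOCBs.
 */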
3100 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3102 uint16_t avail_dsds;
3103 struct dsd64 *cur_dsd;
3104 struct scatterlist *sg;
3107 scsi_qla_host_t *vha = sp->vha;
3108 struct qla_hw_data *ha = vha->hw;
3109 struct bsg_job *bsg_job = sp->u.bsg_job;
3110 int entry_count = 1;
3112 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3113 ct_iocb->entry_type = CT_IOCB_TYPE;
3114 ct_iocb->entry_status = 0;
3115 ct_iocb->handle1 = sp->handle;
3116 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3117 ct_iocb->status = cpu_to_le16(0);
3118 ct_iocb->control_flags = cpu_to_le16(0);
3119 ct_iocb->timeout = 0;
3120 ct_iocb->cmd_dsd_count =
3121 cpu_to_le16(bsg_job->request_payload.sg_cnt);
3122 ct_iocb->total_dsd_count =
3123 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3124 ct_iocb->req_bytecount =
3125 cpu_to_le32(bsg_job->request_payload.payload_len);
3126 ct_iocb->rsp_bytecount =
3127 cpu_to_le32(bsg_job->reply_payload.payload_len);
3129 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3130 &ct_iocb->req_dsd.address);
3131 ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
3133 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3134 &ct_iocb->rsp_dsd.address);
3135 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
3138 cur_dsd = &ct_iocb->rsp_dsd;
3140 tot_dsds = bsg_job->reply_payload.sg_cnt;
3142 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3143 cont_a64_entry_t *cont_pkt;
3145 /* Allocate additional continuation packets? */
3146 if (avail_dsds == 0) {
3148 * Five DSDs are available in the Cont.
3151 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3152 vha->hw->req_q_map[0]);
3153 cur_dsd = cont_pkt->dsd;
3158 append_dsd64(&cur_dsd, sg);
3161 ct_iocb->entry_count = entry_count;
3163 sp->vha->qla_stats.control_requests++;
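/**
 * qla24xx_ct_iocb() - Build a CT pass-through IOCB (FWI2-capable ISPs).
 * @sp: SRB wrapping the bsg_job
 * @ct_iocb: IOCB to fill in
 *
 * Command and response scatter lists share the inline DSD array and
 * overflow into Continuation Type 1 IOCBs as needed.
 */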
3167 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3169 uint16_t avail_dsds;
3170 struct dsd64 *cur_dsd;
3171 struct scatterlist *sg;
3173 uint16_t cmd_dsds, rsp_dsds;
3174 scsi_qla_host_t *vha = sp->vha;
3175 struct qla_hw_data *ha = vha->hw;
3176 struct bsg_job *bsg_job = sp->u.bsg_job;
3177 int entry_count = 1;
3178 cont_a64_entry_t *cont_pkt = NULL;
3180 ct_iocb->entry_type = CT_IOCB_TYPE;
3181 ct_iocb->entry_status = 0;
3182 ct_iocb->sys_define = 0;
3183 ct_iocb->handle = sp->handle;
3185 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3186 ct_iocb->vp_index = sp->vha->vp_idx;
3187 ct_iocb->comp_status = cpu_to_le16(0);
3189 cmd_dsds = bsg_job->request_payload.sg_cnt;
3190 rsp_dsds = bsg_job->reply_payload.sg_cnt;
3192 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3193 ct_iocb->timeout = 0;
3194 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3195 ct_iocb->cmd_byte_count =
3196 cpu_to_le32(bsg_job->request_payload.payload_len);
3199 cur_dsd = ct_iocb->dsd;
3202 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3203 /* Allocate additional continuation packets? */
3204 if (avail_dsds == 0) {
3206 * Five DSDs are available in the Cont.
3209 cont_pkt = qla2x00_prep_cont_type1_iocb(
3210 vha, ha->req_q_map[0]);
3211 cur_dsd = cont_pkt->dsd;
3216 append_dsd64(&cur_dsd, sg);
3222 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3223 /* Allocate additional continuation packets? */
3224 if (avail_dsds == 0) {
3226 * Five DSDs are available in the Cont.
3229 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3231 cur_dsd = cont_pkt->dsd;
3236 append_dsd64(&cur_dsd, sg);
3239 ct_iocb->entry_count = entry_count;
3243 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3244 * @sp: command to send to the ISP
3246 * Returns non-zero if a failure occurred, else zero.
3249 qla82xx_start_scsi(srb_t *sp)
3252 unsigned long flags;
3253 struct scsi_cmnd *cmd;
3259 struct device_reg_82xx __iomem *reg;
3262 uint8_t additional_cdb_len;
3263 struct ct6_dsd *ctx;
3264 struct scsi_qla_host *vha = sp->vha;
3265 struct qla_hw_data *ha = vha->hw;
3266 struct req_que *req = NULL;
3267 struct rsp_que *rsp = NULL;
3269 /* Setup device pointers. */
3270 reg = &ha->iobase->isp82;
3271 cmd = GET_CMD_SP(sp);
3273 rsp = ha->rsp_q_map[0];
3275 /* So we know we haven't pci_map'ed anything yet */
3278 dbval = 0x04 | (ha->portnum << 5);
3280 /* Send marker if required */
3281 if (vha->marker_needed != 0) {
3282 if (qla2x00_marker(vha, ha->base_qpair,
3283 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3284 ql_log(ql_log_warn, vha, 0x300c,
3285 "qla2x00_marker failed for cmd=%p.\n", cmd);
3286 return QLA_FUNCTION_FAILED;
3288 vha->marker_needed = 0;
3291 /* Acquire ring specific lock */
3292 spin_lock_irqsave(&ha->hardware_lock, flags);
3294 handle = qla2xxx_get_next_handle(req);
3298 /* Map the sg table so we have an accurate count of sg entries needed */
3299 if (scsi_sg_count(cmd)) {
3300 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3301 scsi_sg_count(cmd), cmd->sc_data_direction);
3302 if (unlikely(!nseg))
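/*
* Commands whose scatter list exceeds ql2xshiftctondsd are sent as
* Command Type 6 IOCBs, which reference externally allocated DSD
* lists; smaller commands use the inline DSDs of a Command Type 7.
*/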
3309 if (tot_dsds > ql2xshiftctondsd) {
3310 struct cmd_type_6 *cmd_pkt;
3311 uint16_t more_dsd_lists = 0;
3312 struct dsd_dma *dsd_ptr;
3315 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3316 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3317 ql_dbg(ql_dbg_io, vha, 0x300d,
3318 "Num of DSD list %d is than %d for cmd=%p.\n",
3319 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3324 if (more_dsd_lists <= ha->gbl_dsd_avail)
3325 goto sufficient_dsds;
3327 more_dsd_lists -= ha->gbl_dsd_avail;
3329 for (i = 0; i < more_dsd_lists; i++) {
3330 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3332 ql_log(ql_log_fatal, vha, 0x300e,
3333 "Failed to allocate memory for dsd_dma "
3334 "for cmd=%p.\n", cmd);
3338 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3339 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3340 if (!dsd_ptr->dsd_addr) {
3342 ql_log(ql_log_fatal, vha, 0x300f,
3343 "Failed to allocate memory for dsd_addr "
3344 "for cmd=%p.\n", cmd);
3347 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3348 ha->gbl_dsd_avail++;
3354 if (req->cnt < (req_cnt + 2)) {
3355 cnt = (uint16_t)rd_reg_dword_relaxed(
3356 &reg->req_q_out[0]);
3357 if (req->ring_index < cnt)
3358 req->cnt = cnt - req->ring_index;
3360 req->cnt = req->length -
3361 (req->ring_index - cnt);
3362 if (req->cnt < (req_cnt + 2))
3366 ctx = sp->u.scmd.ct6_ctx =
3367 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3369 ql_log(ql_log_fatal, vha, 0x3010,
3370 "Failed to allocate ctx for cmd=%p.\n", cmd);
3374 memset(ctx, 0, sizeof(struct ct6_dsd));
3375 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3376 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3377 if (!ctx->fcp_cmnd) {
3378 ql_log(ql_log_fatal, vha, 0x3011,
3379 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3383 /* Initialize the DSD list and dma handle */
3384 INIT_LIST_HEAD(&ctx->dsd_list);
3385 ctx->dsd_use_cnt = 0;
3387 if (cmd->cmd_len > 16) {
3388 additional_cdb_len = cmd->cmd_len - 16;
3389 if ((cmd->cmd_len % 4) != 0) {
3390 /* A SCSI command longer than 16 bytes must be
3393 ql_log(ql_log_warn, vha, 0x3012,
3394 "scsi cmd len %d not multiple of 4 "
3395 "for cmd=%p.\n", cmd->cmd_len, cmd);
3396 goto queuing_error_fcp_cmnd;
3398 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3400 additional_cdb_len = 0;
3401 ctx->fcp_cmnd_len = 12 + 16 + 4;
3404 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3405 cmd_pkt->handle = make_handle(req->id, handle);
3407 /* Zero out remaining portion of packet. */
3408 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3409 clr_ptr = (uint32_t *)cmd_pkt + 2;
3410 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3411 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3413 /* Set NPORT-ID and LUN number*/
3414 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3415 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3416 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3417 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3418 cmd_pkt->vp_index = sp->vha->vp_idx;
3420 /* Build IOCB segments */
3421 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3422 goto queuing_error_fcp_cmnd;
3424 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3425 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3427 /* build FCP_CMND IU */
3428 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3429 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3431 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3432 ctx->fcp_cmnd->additional_cdb_len |= 1;
3433 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3434 ctx->fcp_cmnd->additional_cdb_len |= 2;
3436 /* Populate the FCP_PRIO. */
3437 if (ha->flags.fcp_prio_enabled)
3438 ctx->fcp_cmnd->task_attribute |=
3439 sp->fcport->fcp_prio << 3;
3441 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
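/* FCP_DL (the total data length) follows the CDB in the FCP_CMND IU. */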
3443 fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
3444 additional_cdb_len);
3445 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3447 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3448 put_unaligned_le64(ctx->fcp_cmnd_dma,
3449 &cmd_pkt->fcp_cmnd_dseg_address);
3451 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3452 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3453 /* Set total data segment count. */
3454 cmd_pkt->entry_count = (uint8_t)req_cnt;
3455 /* Specify response queue number where
3456 * completion should happen
3458 cmd_pkt->entry_status = (uint8_t) rsp->id;
3460 struct cmd_type_7 *cmd_pkt;
3462 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3463 if (req->cnt < (req_cnt + 2)) {
3464 cnt = (uint16_t)rd_reg_dword_relaxed(
3465 &reg->req_q_out[0]);
3466 if (req->ring_index < cnt)
3467 req->cnt = cnt - req->ring_index;
3469 req->cnt = req->length -
3470 (req->ring_index - cnt);
3472 if (req->cnt < (req_cnt + 2))
3475 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3476 cmd_pkt->handle = make_handle(req->id, handle);
3478 /* Zero out remaining portion of packet. */
3479 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3480 clr_ptr = (uint32_t *)cmd_pkt + 2;
3481 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3482 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3484 /* Set NPORT-ID and LUN number*/
3485 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3486 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3487 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3488 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3489 cmd_pkt->vp_index = sp->vha->vp_idx;
3491 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3492 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3493 sizeof(cmd_pkt->lun));
3495 /* Populate the FCP_PRIO. */
3496 if (ha->flags.fcp_prio_enabled)
3497 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3499 /* Load SCSI command packet. */
3500 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3501 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3503 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3505 /* Build IOCB segments */
3506 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3508 /* Set total data segment count. */
3509 cmd_pkt->entry_count = (uint8_t)req_cnt;
3510 /* Specify response queue number where
3511 * completion should happen.
3513 cmd_pkt->entry_status = (uint8_t) rsp->id;
3516 /* Build command packet. */
3517 req->current_outstanding_cmd = handle;
3518 req->outstanding_cmds[handle] = sp;
3519 sp->handle = handle;
3520 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3521 req->cnt -= req_cnt;
3524 /* Adjust ring index. */
3526 if (req->ring_index == req->length) {
3527 req->ring_index = 0;
3528 req->ring_ptr = req->ring;
3532 sp->flags |= SRB_DMA_VALID;
3534 /* Set chip new ring index. */
3535 /* write, read and verify logic */
3536 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3538 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3540 wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3542 while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
3543 wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3548 /* Manage unprocessed RIO/ZIO commands in response queue. */
3549 if (vha->flags.process_response_queue &&
3550 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3551 qla24xx_process_response_queue(vha, rsp);
3553 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3556 queuing_error_fcp_cmnd:
3557 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3560 scsi_dma_unmap(cmd);
3562 if (sp->u.scmd.crc_ctx) {
3563 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3564 sp->u.scmd.crc_ctx = NULL;
3566 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3568 return QLA_FUNCTION_FAILED;
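/**
 * qla24xx_abort_iocb() - Build an abort IOCB for a queued command.
 * @sp: abort SRB
 * @abt_iocb: IOCB to fill in
 *
 * The handle of the command being aborted is carried in handle_to_abort,
 * encoded together with the request queue it was issued on.
 */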
3572 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3574 struct srb_iocb *aio = &sp->u.iocb_cmd;
3575 scsi_qla_host_t *vha = sp->vha;
3576 struct req_que *req = sp->qpair->req;
3577 srb_t *orig_sp = sp->cmd_sp;
3579 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3580 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3581 abt_iocb->entry_count = 1;
3582 abt_iocb->handle = make_handle(req->id, sp->handle);
3584 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3585 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3586 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3587 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3589 abt_iocb->handle_to_abort =
3590 make_handle(le16_to_cpu(aio->u.abt.req_que_no),
3591 aio->u.abt.cmd_hndl);
3592 abt_iocb->vp_index = vha->vp_idx;
3593 abt_iocb->req_que_no = aio->u.abt.req_que_no;
3595 /* need to pass original sp */
3597 qla_nvme_abort_set_option(abt_iocb, orig_sp);
3599 /* Send the command to the firmware */
3604 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3608 mbx->entry_type = MBX_IOCB_TYPE;
3609 mbx->handle = sp->handle;
3610 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3612 for (i = 0; i < sz; i++)
3613 mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
3617 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3619 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3620 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3621 ct_pkt->handle = sp->handle;
3624 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3625 struct nack_to_isp *nack)
3627 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3629 nack->entry_type = NOTIFY_ACK_TYPE;
3630 nack->entry_count = 1;
3631 nack->ox_id = ntfy->ox_id;
3633 nack->u.isp24.handle = sp->handle;
3634 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3635 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3636 nack->u.isp24.flags = ntfy->u.isp24.flags &
3637 cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
3639 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3640 nack->u.isp24.status = ntfy->u.isp24.status;
3641 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3642 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3643 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3644 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3645 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3646 nack->u.isp24.srr_flags = 0;
3647 nack->u.isp24.srr_reject_code = 0;
3648 nack->u.isp24.srr_reject_code_expl = 0;
3649 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3653 * Build NVME LS request
3656 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3658 struct srb_iocb *nvme;
3660 nvme = &sp->u.iocb_cmd;
3661 cmd_pkt->entry_type = PT_LS4_REQUEST;
3662 cmd_pkt->entry_count = 1;
3663 cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
3665 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3666 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3667 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3669 cmd_pkt->tx_dseg_count = cpu_to_le16(1);
3670 cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
3671 cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
3672 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3674 cmd_pkt->rx_dseg_count = cpu_to_le16(1);
3675 cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
3676 cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
3677 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3681 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3685 vce->entry_type = VP_CTRL_IOCB_TYPE;
3686 vce->handle = sp->handle;
3687 vce->entry_count = 1;
3688 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3689 vce->vp_count = cpu_to_le16(1);
3692 * the index map in firmware starts at 1; decrement the index.
3693 * This is OK as we never use index 0.
3695 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3696 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3697 vce->vp_idx_map[map] |= 1 << pos;
3701 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3703 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3704 logio->control_flags =
3705 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3707 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3708 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3709 logio->port_id[1] = sp->fcport->d_id.b.area;
3710 logio->port_id[2] = sp->fcport->d_id.b.domain;
3711 logio->vp_index = sp->fcport->vha->vp_idx;
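/**
 * qla2x00_start_sp() - Reserve IOCB space and dispatch an SRB.
 * @sp: SRB to start
 *
 * Picks the IOCB builder matching sp->type, arms the command timer when
 * requested and rings the request-queue doorbell. A minimal sketch of the
 * calling convention, as used by the ELS helpers above (the error label
 * is hypothetical):
 *
 *	rval = qla2x00_start_sp(sp);
 *	if (rval != QLA_SUCCESS)
 *		goto done_free_sp;
 */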
3715 qla2x00_start_sp(srb_t *sp)
3717 int rval = QLA_SUCCESS;
3718 scsi_qla_host_t *vha = sp->vha;
3719 struct qla_hw_data *ha = vha->hw;
3720 struct qla_qpair *qp = sp->qpair;
3722 unsigned long flags;
3724 if (vha->hw->flags.eeh_busy)
3727 spin_lock_irqsave(qp->qp_lock_ptr, flags);
3728 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3731 ql_log(ql_log_warn, vha, 0x700c,
3732 "qla2x00_alloc_iocbs failed.\n");
3738 IS_FWI2_CAPABLE(ha) ?
3739 qla24xx_login_iocb(sp, pkt) :
3740 qla2x00_login_iocb(sp, pkt);
3743 qla24xx_prli_iocb(sp, pkt);
3745 case SRB_LOGOUT_CMD:
3746 IS_FWI2_CAPABLE(ha) ?
3747 qla24xx_logout_iocb(sp, pkt) :
3748 qla2x00_logout_iocb(sp, pkt);
3750 case SRB_ELS_CMD_RPT:
3751 case SRB_ELS_CMD_HST:
3752 qla24xx_els_iocb(sp, pkt);
3755 IS_FWI2_CAPABLE(ha) ?
3756 qla24xx_ct_iocb(sp, pkt) :
3757 qla2x00_ct_iocb(sp, pkt);
3760 IS_FWI2_CAPABLE(ha) ?
3761 qla24xx_adisc_iocb(sp, pkt) :
3762 qla2x00_adisc_iocb(sp, pkt);
3766 qlafx00_tm_iocb(sp, pkt) :
3767 qla24xx_tm_iocb(sp, pkt);
3769 case SRB_FXIOCB_DCMD:
3770 case SRB_FXIOCB_BCMD:
3771 qlafx00_fxdisc_iocb(sp, pkt);
3774 qla_nvme_ls(sp, pkt);
3778 qlafx00_abort_iocb(sp, pkt) :
3779 qla24xx_abort_iocb(sp, pkt);
3782 qla24xx_els_logo_iocb(sp, pkt);
3784 case SRB_CT_PTHRU_CMD:
3785 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3788 qla2x00_mb_iocb(sp, pkt);
3790 case SRB_NACK_PLOGI:
3793 qla2x00_send_notify_ack_iocb(sp, pkt);
3796 qla25xx_ctrlvp_iocb(sp, pkt);
3799 qla24xx_prlo_iocb(sp, pkt);
3805 if (sp->start_timer)
3806 add_timer(&sp->u.iocb_cmd.timer);
3809 qla2x00_start_iocbs(vha, qp->req);
3811 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
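/**
 * qla25xx_build_bidir_iocb() - Build a bidirectional command IOCB.
 * @sp: SRB wrapping the bsg_job
 * @vha: host the command is issued on
 * @cmd_pkt: IOCB to fill in
 * @tot_dsds: total number of data segment descriptors
 */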
3816 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3817 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3819 uint16_t avail_dsds;
3820 struct dsd64 *cur_dsd;
3821 uint32_t req_data_len = 0;
3822 uint32_t rsp_data_len = 0;
3823 struct scatterlist *sg;
3825 int entry_count = 1;
3826 struct bsg_job *bsg_job = sp->u.bsg_job;
3828 /* Update entry type to indicate a bidir command */
3829 put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3831 /* Set the transfer direction; in this case set both flags.
3832 * Also set the BD_WRAP_BACK flag; the firmware will take care of
3833 * assigning DID=SID for outgoing pkts.
3835 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3836 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3837 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3840 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3841 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3842 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3843 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3845 vha->bidi_stats.transfer_bytes += req_data_len;
3846 vha->bidi_stats.io_count++;
3848 vha->qla_stats.output_bytes += req_data_len;
3849 vha->qla_stats.output_requests++;
3851 /* Only one DSD is available in the bidirectional IOCB; the remaining
3852 * DSDs are bundled in continuation IOCBs.
3855 cur_dsd = &cmd_pkt->fcp_dsd;
3859 for_each_sg(bsg_job->request_payload.sg_list, sg,
3860 bsg_job->request_payload.sg_cnt, index) {
3861 cont_a64_entry_t *cont_pkt;
3863 /* Allocate additional continuation packets */
3864 if (avail_dsds == 0) {
3865 /* Continuation type 1 IOCB can accommodate
3868 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3869 cur_dsd = cont_pkt->dsd;
3873 append_dsd64(&cur_dsd, sg);
3876 /* For a read request the DSDs always go to a continuation IOCB
3877 * and follow the write DSDs. If there is room on the current IOCB
3878 * then they are added there; otherwise a new continuation IOCB is
3881 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3882 bsg_job->reply_payload.sg_cnt, index) {
3883 cont_a64_entry_t *cont_pkt;
3885 /* Allocate additional continuation packets */
3886 if (avail_dsds == 0) {
3887 /* Continuation type 1 IOCB can accommodate
3890 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3891 cur_dsd = cont_pkt->dsd;
3895 append_dsd64(&cur_dsd, sg);
3898 /* This value should be the same as the number of IOCBs required for this cmd */
3899 cmd_pkt->entry_count = entry_count;
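/**
 * qla2x00_start_bidir() - Queue a bidirectional command to the ISP.
 * @sp: command SRB
 * @vha: host to queue on
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns an EXT_STATUS_* code: EXT_STATUS_BUSY when no ring room or
 * handle is available, EXT_STATUS_MAILBOX when the marker IOCB fails.
 */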
3903 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3906 struct qla_hw_data *ha = vha->hw;
3907 unsigned long flags;
3912 struct cmd_bidir *cmd_pkt = NULL;
3913 struct rsp_que *rsp;
3914 struct req_que *req;
3915 int rval = EXT_STATUS_OK;
3919 rsp = ha->rsp_q_map[0];
3922 /* Send marker if required */
3923 if (vha->marker_needed != 0) {
3924 if (qla2x00_marker(vha, ha->base_qpair,
3925 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3926 return EXT_STATUS_MAILBOX;
3927 vha->marker_needed = 0;
3930 /* Acquire ring specific lock */
3931 spin_lock_irqsave(&ha->hardware_lock, flags);
3933 handle = qla2xxx_get_next_handle(req);
3935 rval = EXT_STATUS_BUSY;
3939 /* Calculate number of IOCB required */
3940 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3942 /* Check for room on request queue. */
3943 if (req->cnt < req_cnt + 2) {
3944 if (IS_SHADOW_REG_CAPABLE(ha)) {
3945 cnt = *req->out_ptr;
3947 cnt = rd_reg_dword_relaxed(req->req_q_out);
3948 if (qla2x00_check_reg16_for_disconnect(vha, cnt))
3952 if (req->ring_index < cnt)
3953 req->cnt = cnt - req->ring_index;
3955 req->cnt = req->length -
3956 (req->ring_index - cnt);
3958 if (req->cnt < req_cnt + 2) {
3959 rval = EXT_STATUS_BUSY;
3963 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3964 cmd_pkt->handle = make_handle(req->id, handle);
3966 /* Zero out remaining portion of packet. */
3967 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3968 clr_ptr = (uint32_t *)cmd_pkt + 2;
3969 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3971 /* Set NPORT-ID (of vha)*/
3972 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3973 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3974 cmd_pkt->port_id[1] = vha->d_id.b.area;
3975 cmd_pkt->port_id[2] = vha->d_id.b.domain;
3977 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3978 cmd_pkt->entry_status = (uint8_t) rsp->id;
3979 /* Build command packet. */
3980 req->current_outstanding_cmd = handle;
3981 req->outstanding_cmds[handle] = sp;
3982 sp->handle = handle;
3983 req->cnt -= req_cnt;
3985 /* Send the command to the firmware */
3987 qla2x00_start_iocbs(vha, req);
3989 spin_unlock_irqrestore(&ha->hardware_lock, flags);