1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
6 #include "qla_def.h"
7 #include "qla_target.h"
9 #include <linux/blkdev.h>
10 #include <linux/delay.h>
12 #include <scsi/scsi_tcq.h>
15 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
18 * Returns the proper CF_* direction based on CDB.
20 static inline uint16_t
21 qla2x00_get_cmd_direction(srb_t *sp)
24 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
25 struct scsi_qla_host *vha = sp->vha;
29 /* Set transfer direction */
30 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
32 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
33 vha->qla_stats.output_requests++;
34 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
36 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
37 vha->qla_stats.input_requests++;
43 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
44 * Continuation Type 0 IOCBs to allocate.
46 * @dsds: number of data segment descriptors needed
48 * Returns the number of IOCB entries needed to store @dsds.
51 qla2x00_calc_iocbs_32(uint16_t dsds)
57 iocbs += (dsds - 3) / 7;
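/*
 * Worked example for the arithmetic above (sketch; the elided lines add
 * one more IOCB for any remainder and return the count): the Command
 * Type 2 IOCB itself carries 3 DSDs and each Continuation Type 0 IOCB
 * carries 7 more, so for dsds = 18:
 *
 *	iocbs = 1 + (18 - 3) / 7 = 3, plus 1 for the remainder (15 % 7)
 *	      = 4 request-queue entries.
 */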
65 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
66 * Continuation Type 1 IOCBs to allocate.
68 * @dsds: number of data segment descriptors needed
70 * Returns the number of IOCB entries needed to store @dsds.
73 qla2x00_calc_iocbs_64(uint16_t dsds)
79 iocbs += (dsds - 2) / 5;
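/*
 * Illustrative sketch (not part of the driver): both calc helpers are
 * instances of the same ceiling division, parameterized by how many
 * DSDs fit in the command IOCB (3 for Type 2, 2 for Type 3) and in each
 * continuation IOCB (7 for Type 0, 5 for Type 1).
 */
#if 0	/* example only */
static uint16_t calc_iocbs(uint16_t dsds, uint16_t base, uint16_t per_cont)
{
	uint16_t iocbs = 1;

	if (dsds > base) {
		iocbs += (dsds - base) / per_cont;
		if ((dsds - base) % per_cont)
			iocbs++;
	}
	return iocbs;	/* calc_iocbs(12, 2, 5) == 3, matching the _64 case */
}
#endif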
87 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
90 * Returns a pointer to the Continuation Type 0 IOCB packet.
92 static inline cont_entry_t *
93 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
95 cont_entry_t *cont_pkt;
96 struct req_que *req = vha->req;
97 /* Adjust ring index. */
99 if (req->ring_index == req->length) {
101 req->ring_ptr = req->ring;
106 cont_pkt = (cont_entry_t *)req->ring_ptr;
108 /* Load packet defaults. */
109 put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);
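/*
 * Sketch of the ring-advance idiom used by both prep_cont helpers (the
 * elided lines increment the index and pointer):
 *
 *	req->ring_index++;
 *	if (req->ring_index == req->length) {
 *		req->ring_index = 0;
 *		req->ring_ptr = req->ring;
 *	} else
 *		req->ring_ptr++;
 *
 * i.e. the request queue is a circular buffer; when the producer index
 * reaches the end, both the index and the entry pointer wrap.
 */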
115 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
117 * @req: request queue
119 * Returns a pointer to the continuation type 1 IOCB packet.
121 static inline cont_a64_entry_t *
122 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
124 cont_a64_entry_t *cont_pkt;
126 /* Adjust ring index. */
128 if (req->ring_index == req->length) {
130 req->ring_ptr = req->ring;
135 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
137 /* Load packet defaults. */
138 put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
139 CONTINUE_A64_TYPE, &cont_pkt->entry_type);
145 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
147 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
148 uint8_t guard = scsi_host_get_guard(cmd->device->host);
150 /* We always use DIF bundling for best performance */
153 /* Translate SCSI opcode to a protection opcode */
154 switch (scsi_get_prot_op(cmd)) {
155 case SCSI_PROT_READ_STRIP:
156 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
158 case SCSI_PROT_WRITE_INSERT:
159 *fw_prot_opts |= PO_MODE_DIF_INSERT;
161 case SCSI_PROT_READ_INSERT:
162 *fw_prot_opts |= PO_MODE_DIF_INSERT;
164 case SCSI_PROT_WRITE_STRIP:
165 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
167 case SCSI_PROT_READ_PASS:
168 case SCSI_PROT_WRITE_PASS:
169 if (guard & SHOST_DIX_GUARD_IP)
170 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
172 *fw_prot_opts |= PO_MODE_DIF_PASS;
174 default: /* Normal Request */
175 *fw_prot_opts |= PO_MODE_DIF_PASS;
179 return scsi_prot_sg_count(cmd);
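/*
 * Summary of the translation above: STRIP operations map to
 * PO_MODE_DIF_REMOVE, INSERT operations to PO_MODE_DIF_INSERT, and PASS
 * (as well as the default case) to PO_MODE_DIF_PASS, optionally with
 * PO_MODE_DIF_TCP_CKSUM when the host asked for an IP-checksum guard.
 * The return value is the number of protection SG entries, which the
 * DIF start_scsi paths below use to decide whether a protection DMA
 * mapping is needed.
 */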
183 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
184 * capable IOCB types.
186 * @sp: SRB command to process
187 * @cmd_pkt: Command type 2 IOCB
188 * @tot_dsds: Total number of segments to transfer
190 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
194 struct dsd32 *cur_dsd;
195 scsi_qla_host_t *vha;
196 struct scsi_cmnd *cmd;
197 struct scatterlist *sg;
200 cmd = GET_CMD_SP(sp);
202 /* Update entry type to indicate Command Type 2 IOCB */
203 put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);
205 /* No data transfer */
206 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
207 cmd_pkt->byte_count = cpu_to_le32(0);
212 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
214 /* Three DSDs are available in the Command Type 2 IOCB */
215 avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
216 cur_dsd = cmd_pkt->dsd32;
218 /* Load data segments */
219 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
220 cont_entry_t *cont_pkt;
222 /* Allocate additional continuation packets? */
223 if (avail_dsds == 0) {
225 * Seven DSDs are available in the Continuation
228 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
229 cur_dsd = cont_pkt->dsd;
230 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
233 append_dsd32(&cur_dsd, sg);
239 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
240 * capable IOCB types.
242 * @sp: SRB command to process
243 * @cmd_pkt: Command type 3 IOCB
244 * @tot_dsds: Total number of segments to transfer
246 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
250 struct dsd64 *cur_dsd;
251 scsi_qla_host_t *vha;
252 struct scsi_cmnd *cmd;
253 struct scatterlist *sg;
256 cmd = GET_CMD_SP(sp);
258 /* Update entry type to indicate Command Type 3 IOCB */
259 put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);
261 /* No data transfer */
262 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
263 cmd_pkt->byte_count = cpu_to_le32(0);
268 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
270 /* Two DSDs are available in the Command Type 3 IOCB */
271 avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
272 cur_dsd = cmd_pkt->dsd64;
274 /* Load data segments */
275 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
276 cont_a64_entry_t *cont_pkt;
278 /* Allocate additional continuation packets? */
279 if (avail_dsds == 0) {
281 * Five DSDs are available in the Continuation
284 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
285 cur_dsd = cont_pkt->dsd;
286 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
289 append_dsd64(&cur_dsd, sg);
295 * Find the first handle that is not in use, starting from
296 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
297 * associated with @req.
299 uint32_t qla2xxx_get_next_handle(struct req_que *req)
301 uint32_t index, handle = req->current_outstanding_cmd;
303 for (index = 1; index < req->num_outstanding_cmds; index++) {
305 if (handle == req->num_outstanding_cmds)
307 if (!req->outstanding_cmds[handle])
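/*
 * Sketch of the complete search (the elided lines advance and wrap the
 * handle and return it when a free slot is found): handles live in
 * [1, num_outstanding_cmds); 0 is reserved to mean "no room".
 *
 *	for (index = 1; index < req->num_outstanding_cmds; index++) {
 *		handle++;
 *		if (handle == req->num_outstanding_cmds)
 *			handle = 1;
 *		if (!req->outstanding_cmds[handle])
 *			return handle;
 *	}
 *	return 0;
 */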
315 * qla2x00_start_scsi() - Send a SCSI command to the ISP
316 * @sp: command to send to the ISP
318 * Returns non-zero if a failure occurred, else zero.
321 qla2x00_start_scsi(srb_t *sp)
325 scsi_qla_host_t *vha;
326 struct scsi_cmnd *cmd;
329 cmd_entry_t *cmd_pkt;
333 struct device_reg_2xxx __iomem *reg;
334 struct qla_hw_data *ha;
338 /* Setup device pointers. */
341 reg = &ha->iobase->isp;
342 cmd = GET_CMD_SP(sp);
343 req = ha->req_q_map[0];
344 rsp = ha->rsp_q_map[0];
345 /* So we know we haven't pci_map'ed anything yet */
348 /* Send marker if required */
349 if (vha->marker_needed != 0) {
350 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
352 return (QLA_FUNCTION_FAILED);
354 vha->marker_needed = 0;
357 /* Acquire ring specific lock */
358 spin_lock_irqsave(&ha->hardware_lock, flags);
360 handle = qla2xxx_get_next_handle(req);
364 /* Map the sg table so we have an accurate count of sg entries needed */
365 if (scsi_sg_count(cmd)) {
366 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
367 scsi_sg_count(cmd), cmd->sc_data_direction);
375 /* Calculate the number of request entries needed. */
376 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
377 if (req->cnt < (req_cnt + 2)) {
378 cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
379 if (req->ring_index < cnt)
380 req->cnt = cnt - req->ring_index;
382 req->cnt = req->length -
383 (req->ring_index - cnt);
384 /* If still no head room then bail out */
385 if (req->cnt < (req_cnt + 2))
389 /* Build command packet */
390 req->current_outstanding_cmd = handle;
391 req->outstanding_cmds[handle] = sp;
393 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
396 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
397 cmd_pkt->handle = handle;
398 /* Zero out remaining portion of packet. */
399 clr_ptr = (uint32_t *)cmd_pkt + 2;
400 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
401 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
403 /* Set target ID and LUN number*/
404 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
405 cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
406 cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
408 /* Load SCSI command packet. */
409 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
410 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
412 /* Build IOCB segments */
413 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
415 /* Set total data segment count. */
416 cmd_pkt->entry_count = (uint8_t)req_cnt;
419 /* Adjust ring index. */
421 if (req->ring_index == req->length) {
423 req->ring_ptr = req->ring;
427 sp->flags |= SRB_DMA_VALID;
429 /* Set chip new ring index. */
430 wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
431 rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
433 /* Manage unprocessed RIO/ZIO commands in response queue. */
434 if (vha->flags.process_response_queue &&
435 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
436 qla2x00_process_response_queue(rsp);
438 spin_unlock_irqrestore(&ha->hardware_lock, flags);
439 return (QLA_SUCCESS);
445 spin_unlock_irqrestore(&ha->hardware_lock, flags);
447 return (QLA_FUNCTION_FAILED);
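/*
 * Worked example of the queue-room check used by all start_scsi
 * variants: req->cnt is the number of free request-queue entries. With
 * req->length = 128, req->ring_index = 120 (producer) and the hardware
 * out-pointer cnt = 10 (consumer), ring_index >= cnt, so
 *
 *	req->cnt = 128 - (120 - 10) = 18
 *
 * and a request needing req_cnt entries is queued only if
 * req->cnt >= req_cnt + 2, keeping a two-entry safety gap between
 * producer and consumer.
 */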
451 * qla2x00_start_iocbs() - Execute the IOCB command
453 * @req: request queue
456 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
458 struct qla_hw_data *ha = vha->hw;
459 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
461 if (IS_P3P_TYPE(ha)) {
462 qla82xx_start_iocbs(vha);
464 /* Adjust ring index. */
466 if (req->ring_index == req->length) {
468 req->ring_ptr = req->ring;
472 /* Set chip new ring index. */
473 if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
474 wrt_reg_dword(req->req_q_in, req->ring_index);
475 } else if (IS_QLA83XX(ha)) {
476 wrt_reg_dword(req->req_q_in, req->ring_index);
477 rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
478 } else if (IS_QLAFX00(ha)) {
479 wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
480 rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
481 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
482 } else if (IS_FWI2_CAPABLE(ha)) {
483 wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
484 rd_reg_dword_relaxed(&reg->isp24.req_q_in);
486 wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
488 rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
494 * qla2x00_marker() - Send a marker IOCB to the firmware.
496 * @qpair: queue pair pointer
499 * @type: marker modifier
501 * Can be called from both normal and interrupt context.
503 * Returns non-zero if a failure occurred, else zero.
506 __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
507 uint16_t loop_id, uint64_t lun, uint8_t type)
510 struct mrk_entry_24xx *mrk24 = NULL;
511 struct req_que *req = qpair->req;
512 struct qla_hw_data *ha = vha->hw;
513 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
515 mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
517 ql_log(ql_log_warn, base_vha, 0x3026,
518 "Failed to allocate Marker IOCB.\n");
520 return (QLA_FUNCTION_FAILED);
523 mrk->entry_type = MARKER_TYPE;
524 mrk->modifier = type;
525 if (type != MK_SYNC_ALL) {
526 if (IS_FWI2_CAPABLE(ha)) {
527 mrk24 = (struct mrk_entry_24xx *) mrk;
528 mrk24->nport_handle = cpu_to_le16(loop_id);
529 int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
530 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
531 mrk24->vp_index = vha->vp_idx;
532 mrk24->handle = make_handle(req->id, mrk24->handle);
534 SET_TARGET_ID(ha, mrk->target, loop_id);
535 mrk->lun = cpu_to_le16((uint16_t)lun);
540 qla2x00_start_iocbs(vha, req);
542 return (QLA_SUCCESS);
546 qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
547 uint16_t loop_id, uint64_t lun, uint8_t type)
550 unsigned long flags = 0;
552 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
553 ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
554 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
560 * qla2x00_issue_marker
563 * Caller CAN have hardware lock held as specified by ha_locked parameter.
564 * Might release it, then reacquire.
566 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
568 if (ha_locked) {
569 if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
570 MK_SYNC_ALL) != QLA_SUCCESS)
571 return QLA_FUNCTION_FAILED;
572 } else {
573 if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
574 MK_SYNC_ALL) != QLA_SUCCESS)
575 return QLA_FUNCTION_FAILED;
576 }
577 vha->marker_needed = 0;
583 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
586 struct dsd64 *cur_dsd = NULL, *next_dsd;
587 scsi_qla_host_t *vha;
588 struct qla_hw_data *ha;
589 struct scsi_cmnd *cmd;
590 struct scatterlist *cur_seg;
592 uint8_t first_iocb = 1;
593 uint32_t dsd_list_len;
594 struct dsd_dma *dsd_ptr;
597 cmd = GET_CMD_SP(sp);
599 /* Update entry type to indicate Command Type 6 IOCB */
600 put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
602 /* No data transfer */
603 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
604 cmd_pkt->byte_count = cpu_to_le32(0);
611 /* Set transfer direction */
612 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
613 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
614 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
615 vha->qla_stats.output_requests++;
616 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
617 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
618 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
619 vha->qla_stats.input_requests++;
622 cur_seg = scsi_sglist(cmd);
623 ctx = sp->u.scmd.ct6_ctx;
626 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
627 QLA_DSDS_PER_IOCB : tot_dsds;
628 tot_dsds -= avail_dsds;
629 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
631 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
632 struct dsd_dma, list);
633 next_dsd = dsd_ptr->dsd_addr;
634 list_del(&dsd_ptr->list);
636 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
642 put_unaligned_le64(dsd_ptr->dsd_list_dma,
643 &cmd_pkt->fcp_dsd.address);
644 cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
646 put_unaligned_le64(dsd_ptr->dsd_list_dma,
648 cur_dsd->length = cpu_to_le32(dsd_list_len);
653 append_dsd64(&cur_dsd, cur_seg);
654 cur_seg = sg_next(cur_seg);
659 /* Null termination */
660 cur_dsd->address = 0;
663 cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
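/*
 * Note on the flag above: judging from the builders in this file,
 * CF_DATA_SEG_DESCR_ENABLE marks a command whose data segments are
 * described through chained DSD lists (built from ha->gbl_dsd_list
 * entries here) rather than inline DSDs; the CRC_2 path sets the same
 * flag before walking its data segments.
 */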
668 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
669 * for Command Type 6.
671 * @dsds: number of data segment descriptors needed
673 * Returns the number of DSD lists needed to store @dsds.
675 static inline uint16_t
676 qla24xx_calc_dsd_lists(uint16_t dsds)
678 uint16_t dsd_lists = 0;
680 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
681 if (dsds % QLA_DSDS_PER_IOCB)
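/*
 * Worked example (assuming QLA_DSDS_PER_IOCB is 37, per qla_def.h; the
 * elided lines bump and return dsd_lists): dsds = 80 needs 80/37 = 2
 * full DSD lists plus one more for the 6 left over, so 3 lists total.
 */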
688 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
691 * @sp: SRB command to process
692 * @cmd_pkt: Command type 7 IOCB
693 * @tot_dsds: Total number of segments to transfer
694 * @req: pointer to request queue
697 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
698 uint16_t tot_dsds, struct req_que *req)
701 struct dsd64 *cur_dsd;
702 scsi_qla_host_t *vha;
703 struct scsi_cmnd *cmd;
704 struct scatterlist *sg;
707 cmd = GET_CMD_SP(sp);
709 /* Update entry type to indicate Command Type 7 IOCB */
710 put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);
712 /* No data transfer */
713 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
714 cmd_pkt->byte_count = cpu_to_le32(0);
720 /* Set transfer direction */
721 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
722 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
723 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
724 vha->qla_stats.output_requests++;
725 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
726 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
727 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
728 vha->qla_stats.input_requests++;
731 /* One DSD is available in the Command Type 7 IOCB */
733 cur_dsd = &cmd_pkt->dsd;
735 /* Load data segments */
737 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
738 cont_a64_entry_t *cont_pkt;
740 /* Allocate additional continuation packets? */
741 if (avail_dsds == 0) {
743 * Five DSDs are available in the Continuation
746 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
747 cur_dsd = cont_pkt->dsd;
748 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
751 append_dsd64(&cur_dsd, sg);
756 struct fw_dif_context {
759 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
760 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
764 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
768 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
769 unsigned int protcnt)
771 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
773 switch (scsi_get_prot_type(cmd)) {
774 case SCSI_PROT_DIF_TYPE0:
776 * No check for ql2xenablehba_err_chk, as it would be an
777 * I/O error if hba tag generation is not done.
779 pkt->ref_tag = cpu_to_le32((uint32_t)
780 (0xffffffff & scsi_get_lba(cmd)));
782 if (!qla2x00_hba_err_chk_enabled(sp))
785 pkt->ref_tag_mask[0] = 0xff;
786 pkt->ref_tag_mask[1] = 0xff;
787 pkt->ref_tag_mask[2] = 0xff;
788 pkt->ref_tag_mask[3] = 0xff;
792 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
793 * match LBA in CDB + N
795 case SCSI_PROT_DIF_TYPE2:
796 pkt->app_tag = cpu_to_le16(0);
797 pkt->app_tag_mask[0] = 0x0;
798 pkt->app_tag_mask[1] = 0x0;
800 pkt->ref_tag = cpu_to_le32((uint32_t)
801 (0xffffffff & scsi_get_lba(cmd)));
803 if (!qla2x00_hba_err_chk_enabled(sp))
806 /* enable ALL bytes of the ref tag */
807 pkt->ref_tag_mask[0] = 0xff;
808 pkt->ref_tag_mask[1] = 0xff;
809 pkt->ref_tag_mask[2] = 0xff;
810 pkt->ref_tag_mask[3] = 0xff;
813 /* For Type 3 protection: 16 bit GUARD only */
814 case SCSI_PROT_DIF_TYPE3:
815 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
816 pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
821 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
824 case SCSI_PROT_DIF_TYPE1:
825 pkt->ref_tag = cpu_to_le32((uint32_t)
826 (0xffffffff & scsi_get_lba(cmd)));
827 pkt->app_tag = cpu_to_le16(0);
828 pkt->app_tag_mask[0] = 0x0;
829 pkt->app_tag_mask[1] = 0x0;
831 if (!qla2x00_hba_err_chk_enabled(sp))
834 /* enable ALL bytes of the ref tag */
835 pkt->ref_tag_mask[0] = 0xff;
836 pkt->ref_tag_mask[1] = 0xff;
837 pkt->ref_tag_mask[2] = 0xff;
838 pkt->ref_tag_mask[3] = 0xff;
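/*
 * Example of the Type 1 tagging above: for a request starting at LBA
 * 0x12345678, ref_tag is seeded with the low 32 bits of the LBA and all
 * four ref_tag_mask bytes are 0xff, so the hardware checks every byte
 * of the reference tag; the app tag is zeroed with a zero mask, i.e.
 * application-tag checking is disabled.
 */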
844 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
847 struct scatterlist *sg;
848 uint32_t cumulative_partial, sg_len;
849 dma_addr_t sg_dma_addr;
851 if (sgx->num_bytes == sgx->tot_bytes)
855 cumulative_partial = sgx->tot_partial;
857 sg_dma_addr = sg_dma_address(sg);
858 sg_len = sg_dma_len(sg);
860 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
862 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
863 sgx->dma_len = (blk_sz - cumulative_partial);
864 sgx->tot_partial = 0;
865 sgx->num_bytes += blk_sz;
868 sgx->dma_len = sg_len - sgx->bytes_consumed;
869 sgx->tot_partial += sgx->dma_len;
873 sgx->bytes_consumed += sgx->dma_len;
875 if (sg_len == sgx->bytes_consumed) {
879 sgx->bytes_consumed = 0;
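/*
 * Worked example of the block walker above (sketch): with a 512-byte
 * protection interval and a data SG list of 700 + 324 bytes, successive
 * calls yield DMA chunks of 512 (block complete), 188 (partial,
 * tot_partial = 188), and 324 (completes the second block), after which
 * num_bytes == tot_bytes and the walker returns 0.
 */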
886 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
887 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
890 uint8_t avail_dsds = 0;
891 uint32_t dsd_list_len;
892 struct dsd_dma *dsd_ptr;
893 struct scatterlist *sg_prot;
894 struct dsd64 *cur_dsd = dsd;
895 uint16_t used_dsds = tot_dsds;
896 uint32_t prot_int; /* protection interval */
900 uint32_t sle_dma_len, tot_prot_dma_len = 0;
901 struct scsi_cmnd *cmd;
903 memset(&sgx, 0, sizeof(struct qla2_sgx));
905 cmd = GET_CMD_SP(sp);
906 prot_int = cmd->device->sector_size;
908 sgx.tot_bytes = scsi_bufflen(cmd);
909 sgx.cur_sg = scsi_sglist(cmd);
912 sg_prot = scsi_prot_sglist(cmd);
914 prot_int = tc->blk_sz;
915 sgx.tot_bytes = tc->bufflen;
917 sg_prot = tc->prot_sg;
923 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
925 sle_dma = sgx.dma_addr;
926 sle_dma_len = sgx.dma_len;
928 /* Allocate additional continuation packets? */
929 if (avail_dsds == 0) {
930 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
931 QLA_DSDS_PER_IOCB : used_dsds;
932 dsd_list_len = (avail_dsds + 1) * 12;
933 used_dsds -= avail_dsds;
935 /* allocate tracking DS */
936 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
940 /* allocate new list */
941 dsd_ptr->dsd_addr = next_dsd =
942 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
943 &dsd_ptr->dsd_list_dma);
947 * Need to cleanup only this dsd_ptr, rest
948 * will be done by sp_free_dma()
955 list_add_tail(&dsd_ptr->list,
956 &sp->u.scmd.crc_ctx->dsd_list);
958 sp->flags |= SRB_CRC_CTX_DSD_VALID;
960 list_add_tail(&dsd_ptr->list,
961 &(tc->ctx->dsd_list));
962 *tc->ctx_dsd_alloced = 1;
966 /* add new list to cmd iocb or last list */
967 put_unaligned_le64(dsd_ptr->dsd_list_dma,
969 cur_dsd->length = cpu_to_le32(dsd_list_len);
972 put_unaligned_le64(sle_dma, &cur_dsd->address);
973 cur_dsd->length = cpu_to_le32(sle_dma_len);
978 /* Got a full protection interval */
979 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
982 tot_prot_dma_len += sle_dma_len;
983 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
984 tot_prot_dma_len = 0;
985 sg_prot = sg_next(sg_prot);
988 partial = 1; /* So as to not re-enter this block */
992 /* Null termination */
993 cur_dsd->address = 0;
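/*
 * Layout note for the chained DSD lists built above (an inference from
 * the sizing here): each dsd64 entry is 12 bytes (64-bit address plus
 * 32-bit length), and a list sized for avail_dsds entries is allocated
 * as (avail_dsds + 1) * 12 bytes; the extra slot appears to hold the
 * chain pointer to the next list, or the null terminator written at the
 * end, which is why cur_dsd is re-pointed at each new list.
 */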
1000 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
1001 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
1004 uint8_t avail_dsds = 0;
1005 uint32_t dsd_list_len;
1006 struct dsd_dma *dsd_ptr;
1007 struct scatterlist *sg, *sgl;
1008 struct dsd64 *cur_dsd = dsd;
1010 uint16_t used_dsds = tot_dsds;
1011 struct scsi_cmnd *cmd;
1014 cmd = GET_CMD_SP(sp);
1015 sgl = scsi_sglist(cmd);
1024 for_each_sg(sgl, sg, tot_dsds, i) {
1025 /* Allocate additional continuation packets? */
1026 if (avail_dsds == 0) {
1027 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1028 QLA_DSDS_PER_IOCB : used_dsds;
1029 dsd_list_len = (avail_dsds + 1) * 12;
1030 used_dsds -= avail_dsds;
1032 /* allocate tracking DS */
1033 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1037 /* allocate new list */
1038 dsd_ptr->dsd_addr = next_dsd =
1039 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1040 &dsd_ptr->dsd_list_dma);
1044 * Need to cleanup only this dsd_ptr, rest
1045 * will be done by sp_free_dma()
1052 list_add_tail(&dsd_ptr->list,
1053 &sp->u.scmd.crc_ctx->dsd_list);
1055 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1057 list_add_tail(&dsd_ptr->list,
1058 &(tc->ctx->dsd_list));
1059 *tc->ctx_dsd_alloced = 1;
1062 /* add new list to cmd iocb or last list */
1063 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1065 cur_dsd->length = cpu_to_le32(dsd_list_len);
1068 append_dsd64(&cur_dsd, sg);
1072 /* Null termination */
1073 cur_dsd->address = 0;
1074 cur_dsd->length = 0;
1080 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1081 struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1083 struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
1084 struct scatterlist *sg, *sgl;
1085 struct crc_context *difctx = NULL;
1086 struct scsi_qla_host *vha;
1088 uint avail_dsds = 0;
1089 uint used_dsds = tot_dsds;
1090 bool dif_local_dma_alloc = false;
1091 bool direction_to_device = false;
1095 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1097 sgl = scsi_prot_sglist(cmd);
1099 difctx = sp->u.scmd.crc_ctx;
1100 direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
1101 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1102 "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
1103 __func__, cmd, difctx, sp);
1108 direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
1114 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1115 "%s: enter (write=%u)\n", __func__, direction_to_device);
1117 /* if initiator doing write or target doing read */
1118 if (direction_to_device) {
1119 for_each_sg(sgl, sg, tot_dsds, i) {
1120 u64 sle_phys = sg_phys(sg);
1122 /* If SGE addr + len flips bits in upper 32-bits */
1123 if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
1124 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
1125 "%s: page boundary crossing (phys=%llx len=%x)\n",
1126 __func__, sle_phys, sg->length);
1129 ha->dif_bundle_crossed_pages++;
1130 dif_local_dma_alloc = true;
1132 ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1134 "%s: difctx pointer is NULL\n",
1140 ha->dif_bundle_writes++;
1142 ha->dif_bundle_reads++;
1145 if (ql2xdifbundlinginternalbuffers)
1146 dif_local_dma_alloc = direction_to_device;
1148 if (dif_local_dma_alloc) {
1149 u32 track_difbundl_buf = 0;
1150 u32 ldma_sg_len = 0;
1153 difctx->no_dif_bundl = 0;
1154 difctx->dif_bundl_len = 0;
1156 /* Track DSD buffers */
1157 INIT_LIST_HEAD(&difctx->ldif_dsd_list);
1158 /* Track local DMA buffers */
1159 INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
1161 for_each_sg(sgl, sg, tot_dsds, i) {
1162 u32 sglen = sg_dma_len(sg);
1164 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
1165 "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
1166 __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
1167 difctx->dif_bundl_len, ldma_needed);
1174 * Allocate list item to store
1177 dsd_ptr = kzalloc(sizeof(*dsd_ptr),
1180 ql_dbg(ql_dbg_tgt, vha, 0xe024,
1181 "%s: failed alloc dsd_ptr\n",
1185 ha->dif_bundle_kallocs++;
1187 /* allocate dma buffer */
1188 dsd_ptr->dsd_addr = dma_pool_alloc
1189 (ha->dif_bundl_pool, GFP_ATOMIC,
1190 &dsd_ptr->dsd_list_dma);
1191 if (!dsd_ptr->dsd_addr) {
1192 ql_dbg(ql_dbg_tgt, vha, 0xe024,
1193 "%s: failed alloc ->dsd_ptr\n",
1196 * need to cleanup only this
1197 * dsd_ptr rest will be done
1201 ha->dif_bundle_kallocs--;
1204 ha->dif_bundle_dma_allocs++;
1206 difctx->no_dif_bundl++;
1207 list_add_tail(&dsd_ptr->list,
1208 &difctx->ldif_dma_hndl_list);
1211 /* xfrlen is min of dma pool size and sglen */
1212 xfrlen = (sglen >
1213 (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
1214 DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
1215 sglen;
1217 /* replace with local allocated dma buffer */
1218 sg_pcopy_to_buffer(sgl, sg_nents(sgl),
1219 dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
1220 difctx->dif_bundl_len);
1221 difctx->dif_bundl_len += xfrlen;
1223 ldma_sg_len += xfrlen;
1224 if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
1232 track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
1233 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
1234 "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
1235 difctx->dif_bundl_len, difctx->no_dif_bundl,
1236 track_difbundl_buf);
1239 sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
1241 tc->prot_flags = DIF_BUNDL_DMA_VALID;
1243 list_for_each_entry_safe(dif_dsd, nxt_dsd,
1244 &difctx->ldif_dma_hndl_list, list) {
1245 u32 sglen = (difctx->dif_bundl_len >
1246 DIF_BUNDLING_DMA_POOL_SIZE) ?
1247 DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
1249 BUG_ON(track_difbundl_buf == 0);
1251 /* Allocate additional continuation packets? */
1252 if (avail_dsds == 0) {
1253 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
1255 "%s: adding continuation iocb's\n",
1257 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1258 QLA_DSDS_PER_IOCB : used_dsds;
1259 dsd_list_len = (avail_dsds + 1) * 12;
1260 used_dsds -= avail_dsds;
1262 /* allocate tracking DS */
1263 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1265 ql_dbg(ql_dbg_tgt, vha, 0xe026,
1266 "%s: failed alloc dsd_ptr\n",
1270 ha->dif_bundle_kallocs++;
1272 difctx->no_ldif_dsd++;
1273 /* allocate new list */
1275 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1276 &dsd_ptr->dsd_list_dma);
1277 if (!dsd_ptr->dsd_addr) {
1278 ql_dbg(ql_dbg_tgt, vha, 0xe026,
1279 "%s: failed alloc ->dsd_addr\n",
1282 * need to cleanup only this dsd_ptr
1283 * rest will be done by sp_free_dma()
1286 ha->dif_bundle_kallocs--;
1289 ha->dif_bundle_dma_allocs++;
1292 list_add_tail(&dsd_ptr->list,
1293 &difctx->ldif_dsd_list);
1294 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1296 list_add_tail(&dsd_ptr->list,
1297 &difctx->ldif_dsd_list);
1298 tc->ctx_dsd_alloced = 1;
1301 /* add new list to cmd iocb or last list */
1302 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1304 cur_dsd->length = cpu_to_le32(dsd_list_len);
1305 cur_dsd = dsd_ptr->dsd_addr;
1307 put_unaligned_le64(dif_dsd->dsd_list_dma,
1309 cur_dsd->length = cpu_to_le32(sglen);
1312 difctx->dif_bundl_len -= sglen;
1313 track_difbundl_buf--;
1316 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
1317 "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
1318 difctx->no_ldif_dsd, difctx->no_dif_bundl);
1320 for_each_sg(sgl, sg, tot_dsds, i) {
1321 /* Allocate additional continuation packets? */
1322 if (avail_dsds == 0) {
1323 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1324 QLA_DSDS_PER_IOCB : used_dsds;
1325 dsd_list_len = (avail_dsds + 1) * 12;
1326 used_dsds -= avail_dsds;
1328 /* allocate tracking DS */
1329 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1331 ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1333 "%s: failed alloc dsd_dma...\n",
1338 /* allocate new list */
1340 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1341 &dsd_ptr->dsd_list_dma);
1342 if (!dsd_ptr->dsd_addr) {
1343 /* need to cleanup only this dsd_ptr */
1344 /* rest will be done by sp_free_dma() */
1350 list_add_tail(&dsd_ptr->list,
1352 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1354 list_add_tail(&dsd_ptr->list,
1356 tc->ctx_dsd_alloced = 1;
1359 /* add new list to cmd iocb or last list */
1360 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1362 cur_dsd->length = cpu_to_le32(dsd_list_len);
1363 cur_dsd = dsd_ptr->dsd_addr;
1365 append_dsd64(&cur_dsd, sg);
1369 /* Null termination */
1370 cur_dsd->address = 0;
1371 cur_dsd->length = 0;
1377 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1378 * Type 6 IOCB types.
1380 * @sp: SRB command to process
1381 * @cmd_pkt: Command type CRC_2 IOCB
1382 * @tot_dsds: Total number of segments to transfer
1383 * @tot_prot_dsds: Total number of segments with protection information
1384 * @fw_prot_opts: Protection options to be passed to firmware
1387 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1388 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1390 struct dsd64 *cur_dsd;
1392 scsi_qla_host_t *vha;
1393 struct scsi_cmnd *cmd;
1394 uint32_t total_bytes = 0;
1395 uint32_t data_bytes;
1397 uint8_t bundling = 1;
1399 struct crc_context *crc_ctx_pkt = NULL;
1400 struct qla_hw_data *ha;
1401 uint8_t additional_fcpcdb_len;
1402 uint16_t fcp_cmnd_len;
1403 struct fcp_cmnd *fcp_cmnd;
1404 dma_addr_t crc_ctx_dma;
1406 cmd = GET_CMD_SP(sp);
1408 /* Update entry type to indicate Command Type CRC_2 IOCB */
1409 put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
1414 /* No data transfer */
1415 data_bytes = scsi_bufflen(cmd);
1416 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1417 cmd_pkt->byte_count = cpu_to_le32(0);
1421 cmd_pkt->vp_index = sp->vha->vp_idx;
1423 /* Set transfer direction */
1424 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1425 cmd_pkt->control_flags =
1426 cpu_to_le16(CF_WRITE_DATA);
1427 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1428 cmd_pkt->control_flags =
1429 cpu_to_le16(CF_READ_DATA);
1432 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1433 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1434 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1435 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1438 /* Allocate CRC context from global pool */
1439 crc_ctx_pkt = sp->u.scmd.crc_ctx =
1440 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1443 goto crc_queuing_error;
1445 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1447 sp->flags |= SRB_CRC_CTX_DMA_VALID;
1450 crc_ctx_pkt->handle = cmd_pkt->handle;
1452 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1454 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1455 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1457 put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
1458 cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
1460 /* Determine SCSI command length -- align to 4 byte boundary */
1461 if (cmd->cmd_len > 16) {
1462 additional_fcpcdb_len = cmd->cmd_len - 16;
1463 if ((cmd->cmd_len % 4) != 0) {
1464 /* SCSI cmd > 16 bytes must be multiple of 4 */
1465 goto crc_queuing_error;
1467 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1469 additional_fcpcdb_len = 0;
1470 fcp_cmnd_len = 12 + 16 + 4;
1473 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1475 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1476 if (cmd->sc_data_direction == DMA_TO_DEVICE)
1477 fcp_cmnd->additional_cdb_len |= 1;
1478 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1479 fcp_cmnd->additional_cdb_len |= 2;
1481 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1482 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1483 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1484 put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
1485 &cmd_pkt->fcp_cmnd_dseg_address);
1486 fcp_cmnd->task_management = 0;
1487 fcp_cmnd->task_attribute = TSK_SIMPLE;
1489 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1491 /* Compute dif len and adjust data len to include protection */
1493 blk_size = cmd->device->sector_size;
1494 dif_bytes = (data_bytes / blk_size) * 8;
1496 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1497 case SCSI_PROT_READ_INSERT:
1498 case SCSI_PROT_WRITE_STRIP:
1499 total_bytes = data_bytes;
1500 data_bytes += dif_bytes;
1503 case SCSI_PROT_READ_STRIP:
1504 case SCSI_PROT_WRITE_INSERT:
1505 case SCSI_PROT_READ_PASS:
1506 case SCSI_PROT_WRITE_PASS:
1507 total_bytes = data_bytes + dif_bytes;
1513 if (!qla2x00_hba_err_chk_enabled(sp))
1514 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1515 /* HBA error checking enabled */
1516 else if (IS_PI_UNINIT_CAPABLE(ha)) {
1517 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1518 || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1519 SCSI_PROT_DIF_TYPE2))
1520 fw_prot_opts |= BIT_10;
1521 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1522 SCSI_PROT_DIF_TYPE3)
1523 fw_prot_opts |= BIT_11;
1527 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
1530 * Configure bundling if we need to fetch interleaved
1531 * protection data with separate PCI accesses
1533 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1534 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1535 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1537 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
1540 /* Finish the common fields of CRC pkt */
1541 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1542 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1543 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1544 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1545 /* Fibre channel byte count */
1546 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1547 fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1548 additional_fcpcdb_len);
1549 *fcp_dl = htonl(total_bytes);
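/*
 * Worked example of the DIF length math above: a 64 KiB write with
 * 512-byte sectors covers 128 blocks, so dif_bytes = 128 * 8 = 1024
 * (one 8-byte DIF tuple per block). For *_PASS, READ_STRIP and
 * WRITE_INSERT the wire carries data plus tuples, so
 * total_bytes = 65536 + 1024; for READ_INSERT/WRITE_STRIP the tuples
 * exist only on the host side, so total_bytes stays 65536 while
 * data_bytes grows by dif_bytes. fcp_dl (the FCP DL field appended
 * after the CDB) is always the wire total.
 */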
1551 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1552 cmd_pkt->byte_count = cpu_to_le32(0);
1555 /* Walks data segments */
1557 cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1559 if (!bundling && tot_prot_dsds) {
1560 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1561 cur_dsd, tot_dsds, NULL))
1562 goto crc_queuing_error;
1563 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1564 (tot_dsds - tot_prot_dsds), NULL))
1565 goto crc_queuing_error;
1567 if (bundling && tot_prot_dsds) {
1568 /* Walks dif segments */
1569 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1570 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1571 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1572 tot_prot_dsds, NULL))
1573 goto crc_queuing_error;
1578 /* Cleanup will be performed by the caller */
1580 return QLA_FUNCTION_FAILED;
1584 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1585 * @sp: command to send to the ISP
1587 * Returns non-zero if a failure occurred, else zero.
1590 qla24xx_start_scsi(srb_t *sp)
1593 unsigned long flags;
1596 struct cmd_type_7 *cmd_pkt;
1600 struct req_que *req = NULL;
1601 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1602 struct scsi_qla_host *vha = sp->vha;
1603 struct qla_hw_data *ha = vha->hw;
1605 /* Setup device pointers. */
1608 /* So we know we haven't pci_map'ed anything yet */
1611 /* Send marker if required */
1612 if (vha->marker_needed != 0) {
1613 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1615 return QLA_FUNCTION_FAILED;
1616 vha->marker_needed = 0;
1619 /* Acquire ring specific lock */
1620 spin_lock_irqsave(&ha->hardware_lock, flags);
1622 handle = qla2xxx_get_next_handle(req);
1626 /* Map the sg table so we have an accurate count of sg entries needed */
1627 if (scsi_sg_count(cmd)) {
1628 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1629 scsi_sg_count(cmd), cmd->sc_data_direction);
1630 if (unlikely(!nseg))
1636 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1637 if (req->cnt < (req_cnt + 2)) {
1638 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1639 rd_reg_dword_relaxed(req->req_q_out);
1640 if (req->ring_index < cnt)
1641 req->cnt = cnt - req->ring_index;
1643 req->cnt = req->length -
1644 (req->ring_index - cnt);
1645 if (req->cnt < (req_cnt + 2))
1649 /* Build command packet. */
1650 req->current_outstanding_cmd = handle;
1651 req->outstanding_cmds[handle] = sp;
1652 sp->handle = handle;
1653 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1654 req->cnt -= req_cnt;
1656 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1657 cmd_pkt->handle = make_handle(req->id, handle);
1659 /* Zero out remaining portion of packet. */
1660 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1661 clr_ptr = (uint32_t *)cmd_pkt + 2;
1662 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1663 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1665 /* Set NPORT-ID and LUN number*/
1666 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1667 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1668 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1669 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1670 cmd_pkt->vp_index = sp->vha->vp_idx;
1672 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1673 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1675 cmd_pkt->task = TSK_SIMPLE;
1677 /* Load SCSI command packet. */
1678 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1679 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1681 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1683 /* Build IOCB segments */
1684 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1686 /* Set total data segment count. */
1687 cmd_pkt->entry_count = (uint8_t)req_cnt;
1689 /* Adjust ring index. */
1691 if (req->ring_index == req->length) {
1692 req->ring_index = 0;
1693 req->ring_ptr = req->ring;
1697 sp->flags |= SRB_DMA_VALID;
1699 /* Set chip new ring index. */
1700 wrt_reg_dword(req->req_q_in, req->ring_index);
1702 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1707 scsi_dma_unmap(cmd);
1709 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1711 return QLA_FUNCTION_FAILED;
1715 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1716 * @sp: command to send to the ISP
1718 * Returns non-zero if a failure occurred, else zero.
1721 qla24xx_dif_start_scsi(srb_t *sp)
1724 unsigned long flags;
1728 uint16_t req_cnt = 0;
1730 uint16_t tot_prot_dsds;
1731 uint16_t fw_prot_opts = 0;
1732 struct req_que *req = NULL;
1733 struct rsp_que *rsp = NULL;
1734 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1735 struct scsi_qla_host *vha = sp->vha;
1736 struct qla_hw_data *ha = vha->hw;
1737 struct cmd_type_crc_2 *cmd_pkt;
1738 uint32_t status = 0;
1740 #define QDSS_GOT_Q_SPACE BIT_0
1742 /* Only process protection or >16 cdb in this routine */
1743 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1744 if (cmd->cmd_len <= 16)
1745 return qla24xx_start_scsi(sp);
1748 /* Setup device pointers. */
1752 /* So we know we haven't pci_map'ed anything yet */
1755 /* Send marker if required */
1756 if (vha->marker_needed != 0) {
1757 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1759 return QLA_FUNCTION_FAILED;
1760 vha->marker_needed = 0;
1763 /* Acquire ring specific lock */
1764 spin_lock_irqsave(&ha->hardware_lock, flags);
1766 handle = qla2xxx_get_next_handle(req);
1770 /* Compute number of required data segments */
1771 /* Map the sg table so we have an accurate count of sg entries needed */
1772 if (scsi_sg_count(cmd)) {
1773 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1774 scsi_sg_count(cmd), cmd->sc_data_direction);
1775 if (unlikely(!nseg))
1778 sp->flags |= SRB_DMA_VALID;
1780 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1781 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1782 struct qla2_sgx sgx;
1785 memset(&sgx, 0, sizeof(struct qla2_sgx));
1786 sgx.tot_bytes = scsi_bufflen(cmd);
1787 sgx.cur_sg = scsi_sglist(cmd);
1791 while (qla24xx_get_one_block_sg(
1792 cmd->device->sector_size, &sgx, &partial))
1798 /* number of required data segments */
1801 /* Compute number of required protection segments */
1802 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1803 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1804 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1805 if (unlikely(!nseg))
1808 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1810 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1811 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1812 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1819 /* Total Data and protection sg segment(s) */
1820 tot_prot_dsds = nseg;
1822 if (req->cnt < (req_cnt + 2)) {
1823 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1824 rd_reg_dword_relaxed(req->req_q_out);
1825 if (req->ring_index < cnt)
1826 req->cnt = cnt - req->ring_index;
1828 req->cnt = req->length -
1829 (req->ring_index - cnt);
1830 if (req->cnt < (req_cnt + 2))
1834 status |= QDSS_GOT_Q_SPACE;
1836 /* Build header part of command packet (excluding the OPCODE). */
1837 req->current_outstanding_cmd = handle;
1838 req->outstanding_cmds[handle] = sp;
1839 sp->handle = handle;
1840 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1841 req->cnt -= req_cnt;
1843 /* Fill-in common area */
1844 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1845 cmd_pkt->handle = make_handle(req->id, handle);
1847 clr_ptr = (uint32_t *)cmd_pkt + 2;
1848 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1850 /* Set NPORT-ID and LUN number*/
1851 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1852 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1853 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1854 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1856 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1857 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1859 /* Total Data and protection segment(s) */
1860 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1862 /* Build IOCB segments and adjust for data protection segments */
1863 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1864 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1868 cmd_pkt->entry_count = (uint8_t)req_cnt;
1869 /* Specify response queue number where completion should happen */
1870 cmd_pkt->entry_status = (uint8_t) rsp->id;
1871 cmd_pkt->timeout = cpu_to_le16(0);
1874 /* Adjust ring index. */
1876 if (req->ring_index == req->length) {
1877 req->ring_index = 0;
1878 req->ring_ptr = req->ring;
1882 /* Set chip new ring index. */
1883 wrt_reg_dword(req->req_q_in, req->ring_index);
1885 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1890 if (status & QDSS_GOT_Q_SPACE) {
1891 req->outstanding_cmds[handle] = NULL;
1892 req->cnt += req_cnt;
1894 /* Cleanup will be performed by the caller (queuecommand) */
1896 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1897 return QLA_FUNCTION_FAILED;
1901 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1902 * @sp: command to send to the ISP
1904 * Returns non-zero if a failure occurred, else zero.
1907 qla2xxx_start_scsi_mq(srb_t *sp)
1910 unsigned long flags;
1913 struct cmd_type_7 *cmd_pkt;
1917 struct req_que *req = NULL;
1918 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1919 struct scsi_qla_host *vha = sp->fcport->vha;
1920 struct qla_hw_data *ha = vha->hw;
1921 struct qla_qpair *qpair = sp->qpair;
1923 /* Acquire qpair specific lock */
1924 spin_lock_irqsave(&qpair->qp_lock, flags);
1926 /* Setup qpair pointers */
1929 /* So we know we haven't pci_map'ed anything yet */
1932 /* Send marker if required */
1933 if (vha->marker_needed != 0) {
1934 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1936 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1937 return QLA_FUNCTION_FAILED;
1939 vha->marker_needed = 0;
1942 handle = qla2xxx_get_next_handle(req);
1946 /* Map the sg table so we have an accurate count of sg entries needed */
1947 if (scsi_sg_count(cmd)) {
1948 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1949 scsi_sg_count(cmd), cmd->sc_data_direction);
1950 if (unlikely(!nseg))
1956 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1957 if (req->cnt < (req_cnt + 2)) {
1958 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1959 rd_reg_dword_relaxed(req->req_q_out);
1960 if (req->ring_index < cnt)
1961 req->cnt = cnt - req->ring_index;
1963 req->cnt = req->length -
1964 (req->ring_index - cnt);
1965 if (req->cnt < (req_cnt + 2))
1969 /* Build command packet. */
1970 req->current_outstanding_cmd = handle;
1971 req->outstanding_cmds[handle] = sp;
1972 sp->handle = handle;
1973 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1974 req->cnt -= req_cnt;
1976 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1977 cmd_pkt->handle = make_handle(req->id, handle);
1979 /* Zero out remaining portion of packet. */
1980 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1981 clr_ptr = (uint32_t *)cmd_pkt + 2;
1982 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1983 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1985 /* Set NPORT-ID and LUN number*/
1986 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1987 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1988 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1989 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1990 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1992 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1993 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1995 cmd_pkt->task = TSK_SIMPLE;
1997 /* Load SCSI command packet. */
1998 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1999 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2001 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2003 /* Build IOCB segments */
2004 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2006 /* Set total data segment count. */
2007 cmd_pkt->entry_count = (uint8_t)req_cnt;
2009 /* Adjust ring index. */
2011 if (req->ring_index == req->length) {
2012 req->ring_index = 0;
2013 req->ring_ptr = req->ring;
2017 sp->flags |= SRB_DMA_VALID;
2019 /* Set chip new ring index. */
2020 wrt_reg_dword(req->req_q_in, req->ring_index);
2022 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2027 scsi_dma_unmap(cmd);
2029 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2031 return QLA_FUNCTION_FAILED;
2036 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2037 * @sp: command to send to the ISP
2039 * Returns non-zero if a failure occurred, else zero.
2042 qla2xxx_dif_start_scsi_mq(srb_t *sp)
2045 unsigned long flags;
2049 uint16_t req_cnt = 0;
2051 uint16_t tot_prot_dsds;
2052 uint16_t fw_prot_opts = 0;
2053 struct req_que *req = NULL;
2054 struct rsp_que *rsp = NULL;
2055 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2056 struct scsi_qla_host *vha = sp->fcport->vha;
2057 struct qla_hw_data *ha = vha->hw;
2058 struct cmd_type_crc_2 *cmd_pkt;
2059 uint32_t status = 0;
2060 struct qla_qpair *qpair = sp->qpair;
2062 #define QDSS_GOT_Q_SPACE BIT_0
2064 /* Check for host side state */
2065 if (!qpair->online) {
2066 cmd->result = DID_NO_CONNECT << 16;
2067 return QLA_INTERFACE_ERROR;
2070 if (!qpair->difdix_supported &&
2071 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2072 cmd->result = DID_NO_CONNECT << 16;
2073 return QLA_INTERFACE_ERROR;
2076 /* Only process protection or >16 cdb in this routine */
2077 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2078 if (cmd->cmd_len <= 16)
2079 return qla2xxx_start_scsi_mq(sp);
2082 spin_lock_irqsave(&qpair->qp_lock, flags);
2084 /* Setup qpair pointers */
2088 /* So we know we haven't pci_map'ed anything yet */
2091 /* Send marker if required */
2092 if (vha->marker_needed != 0) {
2093 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2095 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2096 return QLA_FUNCTION_FAILED;
2098 vha->marker_needed = 0;
2101 handle = qla2xxx_get_next_handle(req);
2105 /* Compute number of required data segments */
2106 /* Map the sg table so we have an accurate count of sg entries needed */
2107 if (scsi_sg_count(cmd)) {
2108 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2109 scsi_sg_count(cmd), cmd->sc_data_direction);
2110 if (unlikely(!nseg))
2113 sp->flags |= SRB_DMA_VALID;
2115 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2116 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2117 struct qla2_sgx sgx;
2120 memset(&sgx, 0, sizeof(struct qla2_sgx));
2121 sgx.tot_bytes = scsi_bufflen(cmd);
2122 sgx.cur_sg = scsi_sglist(cmd);
2126 while (qla24xx_get_one_block_sg(
2127 cmd->device->sector_size, &sgx, &partial))
2133 /* number of required data segments */
2136 /* Compute number of required protection segments */
2137 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2138 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2139 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2140 if (unlikely(!nseg))
2143 sp->flags |= SRB_CRC_PROT_DMA_VALID;
2145 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2146 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2147 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2154 /* Total Data and protection sg segment(s) */
2155 tot_prot_dsds = nseg;
2157 if (req->cnt < (req_cnt + 2)) {
2158 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2159 rd_reg_dword_relaxed(req->req_q_out);
2160 if (req->ring_index < cnt)
2161 req->cnt = cnt - req->ring_index;
2163 req->cnt = req->length -
2164 (req->ring_index - cnt);
2165 if (req->cnt < (req_cnt + 2))
2169 status |= QDSS_GOT_Q_SPACE;
2171 /* Build header part of command packet (excluding the OPCODE). */
2172 req->current_outstanding_cmd = handle;
2173 req->outstanding_cmds[handle] = sp;
2174 sp->handle = handle;
2175 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2176 req->cnt -= req_cnt;
2178 /* Fill-in common area */
2179 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2180 cmd_pkt->handle = make_handle(req->id, handle);
2182 clr_ptr = (uint32_t *)cmd_pkt + 2;
2183 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2185 /* Set NPORT-ID and LUN number*/
2186 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2187 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2188 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2189 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2191 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2192 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2194 /* Total Data and protection segment(s) */
2195 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2197 /* Build IOCB segments and adjust for data protection segments */
2198 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2199 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2203 cmd_pkt->entry_count = (uint8_t)req_cnt;
2204 cmd_pkt->timeout = cpu_to_le16(0);
2207 /* Adjust ring index. */
2209 if (req->ring_index == req->length) {
2210 req->ring_index = 0;
2211 req->ring_ptr = req->ring;
2215 /* Set chip new ring index. */
2216 wrt_reg_dword(req->req_q_in, req->ring_index);
2218 /* Manage unprocessed RIO/ZIO commands in response queue. */
2219 if (vha->flags.process_response_queue &&
2220 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2221 qla24xx_process_response_queue(vha, rsp);
2223 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2228 if (status & QDSS_GOT_Q_SPACE) {
2229 req->outstanding_cmds[handle] = NULL;
2230 req->cnt += req_cnt;
2232 /* Cleanup will be performed by the caller (queuecommand) */
2234 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2235 return QLA_FUNCTION_FAILED;
2238 /* Generic Control-SRB manipulation functions. */
2240 /* hardware_lock assumed to be held. */
2243 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2245 scsi_qla_host_t *vha = qpair->vha;
2246 struct qla_hw_data *ha = vha->hw;
2247 struct req_que *req = qpair->req;
2248 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2251 uint16_t cnt, req_cnt;
2257 if (sp && (sp->type != SRB_SCSI_CMD)) {
2258 /* Adjust entry-counts as needed. */
2259 req_cnt = sp->iocbs;
2262 /* Check for room on request queue. */
2263 if (req->cnt < req_cnt + 2) {
2264 if (qpair->use_shadow_reg)
2265 cnt = *req->out_ptr;
2266 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2268 cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
2269 else if (IS_P3P_TYPE(ha))
2270 cnt = rd_reg_dword(reg->isp82.req_q_out);
2271 else if (IS_FWI2_CAPABLE(ha))
2272 cnt = rd_reg_dword(&reg->isp24.req_q_out);
2273 else if (IS_QLAFX00(ha))
2274 cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
2276 cnt = qla2x00_debounce_register(
2277 ISP_REQ_Q_OUT(ha, &reg->isp));
2279 if (req->ring_index < cnt)
2280 req->cnt = cnt - req->ring_index;
2282 req->cnt = req->length -
2283 (req->ring_index - cnt);
2285 if (req->cnt < req_cnt + 2)
2289 handle = qla2xxx_get_next_handle(req);
2291 ql_log(ql_log_warn, vha, 0x700b,
2292 "No room on outstanding cmd array.\n");
2296 /* Prep command array. */
2297 req->current_outstanding_cmd = handle;
2298 req->outstanding_cmds[handle] = sp;
2299 sp->handle = handle;
2303 req->cnt -= req_cnt;
2304 pkt = req->ring_ptr;
2305 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2306 if (IS_QLAFX00(ha)) {
2307 wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
2308 wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
2310 pkt->entry_count = req_cnt;
2311 pkt->handle = handle;
2317 qpair->tgt_counters.num_alloc_iocb_failed++;
2322 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2324 scsi_qla_host_t *vha = qpair->vha;
2326 if (qla2x00_reset_active(vha))
2329 return __qla2x00_alloc_iocbs(qpair, sp);
2333 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2335 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
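/*
 * Illustrative caller pattern for the allocators above (a sketch; real
 * callers live elsewhere in the driver): allocate a zeroed IOCB slot
 * with the hardware lock held, fill it in with one of the iocb helpers
 * below, then ring the doorbell.
 *
 *	pkt = qla2x00_alloc_iocbs(vha, sp);
 *	if (!pkt)
 *		goto queuing_error;
 *	qla24xx_login_iocb(sp, (struct logio_entry_24xx *)pkt);
 *	qla2x00_start_iocbs(vha, vha->req);
 */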
2339 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2341 struct srb_iocb *lio = &sp->u.iocb_cmd;
2343 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2344 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2345 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2346 logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2347 if (sp->vha->flags.nvme_first_burst)
2348 logio->io_parameter[0] =
2349 cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
2352 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2353 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2354 logio->port_id[1] = sp->fcport->d_id.b.area;
2355 logio->port_id[2] = sp->fcport->d_id.b.domain;
2356 logio->vp_index = sp->vha->vp_idx;
2360 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2362 struct srb_iocb *lio = &sp->u.iocb_cmd;
2364 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2365 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2367 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2368 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2370 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2371 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2372 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2373 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2374 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2376 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2377 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2378 logio->port_id[1] = sp->fcport->d_id.b.area;
2379 logio->port_id[2] = sp->fcport->d_id.b.domain;
2380 logio->vp_index = sp->vha->vp_idx;
2384 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2386 struct qla_hw_data *ha = sp->vha->hw;
2387 struct srb_iocb *lio = &sp->u.iocb_cmd;
2390 mbx->entry_type = MBX_IOCB_TYPE;
2391 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2392 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2393 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2394 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2395 if (HAS_EXTENDED_IDS(ha)) {
2396 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2397 mbx->mb10 = cpu_to_le16(opts);
2399 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2401 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2402 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2403 sp->fcport->d_id.b.al_pa);
2404 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
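/**
 * qla24xx_logout_iocb() - Build a LOGO IOCB.
 * @sp: logout SRB
 * @logio: IOCB to fill in
 *
 * An explicit logout also frees the N_Port handle; an implicit logout
 * keeps the handle when keep_nport_handle is set.
 */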
2408 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2410 u16 control_flags = LCF_COMMAND_LOGO;
2411 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2413 if (sp->fcport->explicit_logout) {
2414 control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2416 control_flags |= LCF_IMPL_LOGO;
2418 if (!sp->fcport->keep_nport_handle)
2419 control_flags |= LCF_FREE_NPORT;
2422 logio->control_flags = cpu_to_le16(control_flags);
2423 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2424 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2425 logio->port_id[1] = sp->fcport->d_id.b.area;
2426 logio->port_id[2] = sp->fcport->d_id.b.domain;
2427 logio->vp_index = sp->vha->vp_idx;
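/**
 * qla2x00_logout_iocb() - Build a fabric logout as a mailbox IOCB (pre-FWI2 ISPs).
 * @sp: logout SRB
 * @mbx: mailbox IOCB to fill in
 */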
2431 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2433 struct qla_hw_data *ha = sp->vha->hw;
2435 mbx->entry_type = MBX_IOCB_TYPE;
2436 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2437 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2438 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2439 cpu_to_le16(sp->fcport->loop_id) :
2440 cpu_to_le16(sp->fcport->loop_id << 8);
2441 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2442 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2443 sp->fcport->d_id.b.al_pa);
2444 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2445 /* Implicit: mbx->mbx10 = 0. */
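/**
 * qla24xx_adisc_iocb() - Build an ADISC (address discovery) IOCB.
 * @sp: SRB for the port being revalidated
 * @logio: IOCB to fill in
 */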
2449 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2451 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2452 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2453 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2454 logio->vp_index = sp->vha->vp_idx;
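/**
 * qla2x00_adisc_iocb() - Build the pre-FWI2 ADISC equivalent: a Get Port
 * Database mailbox IOCB that DMAs the port data to ha->async_pd_dma.
 * @sp: SRB for the port being revalidated
 * @mbx: mailbox IOCB to fill in
 */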
2458 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2460 struct qla_hw_data *ha = sp->vha->hw;
2462 mbx->entry_type = MBX_IOCB_TYPE;
2463 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2464 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2465 if (HAS_EXTENDED_IDS(ha)) {
2466 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2467 mbx->mb10 = cpu_to_le16(BIT_0);
2469 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2471 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2472 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2473 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2474 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2475 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
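/**
 * qla24xx_tm_iocb() - Build a task-management IOCB.
 * @sp: TM SRB carrying the TCF_* flags and the LUN
 * @tsk: IOCB to fill in
 *
 * For a LUN reset the LUN is copied in and byte-swapped into FCP
 * (big-endian) layout.
 */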
2479 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2483 struct fc_port *fcport = sp->fcport;
2484 scsi_qla_host_t *vha = fcport->vha;
2485 struct qla_hw_data *ha = vha->hw;
2486 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2487 struct req_que *req = vha->req;
2489 flags = iocb->u.tmf.flags;
2490 lun = iocb->u.tmf.lun;
2492 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2493 tsk->entry_count = 1;
2494 tsk->handle = make_handle(req->id, tsk->handle);
2495 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
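/*
 * Timeout is 2 * R_A_TOV in seconds; this assumes r_a_tov is kept in
 * 100 ms units, hence the divide by 10.
 */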
2496 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2497 tsk->control_flags = cpu_to_le32(flags);
2498 tsk->port_id[0] = fcport->d_id.b.al_pa;
2499 tsk->port_id[1] = fcport->d_id.b.area;
2500 tsk->port_id[2] = fcport->d_id.b.domain;
2501 tsk->vp_index = fcport->vha->vp_idx;
2503 if (flags == TCF_LUN_RESET) {
2504 int_to_scsilun(lun, &tsk->lun);
2505 host_to_fcp_swap((uint8_t *)&tsk->lun, sizeof(tsk->lun));
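/**
 * qla2x00_init_timer() - Set up the per-SRB command timer.
 * @sp: SRB to time
 * @tmo: timeout in seconds
 *
 * The timer is only armed later, in qla2x00_start_sp(), once the IOCB
 * has been queued (see sp->start_timer).
 */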
2510 void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2512 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2513 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2514 sp->free = qla2x00_sp_free;
2515 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2516 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2517 sp->start_timer = 1;
2520 static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2522 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2526 if (elsio->u.els_logo.els_logo_pyld)
2527 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2528 elsio->u.els_logo.els_logo_pyld,
2529 elsio->u.els_logo.els_logo_pyld_dma);
2531 del_timer(&elsio->timer);
2536 qla2x00_els_dcmd_iocb_timeout(void *data)
2539 fc_port_t *fcport = sp->fcport;
2540 struct scsi_qla_host *vha = sp->vha;
2541 struct srb_iocb *lio = &sp->u.iocb_cmd;
2542 unsigned long flags = 0;
2545 ql_dbg(ql_dbg_io, vha, 0x3069,
2546 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2547 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2548 fcport->d_id.b.al_pa);
2550 /* Abort the exchange */
2551 res = qla24xx_async_abort_cmd(sp, false);
2553 ql_dbg(ql_dbg_io, vha, 0x3070,
2554 "mbx abort_command failed.\n");
2555 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2556 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2557 if (sp->qpair->req->outstanding_cmds[h] == sp) {
2558 sp->qpair->req->outstanding_cmds[h] = NULL;
2562 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2563 complete(&lio->u.els_logo.comp);
2565 ql_dbg(ql_dbg_io, vha, 0x3071,
2566 "mbx abort_command success.\n");
2570 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2572 fc_port_t *fcport = sp->fcport;
2573 struct srb_iocb *lio = &sp->u.iocb_cmd;
2574 struct scsi_qla_host *vha = sp->vha;
2576 ql_dbg(ql_dbg_io, vha, 0x3072,
2577 "%s hdl=%x, portid=%02x%02x%02x done\n",
2578 sp->name, sp->handle, fcport->d_id.b.domain,
2579 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2581 complete(&lio->u.els_logo.comp);
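/**
 * qla24xx_els_dcmd_iocb() - Issue a driver-generated ELS LOGO and wait for it.
 * @vha: SCSI host pointer
 * @els_opcode: ELS opcode to place in the payload
 * @remote_did: destination port ID
 */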
2585 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2586 port_id_t remote_did)
2589 fc_port_t *fcport = NULL;
2590 struct srb_iocb *elsio = NULL;
2591 struct qla_hw_data *ha = vha->hw;
2592 struct els_logo_payload logo_pyld;
2593 int rval = QLA_SUCCESS;
2595 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2597 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2601 /* Alloc SRB structure */
2602 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2605 ql_log(ql_log_info, vha, 0x70e6,
2606 "SRB allocation failed\n");
2610 elsio = &sp->u.iocb_cmd;
2611 fcport->loop_id = 0xFFFF;
2612 fcport->d_id.b.domain = remote_did.b.domain;
2613 fcport->d_id.b.area = remote_did.b.area;
2614 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2616 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2617 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2619 sp->type = SRB_ELS_DCMD;
2620 sp->name = "ELS_DCMD";
2621 sp->fcport = fcport;
2622 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2623 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2624 init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2625 sp->done = qla2x00_els_dcmd_sp_done;
2626 sp->free = qla2x00_els_dcmd_sp_free;
2628 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2629 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2632 if (!elsio->u.els_logo.els_logo_pyld) {
2634 return QLA_FUNCTION_FAILED;
2637 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2639 elsio->u.els_logo.els_cmd = els_opcode;
2640 logo_pyld.opcode = els_opcode;
2641 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2642 logo_pyld.s_id[1] = vha->d_id.b.area;
2643 logo_pyld.s_id[2] = vha->d_id.b.domain;
2644 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2645 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2647 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2648 sizeof(struct els_logo_payload));
2649 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2650 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2651 elsio->u.els_logo.els_logo_pyld,
2652 sizeof(*elsio->u.els_logo.els_logo_pyld));
2654 rval = qla2x00_start_sp(sp);
2655 if (rval != QLA_SUCCESS) {
2657 return QLA_FUNCTION_FAILED;
2660 ql_dbg(ql_dbg_io, vha, 0x3074,
2661 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2662 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2663 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2665 wait_for_completion(&elsio->u.els_logo.comp);
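/**
 * qla24xx_els_logo_iocb() - Build the ELS IOCB for a driver-generated
 * LOGO or PLOGI (ELS descriptor command).
 * @sp: ELS SRB
 * @els_iocb: IOCB to fill in
 */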
2672 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2674 scsi_qla_host_t *vha = sp->vha;
2675 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2677 els_iocb->entry_type = ELS_IOCB_TYPE;
2678 els_iocb->entry_count = 1;
2679 els_iocb->sys_define = 0;
2680 els_iocb->entry_status = 0;
2681 els_iocb->handle = sp->handle;
2682 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2683 els_iocb->tx_dsd_count = cpu_to_le16(1);
2684 els_iocb->vp_index = vha->vp_idx;
2685 els_iocb->sof_type = EST_SOFI3;
2686 els_iocb->rx_dsd_count = 0;
2687 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2689 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
2690 els_iocb->d_id[1] = sp->fcport->d_id.b.area;
2691 els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
2692 /* For SID the byte order is different from that of DID */
2693 els_iocb->s_id[1] = vha->d_id.b.al_pa;
2694 els_iocb->s_id[2] = vha->d_id.b.area;
2695 els_iocb->s_id[0] = vha->d_id.b.domain;
2697 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2698 els_iocb->control_flags = 0;
2699 els_iocb->tx_byte_count = els_iocb->tx_len =
2700 cpu_to_le32(sizeof(struct els_plogi_payload));
2701 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2702 &els_iocb->tx_address);
2703 els_iocb->rx_dsd_count = cpu_to_le16(1);
2704 els_iocb->rx_byte_count = els_iocb->rx_len =
2705 cpu_to_le32(sizeof(struct els_plogi_payload));
2706 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2707 &els_iocb->rx_address);
2709 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2710 "PLOGI ELS IOCB:\n");
2711 ql_dump_buffer(ql_log_info, vha, 0x0109,
2712 (uint8_t *)els_iocb,
2715 els_iocb->control_flags = cpu_to_le16(1 << 13);
2716 els_iocb->tx_byte_count =
2717 cpu_to_le32(sizeof(struct els_logo_payload));
2718 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2719 &els_iocb->tx_address);
2720 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2722 els_iocb->rx_byte_count = 0;
2723 els_iocb->rx_address = 0;
2724 els_iocb->rx_len = 0;
2725 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2727 ql_dump_buffer(ql_log_info, vha, 0x010b,
2732 sp->vha->qla_stats.control_requests++;
2736 qla2x00_els_dcmd2_iocb_timeout(void *data)
2739 fc_port_t *fcport = sp->fcport;
2740 struct scsi_qla_host *vha = sp->vha;
2741 unsigned long flags = 0;
2744 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2745 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2746 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2748 /* Abort the exchange */
2749 res = qla24xx_async_abort_cmd(sp, false);
2750 ql_dbg(ql_dbg_io, vha, 0x3070,
2751 "mbx abort_command %s\n",
2752 (res == QLA_SUCCESS) ? "successful" : "failed");
2754 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2755 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2756 if (sp->qpair->req->outstanding_cmds[h] == sp) {
2757 sp->qpair->req->outstanding_cmds[h] = NULL;
2761 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2762 sp->done(sp, QLA_FUNCTION_TIMEOUT);
2766 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2768 if (els_plogi->els_plogi_pyld)
2769 dma_free_coherent(&vha->hw->pdev->dev,
2771 els_plogi->els_plogi_pyld,
2772 els_plogi->els_plogi_pyld_dma);
2774 if (els_plogi->els_resp_pyld)
2775 dma_free_coherent(&vha->hw->pdev->dev,
2777 els_plogi->els_resp_pyld,
2778 els_plogi->els_resp_pyld_dma);
2781 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2783 fc_port_t *fcport = sp->fcport;
2784 struct srb_iocb *lio = &sp->u.iocb_cmd;
2785 struct scsi_qla_host *vha = sp->vha;
2786 struct event_arg ea;
2787 struct qla_work_evt *e;
2788 struct fc_port *conflict_fcport;
2789 port_id_t cid; /* conflict Nport id */
2790 const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
2793 ql_dbg(ql_dbg_disc, vha, 0x3072,
2794 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2795 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2797 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2798 del_timer(&sp->u.iocb_cmd.timer);
2800 if (sp->flags & SRB_WAKEUP_ON_COMP)
2801 complete(&lio->u.els_plogi.comp);
2803 switch (le32_to_cpu(fw_status[0])) {
2804 case CS_DATA_UNDERRUN:
2806 memset(&ea, 0, sizeof(ea));
2809 qla_handle_els_plogi_done(vha, &ea);
2813 switch (le32_to_cpu(fw_status[1])) {
2814 case LSC_SCODE_PORTID_USED:
2815 lid = le32_to_cpu(fw_status[2]) & 0xffff;
2816 qlt_find_sess_invalidate_other(vha,
2817 wwn_to_u64(fcport->port_name),
2818 fcport->d_id, lid, &conflict_fcport);
2819 if (conflict_fcport) {
2821 /* Another fcport shares the same
2822 * loop_id & nport id; the conflict
2823 * fcport needs to finish cleanup
2824 * before this fcport can proceed with login. */
2827 conflict_fcport->conflict = fcport;
2828 fcport->login_pause = 1;
2829 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2830 "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
2833 fcport->d_id.b24, lid);
2835 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2836 "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2839 fcport->d_id.b24, lid);
2840 qla2x00_clear_loop_id(fcport);
2841 set_bit(lid, vha->hw->loop_id_map);
2842 fcport->loop_id = lid;
2843 fcport->keep_nport_handle = 0;
2844 qlt_schedule_sess_for_deletion(fcport);
2848 case LSC_SCODE_NPORT_USED:
2849 cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16) & 0xff;
2851 cid.b.area = (le32_to_cpu(fw_status[2]) >> 8) & 0xff;
2853 cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff;
2856 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2857 "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2858 __func__, __LINE__, fcport->port_name,
2859 fcport->loop_id, cid.b24);
2860 set_bit(fcport->loop_id,
2861 vha->hw->loop_id_map);
2862 fcport->loop_id = FC_NO_LOOP_ID;
2863 qla24xx_post_gnl_work(vha, fcport);
2866 case LSC_SCODE_NOXCB:
2867 vha->hw->exch_starvation++;
2868 if (vha->hw->exch_starvation > 5) {
2869 ql_log(ql_log_warn, vha, 0xd046,
2870 "Exchange starvation. Resetting RISC\n");
2871 vha->hw->exch_starvation = 0;
2872 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2874 qla2xxx_wake_dpc(vha);
2878 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2879 "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2880 __func__, sp->fcport->port_name,
2881 fw_status[0], fw_status[1], fw_status[2]);
2883 fcport->flags &= ~FCF_ASYNC_SENT;
2884 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
2886 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2892 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2893 "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
2894 __func__, sp->fcport->port_name,
2895 fw_status[0], fw_status[1], fw_status[2]);
2897 sp->fcport->flags &= ~FCF_ASYNC_SENT;
2898 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
2899 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2903 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2905 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2907 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
2912 qla2x00_post_work(vha, e);
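/**
 * qla24xx_els_dcmd2_iocb() - Issue a driver-generated ELS PLOGI to a port.
 * @vha: SCSI host pointer
 * @els_opcode: ELS opcode to place in the payload
 * @fcport: remote port the PLOGI is sent to
 * @wait: if true, sleep until the ELS completes
 */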
2917 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2918 fc_port_t *fcport, bool wait)
2921 struct srb_iocb *elsio = NULL;
2922 struct qla_hw_data *ha = vha->hw;
2923 int rval = QLA_SUCCESS;
2924 void *ptr, *resp_ptr;
2926 /* Alloc SRB structure */
2927 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2929 ql_log(ql_log_info, vha, 0x70e6,
2930 "SRB allocation failed\n");
2931 fcport->flags &= ~FCF_ASYNC_ACTIVE;
2935 fcport->flags |= FCF_ASYNC_SENT;
2936 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
2937 elsio = &sp->u.iocb_cmd;
2938 ql_dbg(ql_dbg_io, vha, 0x3073,
2939 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2941 sp->type = SRB_ELS_DCMD;
2942 sp->name = "ELS_DCMD";
2943 sp->fcport = fcport;
2945 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2947 sp->flags = SRB_WAKEUP_ON_COMP;
2949 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2951 sp->done = qla2x00_els_dcmd2_sp_done;
2952 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2954 ptr = elsio->u.els_plogi.els_plogi_pyld =
2955 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
2956 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2958 if (!elsio->u.els_plogi.els_plogi_pyld) {
2959 rval = QLA_FUNCTION_FAILED;
2963 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2964 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
2965 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2967 if (!elsio->u.els_plogi.els_resp_pyld) {
2968 rval = QLA_FUNCTION_FAILED;
2972 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2974 memset(ptr, 0, sizeof(struct els_plogi_payload));
2975 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2976 memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2977 &ha->plogi_els_payld.data,
2978 sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2980 elsio->u.els_plogi.els_cmd = els_opcode;
2981 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2983 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2984 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2985 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
2986 sizeof(*elsio->u.els_plogi.els_plogi_pyld));
2988 init_completion(&elsio->u.els_plogi.comp);
2989 rval = qla2x00_start_sp(sp);
2990 if (rval != QLA_SUCCESS) {
2991 rval = QLA_FUNCTION_FAILED;
2993 ql_dbg(ql_dbg_disc, vha, 0x3074,
2994 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2995 sp->name, sp->handle, fcport->loop_id,
2996 fcport->d_id.b24, vha->d_id.b24);
3000 wait_for_completion(&elsio->u.els_plogi.comp);
3002 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
3003 rval = QLA_FUNCTION_FAILED;
3009 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3010 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
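/**
 * qla24xx_els_iocb() - Build an ELS pass-through IOCB from a BSG job.
 * @sp: SRB carrying the bsg_job
 * @els_iocb: IOCB to fill in
 */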
3017 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
3019 struct bsg_job *bsg_job = sp->u.bsg_job;
3020 struct fc_bsg_request *bsg_request = bsg_job->request;
3022 els_iocb->entry_type = ELS_IOCB_TYPE;
3023 els_iocb->entry_count = 1;
3024 els_iocb->sys_define = 0;
3025 els_iocb->entry_status = 0;
3026 els_iocb->handle = sp->handle;
3027 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3028 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3029 els_iocb->vp_index = sp->vha->vp_idx;
3030 els_iocb->sof_type = EST_SOFI3;
3031 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3034 els_iocb->opcode = sp->type == SRB_ELS_CMD_RPT ?
3035 bsg_request->rqst_data.r_els.els_code :
3036 bsg_request->rqst_data.h_els.command_code;
3037 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
3038 els_iocb->d_id[1] = sp->fcport->d_id.b.area;
3039 els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
3040 els_iocb->control_flags = 0;
3041 els_iocb->rx_byte_count =
3042 cpu_to_le32(bsg_job->reply_payload.payload_len);
3043 els_iocb->tx_byte_count =
3044 cpu_to_le32(bsg_job->request_payload.payload_len);
3046 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3047 &els_iocb->tx_address);
3048 els_iocb->tx_len = cpu_to_le32(sg_dma_len
3049 (bsg_job->request_payload.sg_list));
3051 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3052 &els_iocb->rx_address);
3053 els_iocb->rx_len = cpu_to_le32(sg_dma_len
3054 (bsg_job->reply_payload.sg_list));
3056 sp->vha->qla_stats.control_requests++;
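/**
 * qla2x00_ct_iocb() - Build a CT pass-through (MS) IOCB for pre-FWI2 ISPs.
 * @sp: SRB carrying the bsg_job
 * @ct_iocb: IOCB to fill in
 */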
3060 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3062 uint16_t avail_dsds;
3063 struct dsd64 *cur_dsd;
3064 struct scatterlist *sg;
3067 scsi_qla_host_t *vha = sp->vha;
3068 struct qla_hw_data *ha = vha->hw;
3069 struct bsg_job *bsg_job = sp->u.bsg_job;
3070 int entry_count = 1;
3072 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3073 ct_iocb->entry_type = CT_IOCB_TYPE;
3074 ct_iocb->entry_status = 0;
3075 ct_iocb->handle1 = sp->handle;
3076 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3077 ct_iocb->status = cpu_to_le16(0);
3078 ct_iocb->control_flags = cpu_to_le16(0);
3079 ct_iocb->timeout = 0;
3080 ct_iocb->cmd_dsd_count =
3081 cpu_to_le16(bsg_job->request_payload.sg_cnt);
3082 ct_iocb->total_dsd_count =
3083 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3084 ct_iocb->req_bytecount =
3085 cpu_to_le32(bsg_job->request_payload.payload_len);
3086 ct_iocb->rsp_bytecount =
3087 cpu_to_le32(bsg_job->reply_payload.payload_len);
3089 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3090 &ct_iocb->req_dsd.address);
3091 ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
3093 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3094 &ct_iocb->rsp_dsd.address);
3095 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
3098 cur_dsd = &ct_iocb->rsp_dsd;
3100 tot_dsds = bsg_job->reply_payload.sg_cnt;
3102 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3103 cont_a64_entry_t *cont_pkt;
3105 /* Allocate additional continuation packets? */
3106 if (avail_dsds == 0) {
3108 /* Five DSDs are available in the Continuation Type 1 IOCB. */
3111 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3112 vha->hw->req_q_map[0]);
3113 cur_dsd = cont_pkt->dsd;
3118 append_dsd64(&cur_dsd, sg);
3121 ct_iocb->entry_count = entry_count;
3123 sp->vha->qla_stats.control_requests++;
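/**
 * qla24xx_ct_iocb() - Build a CT pass-through IOCB for FWI2-capable ISPs.
 * @sp: SRB carrying the bsg_job
 * @ct_iocb: IOCB to fill in
 */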
3127 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3129 uint16_t avail_dsds;
3130 struct dsd64 *cur_dsd;
3131 struct scatterlist *sg;
3133 uint16_t cmd_dsds, rsp_dsds;
3134 scsi_qla_host_t *vha = sp->vha;
3135 struct qla_hw_data *ha = vha->hw;
3136 struct bsg_job *bsg_job = sp->u.bsg_job;
3137 int entry_count = 1;
3138 cont_a64_entry_t *cont_pkt = NULL;
3140 ct_iocb->entry_type = CT_IOCB_TYPE;
3141 ct_iocb->entry_status = 0;
3142 ct_iocb->sys_define = 0;
3143 ct_iocb->handle = sp->handle;
3145 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3146 ct_iocb->vp_index = sp->vha->vp_idx;
3147 ct_iocb->comp_status = cpu_to_le16(0);
3149 cmd_dsds = bsg_job->request_payload.sg_cnt;
3150 rsp_dsds = bsg_job->reply_payload.sg_cnt;
3152 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3153 ct_iocb->timeout = 0;
3154 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3155 ct_iocb->cmd_byte_count =
3156 cpu_to_le32(bsg_job->request_payload.payload_len);
3159 cur_dsd = ct_iocb->dsd;
3162 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3163 /* Allocate additional continuation packets? */
3164 if (avail_dsds == 0) {
3166 /* Five DSDs are available in the Continuation Type 1 IOCB. */
3169 cont_pkt = qla2x00_prep_cont_type1_iocb(
3170 vha, ha->req_q_map[0]);
3171 cur_dsd = cont_pkt->dsd;
3176 append_dsd64(&cur_dsd, sg);
3182 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3183 /* Allocate additional continuation packets? */
3184 if (avail_dsds == 0) {
3186 /* Five DSDs are available in the Continuation Type 1 IOCB. */
3189 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3191 cur_dsd = cont_pkt->dsd;
3196 append_dsd64(&cur_dsd, sg);
3199 ct_iocb->entry_count = entry_count;
3203 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3204 * @sp: command to send to the ISP
3206 * Returns non-zero if a failure occurred, else zero.
3209 qla82xx_start_scsi(srb_t *sp)
3212 unsigned long flags;
3213 struct scsi_cmnd *cmd;
3219 struct device_reg_82xx __iomem *reg;
3222 uint8_t additional_cdb_len;
3223 struct ct6_dsd *ctx;
3224 struct scsi_qla_host *vha = sp->vha;
3225 struct qla_hw_data *ha = vha->hw;
3226 struct req_que *req = NULL;
3227 struct rsp_que *rsp = NULL;
3229 /* Setup device pointers. */
3230 reg = &ha->iobase->isp82;
3231 cmd = GET_CMD_SP(sp);
3233 rsp = ha->rsp_q_map[0];
3235 /* So we know we haven't pci_map'ed anything yet */
3238 dbval = 0x04 | (ha->portnum << 5);
3240 /* Send marker if required */
3241 if (vha->marker_needed != 0) {
3242 if (qla2x00_marker(vha, ha->base_qpair,
3243 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3244 ql_log(ql_log_warn, vha, 0x300c,
3245 "qla2x00_marker failed for cmd=%p.\n", cmd);
3246 return QLA_FUNCTION_FAILED;
3248 vha->marker_needed = 0;
3251 /* Acquire ring specific lock */
3252 spin_lock_irqsave(&ha->hardware_lock, flags);
3254 handle = qla2xxx_get_next_handle(req);
3258 /* Map the sg table so we have an accurate count of sg entries needed */
3259 if (scsi_sg_count(cmd)) {
3260 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3261 scsi_sg_count(cmd), cmd->sc_data_direction);
3262 if (unlikely(!nseg))
3269 if (tot_dsds > ql2xshiftctondsd) {
3270 struct cmd_type_6 *cmd_pkt;
3271 uint16_t more_dsd_lists = 0;
3272 struct dsd_dma *dsd_ptr;
3275 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3276 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3277 ql_dbg(ql_dbg_io, vha, 0x300d,
3278 "Num of DSD list %d is than %d for cmd=%p.\n",
3279 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3284 if (more_dsd_lists <= ha->gbl_dsd_avail)
3285 goto sufficient_dsds;
3287 more_dsd_lists -= ha->gbl_dsd_avail;
3289 for (i = 0; i < more_dsd_lists; i++) {
3290 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3292 ql_log(ql_log_fatal, vha, 0x300e,
3293 "Failed to allocate memory for dsd_dma "
3294 "for cmd=%p.\n", cmd);
3298 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3299 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3300 if (!dsd_ptr->dsd_addr) {
3302 ql_log(ql_log_fatal, vha, 0x300f,
3303 "Failed to allocate memory for dsd_addr "
3304 "for cmd=%p.\n", cmd);
3307 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3308 ha->gbl_dsd_avail++;
3314 if (req->cnt < (req_cnt + 2)) {
3315 cnt = (uint16_t)rd_reg_dword_relaxed(
3316 &reg->req_q_out[0]);
3317 if (req->ring_index < cnt)
3318 req->cnt = cnt - req->ring_index;
3320 req->cnt = req->length -
3321 (req->ring_index - cnt);
3322 if (req->cnt < (req_cnt + 2))
3326 ctx = sp->u.scmd.ct6_ctx =
3327 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3329 ql_log(ql_log_fatal, vha, 0x3010,
3330 "Failed to allocate ctx for cmd=%p.\n", cmd);
3334 memset(ctx, 0, sizeof(struct ct6_dsd));
3335 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3336 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3337 if (!ctx->fcp_cmnd) {
3338 ql_log(ql_log_fatal, vha, 0x3011,
3339 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3343 /* Initialize the DSD list and dma handle */
3344 INIT_LIST_HEAD(&ctx->dsd_list);
3345 ctx->dsd_use_cnt = 0;
3347 if (cmd->cmd_len > 16) {
3348 additional_cdb_len = cmd->cmd_len - 16;
3349 if ((cmd->cmd_len % 4) != 0) {
3350 /* SCSI commands bigger than 16 bytes must be a multiple of 4 bytes. */
3353 ql_log(ql_log_warn, vha, 0x3012,
3354 "scsi cmd len %d not multiple of 4 "
3355 "for cmd=%p.\n", cmd->cmd_len, cmd);
3356 goto queuing_error_fcp_cmnd;
3358 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3360 additional_cdb_len = 0;
3361 ctx->fcp_cmnd_len = 12 + 16 + 4;
3364 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3365 cmd_pkt->handle = make_handle(req->id, handle);
3367 /* Zero out remaining portion of packet. */
3368 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3369 clr_ptr = (uint32_t *)cmd_pkt + 2;
3370 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3371 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3373 /* Set NPORT-ID and LUN number*/
3374 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3375 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3376 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3377 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3378 cmd_pkt->vp_index = sp->vha->vp_idx;
3380 /* Build IOCB segments */
3381 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3382 goto queuing_error_fcp_cmnd;
3384 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3385 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3387 /* build FCP_CMND IU */
3388 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3389 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3391 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3392 ctx->fcp_cmnd->additional_cdb_len |= 1;
3393 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3394 ctx->fcp_cmnd->additional_cdb_len |= 2;
3396 /* Populate the FCP_PRIO. */
3397 if (ha->flags.fcp_prio_enabled)
3398 ctx->fcp_cmnd->task_attribute |=
3399 sp->fcport->fcp_prio << 3;
3401 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3403 fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
3404 additional_cdb_len);
3405 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3407 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3408 put_unaligned_le64(ctx->fcp_cmnd_dma,
3409 &cmd_pkt->fcp_cmnd_dseg_address);
3411 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3412 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3413 /* Set total data segment count. */
3414 cmd_pkt->entry_count = (uint8_t)req_cnt;
3415 /* Specify the response queue number where completion should happen. */
3418 cmd_pkt->entry_status = (uint8_t) rsp->id;
3420 struct cmd_type_7 *cmd_pkt;
3422 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3423 if (req->cnt < (req_cnt + 2)) {
3424 cnt = (uint16_t)rd_reg_dword_relaxed(
3425 &reg->req_q_out[0]);
3426 if (req->ring_index < cnt)
3427 req->cnt = cnt - req->ring_index;
3429 req->cnt = req->length -
3430 (req->ring_index - cnt);
3432 if (req->cnt < (req_cnt + 2))
3435 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3436 cmd_pkt->handle = make_handle(req->id, handle);
3438 /* Zero out remaining portion of packet. */
3439 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3440 clr_ptr = (uint32_t *)cmd_pkt + 2;
3441 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3442 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3444 /* Set NPORT-ID and LUN number*/
3445 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3446 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3447 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3448 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3449 cmd_pkt->vp_index = sp->vha->vp_idx;
3451 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3452 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3453 sizeof(cmd_pkt->lun));
3455 /* Populate the FCP_PRIO. */
3456 if (ha->flags.fcp_prio_enabled)
3457 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3459 /* Load SCSI command packet. */
3460 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3461 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3463 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3465 /* Build IOCB segments */
3466 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3468 /* Set total data segment count. */
3469 cmd_pkt->entry_count = (uint8_t)req_cnt;
3470 /* Specify the response queue number where completion should happen. */
3473 cmd_pkt->entry_status = (uint8_t) rsp->id;
3476 /* Build command packet. */
3477 req->current_outstanding_cmd = handle;
3478 req->outstanding_cmds[handle] = sp;
3479 sp->handle = handle;
3480 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3481 req->cnt -= req_cnt;
3484 /* Adjust ring index. */
3486 if (req->ring_index == req->length) {
3487 req->ring_index = 0;
3488 req->ring_ptr = req->ring;
3492 sp->flags |= SRB_DMA_VALID;
3494 /* Set chip new ring index. */
3495 /* write, read and verify logic */
3496 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3498 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3500 wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3502 while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
3503 wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3508 /* Manage unprocessed RIO/ZIO commands in response queue. */
3509 if (vha->flags.process_response_queue &&
3510 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3511 qla24xx_process_response_queue(vha, rsp);
3513 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3516 queuing_error_fcp_cmnd:
3517 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3520 scsi_dma_unmap(cmd);
3522 if (sp->u.scmd.crc_ctx) {
3523 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3524 sp->u.scmd.crc_ctx = NULL;
3526 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3528 return QLA_FUNCTION_FAILED;
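/**
 * qla24xx_abort_iocb() - Build an abort IOCB for a previously queued command.
 * @sp: abort SRB
 * @abt_iocb: IOCB to fill in
 *
 * handle_to_abort combines the request queue number and the handle of
 * the command being aborted.
 */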
3532 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3534 struct srb_iocb *aio = &sp->u.iocb_cmd;
3535 scsi_qla_host_t *vha = sp->vha;
3536 struct req_que *req = sp->qpair->req;
3538 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3539 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3540 abt_iocb->entry_count = 1;
3541 abt_iocb->handle = make_handle(req->id, sp->handle);
3543 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3544 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3545 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3546 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3548 abt_iocb->handle_to_abort =
3549 make_handle(le16_to_cpu(aio->u.abt.req_que_no),
3550 aio->u.abt.cmd_hndl);
3551 abt_iocb->vp_index = vha->vp_idx;
3552 abt_iocb->req_que_no = aio->u.abt.req_que_no;
3553 /* Send the command to the firmware */
3558 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3562 mbx->entry_type = MBX_IOCB_TYPE;
3563 mbx->handle = sp->handle;
3564 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3566 for (i = 0; i < sz; i++)
3567 mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
3571 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3573 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3574 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3575 ct_pkt->handle = sp->handle;
3578 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3579 struct nack_to_isp *nack)
3581 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3583 nack->entry_type = NOTIFY_ACK_TYPE;
3584 nack->entry_count = 1;
3585 nack->ox_id = ntfy->ox_id;
3587 nack->u.isp24.handle = sp->handle;
3588 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3589 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3590 nack->u.isp24.flags = ntfy->u.isp24.flags &
3591 cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
3593 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3594 nack->u.isp24.status = ntfy->u.isp24.status;
3595 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3596 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3597 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3598 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3599 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3600 nack->u.isp24.srr_flags = 0;
3601 nack->u.isp24.srr_reject_code = 0;
3602 nack->u.isp24.srr_reject_code_expl = 0;
3603 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3607 /* Build an NVME LS request. */
3610 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3612 struct srb_iocb *nvme;
3614 nvme = &sp->u.iocb_cmd;
3615 cmd_pkt->entry_type = PT_LS4_REQUEST;
3616 cmd_pkt->entry_count = 1;
3617 cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
3619 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3620 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3621 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3623 cmd_pkt->tx_dseg_count = cpu_to_le16(1);
3624 cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
3625 cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
3626 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3628 cmd_pkt->rx_dseg_count = cpu_to_le16(1);
3629 cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
3630 cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
3631 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
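/**
 * qla25xx_ctrlvp_iocb() - Build a VP (virtual port) control IOCB.
 * @sp: control SRB
 * @vce: IOCB to fill in
 */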
3635 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3639 vce->entry_type = VP_CTRL_IOCB_TYPE;
3640 vce->handle = sp->handle;
3641 vce->entry_count = 1;
3642 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3643 vce->vp_count = cpu_to_le16(1);
3646 /* The index map in firmware starts with 1, so decrement the index;
3647 * this is OK as index 0 is never used. */
3649 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3650 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3651 vce->vp_idx_map[map] |= 1 << pos;
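/*
 * Example: vp_index 10 sets bit 1 of vp_idx_map[1], since
 * map = (10 - 1) / 8 = 1 and pos = (10 - 1) & 7 = 1.
 */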
3655 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3657 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3658 logio->control_flags =
3659 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3661 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3662 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3663 logio->port_id[1] = sp->fcport->d_id.b.area;
3664 logio->port_id[2] = sp->fcport->d_id.b.domain;
3665 logio->vp_index = sp->fcport->vha->vp_idx;
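/**
 * qla2x00_start_sp() - Allocate an IOCB slot and build the type-specific IOCB.
 * @sp: SRB to start
 *
 * Dispatches on sp->type under the queue-pair lock, then notifies the
 * firmware through qla2x00_start_iocbs().
 */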
3669 qla2x00_start_sp(srb_t *sp)
3671 int rval = QLA_SUCCESS;
3672 scsi_qla_host_t *vha = sp->vha;
3673 struct qla_hw_data *ha = vha->hw;
3674 struct qla_qpair *qp = sp->qpair;
3676 unsigned long flags;
3678 spin_lock_irqsave(qp->qp_lock_ptr, flags);
3679 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3682 ql_log(ql_log_warn, vha, 0x700c,
3683 "qla2x00_alloc_iocbs failed.\n");
3689 IS_FWI2_CAPABLE(ha) ?
3690 qla24xx_login_iocb(sp, pkt) :
3691 qla2x00_login_iocb(sp, pkt);
3694 qla24xx_prli_iocb(sp, pkt);
3696 case SRB_LOGOUT_CMD:
3697 IS_FWI2_CAPABLE(ha) ?
3698 qla24xx_logout_iocb(sp, pkt) :
3699 qla2x00_logout_iocb(sp, pkt);
3701 case SRB_ELS_CMD_RPT:
3702 case SRB_ELS_CMD_HST:
3703 qla24xx_els_iocb(sp, pkt);
3706 IS_FWI2_CAPABLE(ha) ?
3707 qla24xx_ct_iocb(sp, pkt) :
3708 qla2x00_ct_iocb(sp, pkt);
3711 IS_FWI2_CAPABLE(ha) ?
3712 qla24xx_adisc_iocb(sp, pkt) :
3713 qla2x00_adisc_iocb(sp, pkt);
3717 IS_QLAFX00(ha) ? qlafx00_tm_iocb(sp, pkt) :
3718 qla24xx_tm_iocb(sp, pkt);
3720 case SRB_FXIOCB_DCMD:
3721 case SRB_FXIOCB_BCMD:
3722 qlafx00_fxdisc_iocb(sp, pkt);
3725 qla_nvme_ls(sp, pkt);
3729 IS_QLAFX00(ha) ? qlafx00_abort_iocb(sp, pkt) :
3730 qla24xx_abort_iocb(sp, pkt);
3733 qla24xx_els_logo_iocb(sp, pkt);
3735 case SRB_CT_PTHRU_CMD:
3736 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3739 qla2x00_mb_iocb(sp, pkt);
3741 case SRB_NACK_PLOGI:
3744 qla2x00_send_notify_ack_iocb(sp, pkt);
3747 qla25xx_ctrlvp_iocb(sp, pkt);
3750 qla24xx_prlo_iocb(sp, pkt);
3756 if (sp->start_timer)
3757 add_timer(&sp->u.iocb_cmd.timer);
3760 qla2x00_start_iocbs(vha, qp->req);
3762 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
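/**
 * qla25xx_build_bidir_iocb() - Build a bidirectional command IOCB from a BSG job.
 * @sp: SRB carrying the bsg_job
 * @vha: SCSI host pointer
 * @cmd_pkt: IOCB to fill in
 * @tot_dsds: total number of data segment descriptors
 */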
3767 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3768 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3770 uint16_t avail_dsds;
3771 struct dsd64 *cur_dsd;
3772 uint32_t req_data_len = 0;
3773 uint32_t rsp_data_len = 0;
3774 struct scatterlist *sg;
3776 int entry_count = 1;
3777 struct bsg_job *bsg_job = sp->u.bsg_job;
3779 /* Update entry type to indicate a bidirectional command. */
3780 put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3782 /* Set the transfer direction; for bidirectional commands both flags
3783 * are set. Also set the BD_WRAP_BACK flag; the firmware takes care of
3784 * assigning DID=SID for outgoing packets. */
3786 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3787 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3788 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3791 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3792 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3793 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3794 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3796 vha->bidi_stats.transfer_bytes += req_data_len;
3797 vha->bidi_stats.io_count++;
3799 vha->qla_stats.output_bytes += req_data_len;
3800 vha->qla_stats.output_requests++;
3802 /* Only one DSD is available for the bidirectional IOCB; the remaining
3803 * DSDs are bundled in continuation IOCBs. */
3806 cur_dsd = &cmd_pkt->fcp_dsd;
3810 for_each_sg(bsg_job->request_payload.sg_list, sg,
3811 bsg_job->request_payload.sg_cnt, index) {
3812 cont_a64_entry_t *cont_pkt;
3814 /* Allocate additional continuation packets */
3815 if (avail_dsds == 0) {
3816 /* A Continuation Type 1 IOCB can accommodate five DSDs. */
3819 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3820 cur_dsd = cont_pkt->dsd;
3824 append_dsd64(&cur_dsd, sg);
3827 /* For a read request the DSDs always go to the continuation IOCB
3828 * and follow the write DSDs. If there is room on the current IOCB
3829 * they are added there, else a new continuation IOCB is allocated. */
3832 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3833 bsg_job->reply_payload.sg_cnt, index) {
3834 cont_a64_entry_t *cont_pkt;
3836 /* Allocate additional continuation packets */
3837 if (avail_dsds == 0) {
3838 /* A Continuation Type 1 IOCB can accommodate five DSDs. */
3841 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3842 cur_dsd = cont_pkt->dsd;
3846 append_dsd64(&cur_dsd, sg);
3849 /* This value should be the same as the number of IOCBs required for this cmd */
3850 cmd_pkt->entry_count = entry_count;
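/**
 * qla2x00_start_bidir() - Queue a bidirectional command to the ISP.
 * @sp: SRB carrying the bsg_job
 * @vha: SCSI host pointer
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns an EXT_STATUS_* value (EXT_STATUS_OK on success).
 */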
3854 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3857 struct qla_hw_data *ha = vha->hw;
3858 unsigned long flags;
3863 struct cmd_bidir *cmd_pkt = NULL;
3864 struct rsp_que *rsp;
3865 struct req_que *req;
3866 int rval = EXT_STATUS_OK;
3870 rsp = ha->rsp_q_map[0];
3873 /* Send marker if required */
3874 if (vha->marker_needed != 0) {
3875 if (qla2x00_marker(vha, ha->base_qpair,
3876 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3877 return EXT_STATUS_MAILBOX;
3878 vha->marker_needed = 0;
3881 /* Acquire ring specific lock */
3882 spin_lock_irqsave(&ha->hardware_lock, flags);
3884 handle = qla2xxx_get_next_handle(req);
3886 rval = EXT_STATUS_BUSY;
3890 /* Calculate number of IOCB required */
3891 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3893 /* Check for room on request queue. */
3894 if (req->cnt < req_cnt + 2) {
3895 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3896 rd_reg_dword_relaxed(req->req_q_out);
3897 if (req->ring_index < cnt)
3898 req->cnt = cnt - req->ring_index;
3900 req->cnt = req->length -
3901 (req->ring_index - cnt);
3903 if (req->cnt < req_cnt + 2) {
3904 rval = EXT_STATUS_BUSY;
3908 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3909 cmd_pkt->handle = make_handle(req->id, handle);
3911 /* Zero out remaining portion of packet. */
3912 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3913 clr_ptr = (uint32_t *)cmd_pkt + 2;
3914 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3916 /* Set NPORT-ID (of vha)*/
3917 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3918 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3919 cmd_pkt->port_id[1] = vha->d_id.b.area;
3920 cmd_pkt->port_id[2] = vha->d_id.b.domain;
3922 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3923 cmd_pkt->entry_status = (uint8_t) rsp->id;
3924 /* Build command packet. */
3925 req->current_outstanding_cmd = handle;
3926 req->outstanding_cmds[handle] = sp;
3927 sp->handle = handle;
3928 req->cnt -= req_cnt;
3930 /* Send the command to the firmware */
3932 qla2x00_start_iocbs(vha, req);
3934 spin_unlock_irqrestore(&ha->hardware_lock, flags);