// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
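/*
 * Worked example (editorial note, not from the original source): for
 * dsds = 10, the Command Type 3 IOCB holds the first 2 descriptors and
 * the remaining 8 spill into Continuation Type 1 IOCBs of 5 each, so
 * qla2x00_calc_iocbs_64(10) returns 1 + (8 / 5) + 1 (for the 3 left
 * over) = 3 entries.
 */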
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
			   CONTINUE_A64_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}
inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t	guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
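/*
 * Hedged usage sketch (editorial, not from the original source): for a
 * WRITE_PASS command on a host that advertises SHOST_DIX_GUARD_IP, the
 * translation above yields PO_MODE_DIF_TCP_CKSUM and the return value is
 * the number of protection scatter/gather entries:
 *
 *	uint16_t fw_prot_opts = 0;
 *	int prot_seg_cnt = qla24xx_configure_prot_mode(sp, &fw_prot_opts);
 */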
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd32 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
	cur_dsd = cmd_pkt->dsd32;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd32(&cur_dsd, sg);
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
	cur_dsd = cmd_pkt->dsd64;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}
/*
 * Find the first handle that is not in use, starting from
 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
 * associated with @req.
 */
uint32_t qla2xxx_get_next_handle(struct req_que *req)
{
	uint32_t index, handle = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			return handle;
	}

	return 0;
}
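/*
 * Illustrative sketch (editorial, mirrors the start_scsi paths below):
 * callers reserve a handle under the ring lock and treat 0 as "no slot
 * free":
 *
 *	spin_lock_irqsave(&ha->hardware_lock, flags);
 *	handle = qla2xxx_get_next_handle(req);
 *	if (handle == 0)
 *		goto queuing_error;
 */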
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->isp24.req_q_in);
		} else {
			wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct req_que *req = qpair->req;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = make_handle(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return (ret);
}
/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}
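/*
 * Example (editorial sketch, mirrors callers elsewhere in this file): a
 * path that does not already hold the queue-pair lock synchronizes all
 * targets and LUNs with:
 *
 *	if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0, MK_SYNC_ALL) !=
 *	    QLA_SUCCESS)
 *		return QLA_FUNCTION_FAILED;
 */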
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	struct dsd64 *cur_dsd = NULL, *next_dsd;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;
	struct qla_qpair *qpair = sp->qpair;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		qpair->counters.output_bytes += scsi_bufflen(cmd);
		qpair->counters.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += scsi_bufflen(cmd);
		qpair->counters.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = sp->u.scmd.ct6_ctx;

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
					   &cmd_pkt->fcp_dsd.address);
			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
		} else {
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
					   &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd++;
		}
		cur_dsd = next_dsd;
		while (avail_dsds) {
			append_dsd64(&cur_dsd, cur_seg);
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
	return 0;
}
/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of dsd list needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
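/*
 * Worked example (editorial note): with QLA_DSDS_PER_IOCB == 37, a
 * command needing 80 data segment descriptors takes 80 / 37 == 2 full
 * DSD lists plus one more for the remaining 6, so
 * qla24xx_calc_dsd_lists(80) returns 3.
 */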
/*
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct qla_qpair *qpair = sp->qpair;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		qpair->counters.output_bytes += scsi_bufflen(cmd);
		qpair->counters.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		qpair->counters.input_bytes += scsi_bufflen(cmd);
		qpair->counters.input_requests++;
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->dsd;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}
struct fw_dif_context {
	__le32 ref_tag;
	__le16 app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};
/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
							0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}
int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
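/*
 * Usage sketch (editorial, mirrors the DIF start paths later in this
 * file): callers seed a struct qla2_sgx with the command buffer and walk
 * it one protection interval at a time to count required data segments:
 *
 *	struct qla2_sgx sgx;
 *	uint32_t partial;
 *
 *	memset(&sgx, 0, sizeof(struct qla2_sgx));
 *	sgx.tot_bytes = scsi_bufflen(cmd);
 *	sgx.cur_sg = scsi_sglist(cmd);
 *	sgx.sp = sp;
 *	nseg = 0;
 *	while (qla24xx_get_one_block_sg(cmd->device->sector_size, &sgx,
 *	    &partial))
 *		nseg++;
 */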
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	struct dsd64 *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int      = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg    = tc->sg;
		sg_prot	      = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
					      &sp->u.scmd.crc_ctx->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
					   &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		put_unaligned_le64(sle_dma, &cur_dsd->address);
		cur_dsd->length = cpu_to_le32(sle_dma_len);
		cur_dsd++;
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	struct dsd64 *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
					      &sp->u.scmd.crc_ctx->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
					   &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
	struct scatterlist *sg, *sgl;
	struct crc_context *difctx = NULL;
	struct scsi_qla_host *vha;
	uint dsd_list_len;
	uint avail_dsds = 0;
	uint used_dsds = tot_dsds;
	bool dif_local_dma_alloc = false;
	bool direction_to_device = false;
	int i;

	if (sp) {
		struct scsi_cmnd *cmd = GET_CMD_SP(sp);

		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
		difctx = sp->u.scmd.crc_ctx;
		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
		  "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
			__func__, cmd, difctx, sp);
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
		difctx = tc->ctx;
		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
	    "%s: enter (write=%u)\n", __func__, direction_to_device);

	/* if initiator doing write or target doing read */
	if (direction_to_device) {
		for_each_sg(sgl, sg, tot_dsds, i) {
			u64 sle_phys = sg_phys(sg);

			/* If SGE addr + len flips bits in upper 32-bits */
			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
				    "%s: page boundary crossing (phys=%llx len=%x)\n",
				    __func__, sle_phys, sg->length);

				if (difctx) {
					ha->dif_bundle_crossed_pages++;
					dif_local_dma_alloc = true;
				} else {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe022,
					    "%s: difctx pointer is NULL\n",
					    __func__);
				}
				break;
			}
		}
		ha->dif_bundle_writes++;
	} else {
		ha->dif_bundle_reads++;
	}

	if (ql2xdifbundlinginternalbuffers)
		dif_local_dma_alloc = direction_to_device;

	if (dif_local_dma_alloc) {
		u32 track_difbundl_buf = 0;
		u32 ldma_sg_len = 0;
		u8 ldma_needed = 1;

		difctx->no_dif_bundl = 0;
		difctx->dif_bundl_len = 0;

		/* Track DSD buffers */
		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
		/* Track local DMA buffers */
		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

		for_each_sg(sgl, sg, tot_dsds, i) {
			u32 sglen = sg_dma_len(sg);

			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
			    __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
			    difctx->dif_bundl_len, ldma_needed);

			while (sglen) {
				u32 xfrlen = 0;

				if (ldma_needed) {
					/*
					 * Allocate list item to store
					 * the DMA buffers
					 */
					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
					    GFP_ATOMIC);
					if (!dsd_ptr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc dsd_ptr\n",
						    __func__);
						return 1;
					}
					ha->dif_bundle_kallocs++;

					/* allocate dma buffer */
					dsd_ptr->dsd_addr = dma_pool_alloc
						(ha->dif_bundl_pool, GFP_ATOMIC,
						 &dsd_ptr->dsd_list_dma);
					if (!dsd_ptr->dsd_addr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc ->dsd_ptr\n",
						    __func__);
						/*
						 * need to cleanup only this
						 * dsd_ptr rest will be done
						 * by sp_free_dma()
						 */
						kfree(dsd_ptr);
						ha->dif_bundle_kallocs--;
						return 1;
					}
					ha->dif_bundle_dma_allocs++;
					ldma_needed = 0;
					difctx->no_dif_bundl++;
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dma_hndl_list);
				}

				/* xfrlen is min of dma pool size and sglen */
				xfrlen = (sglen >
				   (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
				    sglen;

				/* replace with local allocated dma buffer */
				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
				    difctx->dif_bundl_len);
				difctx->dif_bundl_len += xfrlen;
				sglen -= xfrlen;
				ldma_sg_len += xfrlen;
				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
				    sg_is_last(sg)) {
					ldma_needed = 1;
					ldma_sg_len = 0;
				}
			}
		}

		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
		    difctx->dif_bundl_len, difctx->no_dif_bundl,
		    track_difbundl_buf);

		if (sp)
			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
		else
			tc->prot_flags = DIF_BUNDL_DMA_VALID;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			u32 sglen = (difctx->dif_bundl_len >
				     DIF_BUNDLING_DMA_POOL_SIZE) ?
			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

			BUG_ON(track_difbundl_buf == 0);

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
				    0xe024,
				    "%s: adding continuation iocb's\n",
				    __func__);
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc dsd_ptr\n",
					    __func__);
					return 1;
				}
				ha->dif_bundle_kallocs++;

				difctx->no_ldif_dsd++;
				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc ->dsd_addr\n",
					    __func__);
					/*
					 * need to cleanup only this dsd_ptr
					 * rest will be done by sp_free_dma()
					 */
					kfree(dsd_ptr);
					ha->dif_bundle_kallocs--;
					return 1;
				}
				ha->dif_bundle_dma_allocs++;

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
						   &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			put_unaligned_le64(dif_dsd->dsd_list_dma,
					   &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(sglen);
			cur_dsd++;
			avail_dsds--;
			difctx->dif_bundl_len -= sglen;
			track_difbundl_buf--;
		}

		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
			difctx->no_ldif_dsd, difctx->no_dif_bundl);
	} else {
		for_each_sg(sgl, sg, tot_dsds, i) {
			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe027,
					    "%s: failed alloc dsd_dma...\n",
					    __func__);
					return 1;
				}

				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					/* need to cleanup only this dsd_ptr */
					/* rest will be done by sp_free_dma() */
					kfree(dsd_ptr);
					return 1;
				}

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
						   &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			append_dsd64(&cur_dsd, sg);
			avail_dsds--;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}
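/*
 * Editorial note with a small worked example: a firmware DSD is 12 bytes
 * (an 8-byte little-endian address plus a 4-byte length), which is why
 * the chained lists above are sized as (avail_dsds + 1) * 12 -- the one
 * extra slot holds either the pointer to the next DSD list or the null
 * terminator written at the end of the walk.
 */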
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	struct dsd64 *cur_dsd;
	__be32 *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.crc_ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
	cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
			   &cmd_pkt->fcp_cmnd_dseg_address);
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
							tot_prot_dsds);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
			cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
			(tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
				tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	sp->iores.res_type = RESOURCE_INI;
	sp->iores.iocb_cnt = req_cnt;
	if (qla_get_iocbs(sp->qpair, &sp->iores))
		goto queuing_error;

	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	qla_put_iocbs(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;

	sp->iores.res_type = RESOURCE_INI;
	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (qla_get_iocbs(sp->qpair, &sp->iores))
		goto queuing_error;

	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	qla_put_iocbs(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
/**
 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_start_scsi_mq(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	req = qpair->req;
	rsp = qpair->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	sp->iores.res_type = RESOURCE_INI;
	sp->iores.iocb_cnt = req_cnt;
	if (qla_get_iocbs(sp->qpair, &sp->iores))
		goto queuing_error;

	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	qla_put_iocbs(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}
/**
 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2xxx_dif_start_scsi_mq(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;
	struct qla_qpair *qpair = sp->qpair;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Check for host side state */
	if (!qpair->online) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	if (!qpair->difdix_supported &&
		scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla2xxx_start_scsi_mq(sp);
	}

	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;

	sp->iores.res_type = RESOURCE_INI;
	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (qla_get_iocbs(sp->qpair, &sp->iores))
		goto queuing_error;

	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	qla_put_iocbs(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}
/* Generic Control-SRB manipulation functions. */

/* hardware_lock assumed to be held. */

void *
__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = qpair->req;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
	uint32_t handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (sp && (sp->type != SRB_SCSI_CMD)) {
		/* Adjust entry-counts as needed. */
		req_cnt = sp->iocbs;
	}

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (qpair->use_shadow_reg)
			cnt = *req->out_ptr;
		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = rd_reg_dword(reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = rd_reg_dword(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
			qla_schedule_eeh_work(vha);
			return NULL;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2)
		goto queuing_error;

	if (sp) {
		handle = qla2xxx_get_next_handle(req);
		if (handle == 0) {
			ql_log(ql_log_warn, vha, 0x700b,
			    "No room on outstanding cmd array.\n");
			goto queuing_error;
		}

		/* Prep command array. */
		req->current_outstanding_cmd = handle;
		req->outstanding_cmds[handle] = sp;
		sp->handle = handle;
	}

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	if (IS_QLAFX00(ha)) {
		wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
		wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

	return pkt;

queuing_error:
	qpair->tgt_counters.num_alloc_iocb_failed++;
	return pkt;
}

void *
qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;

	if (qla2x00_reset_active(vha))
		return NULL;

	return __qla2x00_alloc_iocbs(qpair, sp);
}

void *
qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
{
	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
}
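/*
 * Illustrative control-SRB sketch (editorial; the exact dispatch lives in
 * qla2x00_start_sp()): a logout, for example, is queued by allocating a
 * packet, filling it in place, and ringing the doorbell:
 *
 *	struct logio_entry_24xx *logio;
 *
 *	logio = (struct logio_entry_24xx *)qla2x00_alloc_iocbs(vha, sp);
 *	if (!logio)
 *		return QLA_FUNCTION_FAILED;
 *	qla24xx_logout_iocb(sp, logio);
 *	qla2x00_start_iocbs(vha, vha->req);
 */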
2418 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2420 struct srb_iocb *lio = &sp->u.iocb_cmd;
2422 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2423 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2424 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2425 logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2426 if (sp->vha->flags.nvme_first_burst)
2427 logio->io_parameter[0] =
2428 cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
2429 if (sp->vha->flags.nvme2_enabled) {
2430 /* Set service parameter BIT_7 for NVME CONF support */
2431 logio->io_parameter[0] |=
2432 cpu_to_le32(NVME_PRLI_SP_CONF);
2433 /* Set service parameter BIT_8 for SLER support */
2434 logio->io_parameter[0] |=
2435 cpu_to_le32(NVME_PRLI_SP_SLER);
2436 /* Set service parameter BIT_9 for PI control support */
2437 logio->io_parameter[0] |=
2438 cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
2439 }
2440 }
2442 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2443 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2444 logio->port_id[1] = sp->fcport->d_id.b.area;
2445 logio->port_id[2] = sp->fcport->d_id.b.domain;
2446 logio->vp_index = sp->vha->vp_idx;
2450 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2452 struct srb_iocb *lio = &sp->u.iocb_cmd;
2454 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2455 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2457 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2458 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2459 } else {
2460 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2461 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2462 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2463 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2464 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2465 }
2466 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2467 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2468 logio->port_id[1] = sp->fcport->d_id.b.area;
2469 logio->port_id[2] = sp->fcport->d_id.b.domain;
2470 logio->vp_index = sp->vha->vp_idx;
2474 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2476 struct qla_hw_data *ha = sp->vha->hw;
2477 struct srb_iocb *lio = &sp->u.iocb_cmd;
2480 mbx->entry_type = MBX_IOCB_TYPE;
2481 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2482 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2483 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2484 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2485 if (HAS_EXTENDED_IDS(ha)) {
2486 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2487 mbx->mb10 = cpu_to_le16(opts);
2488 } else {
2489 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2490 }
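/*
 * Editor's note (illustration): without extended IDs the loop ID and the
 * option bits share mb1, loop ID in the high byte and opts in the low
 * byte; e.g. loop_id 0x81 with BIT_0 set yields mb1 = 0x8101.
 */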
2491 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2492 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2493 sp->fcport->d_id.b.al_pa);
2494 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2498 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2500 u16 control_flags = LCF_COMMAND_LOGO;
2501 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2503 if (sp->fcport->explicit_logout) {
2504 control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2505 } else {
2506 control_flags |= LCF_IMPL_LOGO;
2508 if (!sp->fcport->keep_nport_handle)
2509 control_flags |= LCF_FREE_NPORT;
2510 }
2512 logio->control_flags = cpu_to_le16(control_flags);
2513 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2514 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2515 logio->port_id[1] = sp->fcport->d_id.b.area;
2516 logio->port_id[2] = sp->fcport->d_id.b.domain;
2517 logio->vp_index = sp->vha->vp_idx;
2521 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2523 struct qla_hw_data *ha = sp->vha->hw;
2525 mbx->entry_type = MBX_IOCB_TYPE;
2526 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2527 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2528 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2529 cpu_to_le16(sp->fcport->loop_id) :
2530 cpu_to_le16(sp->fcport->loop_id << 8);
2531 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2532 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2533 sp->fcport->d_id.b.al_pa);
2534 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2535 /* Implicit: mbx->mb10 = 0. */
2539 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2541 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2542 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2543 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2544 logio->vp_index = sp->vha->vp_idx;
2548 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2550 struct qla_hw_data *ha = sp->vha->hw;
2552 mbx->entry_type = MBX_IOCB_TYPE;
2553 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2554 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2555 if (HAS_EXTENDED_IDS(ha)) {
2556 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2557 mbx->mb10 = cpu_to_le16(BIT_0);
2558 } else {
2559 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2560 }
2561 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2562 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2563 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2564 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
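/*
 * Editor's note (illustration): the 64-bit async_pd_dma address is
 * spread over four 16-bit mailboxes: mb2/mb3 carry the upper and lower
 * halves of the low 32 bits (MSW/LSW), mb6/mb7 the same halves of the
 * high 32 bits (MSD). E.g. 0x000000123456789A gives mb2 = 0x3456,
 * mb3 = 0x789A, mb6 = 0x0000, mb7 = 0x0012.
 */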
2565 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2569 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2571 uint32_t flags;
2572 uint64_t lun;
2573 struct fc_port *fcport = sp->fcport;
2574 scsi_qla_host_t *vha = fcport->vha;
2575 struct qla_hw_data *ha = vha->hw;
2576 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2577 struct req_que *req = vha->req;
2579 flags = iocb->u.tmf.flags;
2580 lun = iocb->u.tmf.lun;
2582 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2583 tsk->entry_count = 1;
2584 tsk->handle = make_handle(req->id, tsk->handle);
2585 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2586 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
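/*
 * Editor's note (assumption): r_a_tov appears to be held in 100 ms
 * units, so r_a_tov / 10 is seconds and the IOCB timeout is set to
 * twice the resource-allocation timeout (2 * R_A_TOV).
 */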
2587 tsk->control_flags = cpu_to_le32(flags);
2588 tsk->port_id[0] = fcport->d_id.b.al_pa;
2589 tsk->port_id[1] = fcport->d_id.b.area;
2590 tsk->port_id[2] = fcport->d_id.b.domain;
2591 tsk->vp_index = fcport->vha->vp_idx;
2593 if (flags == TCF_LUN_RESET) {
2594 int_to_scsilun(lun, &tsk->lun);
2595 host_to_fcp_swap((uint8_t *)&tsk->lun,
2596 sizeof(tsk->lun));
2597 }
2600 void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2602 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2603 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2604 sp->free = qla2x00_sp_free;
2605 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2606 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2607 sp->start_timer = 1;
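/*
 * Editor's note: the timer is only armed later, by add_timer() in
 * qla2x00_start_sp(), once the IOCB has actually been queued;
 * start_timer just records that this SRB wants it.
 */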
2610 static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2612 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2616 if (elsio->u.els_logo.els_logo_pyld)
2617 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2618 elsio->u.els_logo.els_logo_pyld,
2619 elsio->u.els_logo.els_logo_pyld_dma);
2621 del_timer(&elsio->timer);
2626 qla2x00_els_dcmd_iocb_timeout(void *data)
2627 srb_t *sp = data;
2629 fc_port_t *fcport = sp->fcport;
2630 struct scsi_qla_host *vha = sp->vha;
2631 struct srb_iocb *lio = &sp->u.iocb_cmd;
2632 unsigned long flags = 0;
2633 int res, h;
2635 ql_dbg(ql_dbg_io, vha, 0x3069,
2636 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2637 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2638 fcport->d_id.b.al_pa);
2640 /* Abort the exchange */
2641 res = qla24xx_async_abort_cmd(sp, false);
2642 if (res) {
2643 ql_dbg(ql_dbg_io, vha, 0x3070,
2644 "mbx abort_command failed.\n");
2645 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2646 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2647 if (sp->qpair->req->outstanding_cmds[h] == sp) {
2648 sp->qpair->req->outstanding_cmds[h] = NULL;
2649 break;
2650 }
2651 }
2652 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2653 complete(&lio->u.els_logo.comp);
2654 } else {
2655 ql_dbg(ql_dbg_io, vha, 0x3071,
2656 "mbx abort_command success.\n");
2657 }
2660 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2662 fc_port_t *fcport = sp->fcport;
2663 struct srb_iocb *lio = &sp->u.iocb_cmd;
2664 struct scsi_qla_host *vha = sp->vha;
2666 ql_dbg(ql_dbg_io, vha, 0x3072,
2667 "%s hdl=%x, portid=%02x%02x%02x done\n",
2668 sp->name, sp->handle, fcport->d_id.b.domain,
2669 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2671 complete(&lio->u.els_logo.comp);
2675 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2676 port_id_t remote_did)
2679 fc_port_t *fcport = NULL;
2680 struct srb_iocb *elsio = NULL;
2681 struct qla_hw_data *ha = vha->hw;
2682 struct els_logo_payload logo_pyld;
2683 int rval = QLA_SUCCESS;
2685 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2686 if (!fcport) {
2687 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2688 return -ENOMEM;
2689 }
2691 /* Alloc SRB structure */
2692 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2693 if (!sp) {
2694 kfree(fcport);
2695 ql_log(ql_log_info, vha, 0x70e6,
2696 "SRB allocation failed\n");
2697 return -ENOMEM;
2698 }
2700 elsio = &sp->u.iocb_cmd;
2701 fcport->loop_id = 0xFFFF;
2702 fcport->d_id.b.domain = remote_did.b.domain;
2703 fcport->d_id.b.area = remote_did.b.area;
2704 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2706 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2707 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2709 sp->type = SRB_ELS_DCMD;
2710 sp->name = "ELS_DCMD";
2711 sp->fcport = fcport;
2712 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2713 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2714 init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2715 sp->done = qla2x00_els_dcmd_sp_done;
2716 sp->free = qla2x00_els_dcmd_sp_free;
2718 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2719 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2720 GFP_KERNEL);
2722 if (!elsio->u.els_logo.els_logo_pyld) {
2723 sp->free(sp);
2724 return QLA_FUNCTION_FAILED;
2725 }
2727 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2729 elsio->u.els_logo.els_cmd = els_opcode;
2730 logo_pyld.opcode = els_opcode;
2731 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2732 logo_pyld.s_id[1] = vha->d_id.b.area;
2733 logo_pyld.s_id[2] = vha->d_id.b.domain;
2734 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2735 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2737 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2738 sizeof(struct els_logo_payload));
2739 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2740 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2741 elsio->u.els_logo.els_logo_pyld,
2742 sizeof(*elsio->u.els_logo.els_logo_pyld));
2744 rval = qla2x00_start_sp(sp);
2745 if (rval != QLA_SUCCESS) {
2746 sp->free(sp);
2747 return QLA_FUNCTION_FAILED;
2748 }
2750 ql_dbg(ql_dbg_io, vha, 0x3074,
2751 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2752 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2753 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2755 wait_for_completion(&elsio->u.els_logo.comp);
2757 sp->free(sp);
2758 return rval;
2759 }
2762 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2764 scsi_qla_host_t *vha = sp->vha;
2765 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2767 els_iocb->entry_type = ELS_IOCB_TYPE;
2768 els_iocb->entry_count = 1;
2769 els_iocb->sys_define = 0;
2770 els_iocb->entry_status = 0;
2771 els_iocb->handle = sp->handle;
2772 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2773 els_iocb->tx_dsd_count = cpu_to_le16(1);
2774 els_iocb->vp_index = vha->vp_idx;
2775 els_iocb->sof_type = EST_SOFI3;
2776 els_iocb->rx_dsd_count = 0;
2777 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2779 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
2780 els_iocb->d_id[1] = sp->fcport->d_id.b.area;
2781 els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
2782 /* For SID the byte order is different than DID */
2783 els_iocb->s_id[1] = vha->d_id.b.al_pa;
2784 els_iocb->s_id[2] = vha->d_id.b.area;
2785 els_iocb->s_id[0] = vha->d_id.b.domain;
2787 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2788 els_iocb->control_flags = 0;
2789 els_iocb->tx_byte_count = els_iocb->tx_len =
2790 cpu_to_le32(sizeof(struct els_plogi_payload));
2791 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2792 &els_iocb->tx_address);
2793 els_iocb->rx_dsd_count = cpu_to_le16(1);
2794 els_iocb->rx_byte_count = els_iocb->rx_len =
2795 cpu_to_le32(sizeof(struct els_plogi_payload));
2796 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2797 &els_iocb->rx_address);
2799 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2800 "PLOGI ELS IOCB:\n");
2801 ql_dump_buffer(ql_log_info, vha, 0x0109,
2802 (uint8_t *)els_iocb,
2803 sizeof(*els_iocb));
2804 } else {
2805 els_iocb->control_flags = cpu_to_le16(1 << 13);
2806 els_iocb->tx_byte_count =
2807 cpu_to_le32(sizeof(struct els_logo_payload));
2808 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2809 &els_iocb->tx_address);
2810 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2812 els_iocb->rx_byte_count = 0;
2813 els_iocb->rx_address = 0;
2814 els_iocb->rx_len = 0;
2815 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2816 "LOGO ELS IOCB:");
2817 ql_dump_buffer(ql_log_info, vha, 0x010b,
2818 els_iocb,
2819 sizeof(*els_iocb));
2820 }
2822 sp->vha->qla_stats.control_requests++;
2826 qla2x00_els_dcmd2_iocb_timeout(void *data)
2827 srb_t *sp = data;
2829 fc_port_t *fcport = sp->fcport;
2830 struct scsi_qla_host *vha = sp->vha;
2831 unsigned long flags = 0;
2832 int res, h;
2834 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2835 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2836 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2838 /* Abort the exchange */
2839 res = qla24xx_async_abort_cmd(sp, false);
2840 ql_dbg(ql_dbg_io, vha, 0x3070,
2841 "mbx abort_command %s\n",
2842 (res == QLA_SUCCESS) ? "successful" : "failed");
2844 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2845 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2846 if (sp->qpair->req->outstanding_cmds[h] == sp) {
2847 sp->qpair->req->outstanding_cmds[h] = NULL;
2848 break;
2849 }
2850 }
2851 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2852 sp->done(sp, QLA_FUNCTION_TIMEOUT);
2856 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2858 if (els_plogi->els_plogi_pyld)
2859 dma_free_coherent(&vha->hw->pdev->dev,
2860 els_plogi->tx_size,
2861 els_plogi->els_plogi_pyld,
2862 els_plogi->els_plogi_pyld_dma);
2864 if (els_plogi->els_resp_pyld)
2865 dma_free_coherent(&vha->hw->pdev->dev,
2866 els_plogi->rx_size,
2867 els_plogi->els_resp_pyld,
2868 els_plogi->els_resp_pyld_dma);
2871 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2873 fc_port_t *fcport = sp->fcport;
2874 struct srb_iocb *lio = &sp->u.iocb_cmd;
2875 struct scsi_qla_host *vha = sp->vha;
2876 struct event_arg ea;
2877 struct qla_work_evt *e;
2878 struct fc_port *conflict_fcport;
2879 port_id_t cid; /* conflict Nport id */
2880 const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
2881 u16 lid;
2883 ql_dbg(ql_dbg_disc, vha, 0x3072,
2884 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2885 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2887 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2888 del_timer(&sp->u.iocb_cmd.timer);
2890 if (sp->flags & SRB_WAKEUP_ON_COMP)
2891 complete(&lio->u.els_plogi.comp);
2893 switch (le32_to_cpu(fw_status[0])) {
2894 case CS_DATA_UNDERRUN:
2895 case CS_COMPLETE:
2896 memset(&ea, 0, sizeof(ea));
2897 ea.fcport = fcport;
2898 ea.rc = res;
2899 qla_handle_els_plogi_done(vha, &ea);
2900 break;
2902 case CS_IOCB_ERROR:
2903 switch (le32_to_cpu(fw_status[1])) {
2904 case LSC_SCODE_PORTID_USED:
2905 lid = le32_to_cpu(fw_status[2]) & 0xffff;
2906 qlt_find_sess_invalidate_other(vha,
2907 wwn_to_u64(fcport->port_name),
2908 fcport->d_id, lid, &conflict_fcport);
2909 if (conflict_fcport) {
2910 /*
2911 * Another fcport shares the same
2912 * loop_id & nport id; the conflict
2913 * fcport needs to finish cleanup
2914 * before this fcport can proceed
2915 * with login.
2916 */
2917 conflict_fcport->conflict = fcport;
2918 fcport->login_pause = 1;
2919 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2920 "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
2921 __func__, __LINE__,
2922 fcport->port_name,
2923 fcport->d_id.b24, lid);
2924 } else {
2925 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2926 "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2927 __func__, __LINE__,
2928 fcport->port_name,
2929 fcport->d_id.b24, lid);
2930 qla2x00_clear_loop_id(fcport);
2931 set_bit(lid, vha->hw->loop_id_map);
2932 fcport->loop_id = lid;
2933 fcport->keep_nport_handle = 0;
2934 qlt_schedule_sess_for_deletion(fcport);
2935 }
2936 break;
2938 case LSC_SCODE_NPORT_USED:
2939 cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
2940 & 0xff;
2941 cid.b.area = (le32_to_cpu(fw_status[2]) >> 8)
2942 & 0xff;
2943 cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff;
2946 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2947 "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2948 __func__, __LINE__, fcport->port_name,
2949 fcport->loop_id, cid.b24);
2950 set_bit(fcport->loop_id,
2951 vha->hw->loop_id_map);
2952 fcport->loop_id = FC_NO_LOOP_ID;
2953 qla24xx_post_gnl_work(vha, fcport);
2954 break;
2956 case LSC_SCODE_NOXCB:
2957 vha->hw->exch_starvation++;
2958 if (vha->hw->exch_starvation > 5) {
2959 ql_log(ql_log_warn, vha, 0xd046,
2960 "Exchange starvation. Resetting RISC\n");
2961 vha->hw->exch_starvation = 0;
2962 set_bit(ISP_ABORT_NEEDED,
2963 &vha->dpc_flags);
2964 qla2xxx_wake_dpc(vha);
2965 }
2966 fallthrough;
2967 default:
2968 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2969 "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2970 __func__, sp->fcport->port_name,
2971 fw_status[0], fw_status[1], fw_status[2]);
2973 fcport->flags &= ~FCF_ASYNC_SENT;
2974 qla2x00_set_fcport_disc_state(fcport,
2975 DSC_LOGIN_FAILED);
2976 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2977 break;
2978 }
2979 break;
2981 default:
2982 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2983 "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
2984 __func__, sp->fcport->port_name,
2985 fw_status[0], fw_status[1], fw_status[2]);
2987 sp->fcport->flags &= ~FCF_ASYNC_SENT;
2988 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
2989 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2990 break;
2991 }
2993 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2994 if (!e) {
2995 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2997 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
2998 sp->free(sp);
2999 return;
3000 }
3001 e->u.iosb.sp = sp;
3002 qla2x00_post_work(vha, e);
3003 }
3007 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
3008 fc_port_t *fcport, bool wait)
3011 struct srb_iocb *elsio = NULL;
3012 struct qla_hw_data *ha = vha->hw;
3013 int rval = QLA_SUCCESS;
3014 void *ptr, *resp_ptr;
3016 /* Alloc SRB structure */
3017 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3018 if (!sp) {
3019 ql_log(ql_log_info, vha, 0x70e6,
3020 "SRB allocation failed\n");
3021 fcport->flags &= ~FCF_ASYNC_ACTIVE;
3022 return -ENOMEM;
3023 }
3025 fcport->flags |= FCF_ASYNC_SENT;
3026 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
3027 elsio = &sp->u.iocb_cmd;
3028 ql_dbg(ql_dbg_io, vha, 0x3073,
3029 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
3031 sp->type = SRB_ELS_DCMD;
3032 sp->name = "ELS_DCMD";
3033 sp->fcport = fcport;
3035 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
3036 if (wait)
3037 sp->flags = SRB_WAKEUP_ON_COMP;
3039 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
3041 sp->done = qla2x00_els_dcmd2_sp_done;
3042 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
3044 ptr = elsio->u.els_plogi.els_plogi_pyld =
3045 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
3046 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
3048 if (!elsio->u.els_plogi.els_plogi_pyld) {
3049 rval = QLA_FUNCTION_FAILED;
3050 goto out;
3051 }
3053 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
3054 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
3055 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
3057 if (!elsio->u.els_plogi.els_resp_pyld) {
3058 rval = QLA_FUNCTION_FAILED;
3059 goto out;
3060 }
3062 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
3064 memset(ptr, 0, sizeof(struct els_plogi_payload));
3065 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
3066 memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
3067 &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);
3069 elsio->u.els_plogi.els_cmd = els_opcode;
3070 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
3072 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
3073 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
3074 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
3075 sizeof(*elsio->u.els_plogi.els_plogi_pyld));
3077 init_completion(&elsio->u.els_plogi.comp);
3078 rval = qla2x00_start_sp(sp);
3079 if (rval != QLA_SUCCESS) {
3080 rval = QLA_FUNCTION_FAILED;
3081 } else {
3082 ql_dbg(ql_dbg_disc, vha, 0x3074,
3083 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
3084 sp->name, sp->handle, fcport->loop_id,
3085 fcport->d_id.b24, vha->d_id.b24);
3086 }
3088 if (wait) {
3089 wait_for_completion(&elsio->u.els_plogi.comp);
3091 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
3092 rval = QLA_FUNCTION_FAILED;
3093 } else {
3094 goto done;
3095 }
3097 out:
3098 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3099 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
3100 sp->free(sp);
3101 done:
3102 return rval;
3103 }
3106 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
3108 struct bsg_job *bsg_job = sp->u.bsg_job;
3109 struct fc_bsg_request *bsg_request = bsg_job->request;
3111 els_iocb->entry_type = ELS_IOCB_TYPE;
3112 els_iocb->entry_count = 1;
3113 els_iocb->sys_define = 0;
3114 els_iocb->entry_status = 0;
3115 els_iocb->handle = sp->handle;
3116 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3117 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3118 els_iocb->vp_index = sp->vha->vp_idx;
3119 els_iocb->sof_type = EST_SOFI3;
3120 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3122 els_iocb->opcode =
3123 sp->type == SRB_ELS_CMD_RPT ?
3124 bsg_request->rqst_data.r_els.els_code :
3125 bsg_request->rqst_data.h_els.command_code;
3126 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
3127 els_iocb->d_id[1] = sp->fcport->d_id.b.area;
3128 els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
3129 els_iocb->control_flags = 0;
3130 els_iocb->rx_byte_count =
3131 cpu_to_le32(bsg_job->reply_payload.payload_len);
3132 els_iocb->tx_byte_count =
3133 cpu_to_le32(bsg_job->request_payload.payload_len);
3135 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3136 &els_iocb->tx_address);
3137 els_iocb->tx_len = cpu_to_le32(sg_dma_len
3138 (bsg_job->request_payload.sg_list));
3140 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3141 &els_iocb->rx_address);
3142 els_iocb->rx_len = cpu_to_le32(sg_dma_len
3143 (bsg_job->reply_payload.sg_list));
3145 sp->vha->qla_stats.control_requests++;
3149 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3151 uint16_t avail_dsds;
3152 struct dsd64 *cur_dsd;
3153 struct scatterlist *sg;
3156 scsi_qla_host_t *vha = sp->vha;
3157 struct qla_hw_data *ha = vha->hw;
3158 struct bsg_job *bsg_job = sp->u.bsg_job;
3159 int entry_count = 1;
3161 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3162 ct_iocb->entry_type = CT_IOCB_TYPE;
3163 ct_iocb->entry_status = 0;
3164 ct_iocb->handle1 = sp->handle;
3165 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3166 ct_iocb->status = cpu_to_le16(0);
3167 ct_iocb->control_flags = cpu_to_le16(0);
3168 ct_iocb->timeout = 0;
3169 ct_iocb->cmd_dsd_count =
3170 cpu_to_le16(bsg_job->request_payload.sg_cnt);
3171 ct_iocb->total_dsd_count =
3172 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3173 ct_iocb->req_bytecount =
3174 cpu_to_le32(bsg_job->request_payload.payload_len);
3175 ct_iocb->rsp_bytecount =
3176 cpu_to_le32(bsg_job->reply_payload.payload_len);
3178 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3179 &ct_iocb->req_dsd.address);
3180 ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
3182 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3183 &ct_iocb->rsp_dsd.address);
3184 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
3186 avail_dsds = 1;
3187 cur_dsd = &ct_iocb->rsp_dsd;
3188 index = 0;
3189 tot_dsds = bsg_job->reply_payload.sg_cnt;
3191 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3192 cont_a64_entry_t *cont_pkt;
3194 /* Allocate additional continuation packets? */
3195 if (avail_dsds == 0) {
3197 * Five DSDs are available in the Continuation
3198 * Type 1 IOCB.
3199 */
3200 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3201 vha->hw->req_q_map[0]);
3202 cur_dsd = cont_pkt->dsd;
3203 avail_dsds = 5;
3204 entry_count++;
3205 }
3207 append_dsd64(&cur_dsd, sg);
3208 avail_dsds--;
3209 }
3210 ct_iocb->entry_count = entry_count;
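/*
 * Editor's note (illustration): the CT IOCB itself holds one response
 * DSD and each Continuation Type 1 IOCB adds five more, so e.g. 11
 * response DSDs need two continuation entries and entry_count becomes 3.
 */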
3212 sp->vha->qla_stats.control_requests++;
3216 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3218 uint16_t avail_dsds;
3219 struct dsd64 *cur_dsd;
3220 struct scatterlist *sg;
3222 uint16_t cmd_dsds, rsp_dsds;
3223 scsi_qla_host_t *vha = sp->vha;
3224 struct qla_hw_data *ha = vha->hw;
3225 struct bsg_job *bsg_job = sp->u.bsg_job;
3226 int entry_count = 1;
3227 cont_a64_entry_t *cont_pkt = NULL;
3229 ct_iocb->entry_type = CT_IOCB_TYPE;
3230 ct_iocb->entry_status = 0;
3231 ct_iocb->sys_define = 0;
3232 ct_iocb->handle = sp->handle;
3234 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3235 ct_iocb->vp_index = sp->vha->vp_idx;
3236 ct_iocb->comp_status = cpu_to_le16(0);
3238 cmd_dsds = bsg_job->request_payload.sg_cnt;
3239 rsp_dsds = bsg_job->reply_payload.sg_cnt;
3241 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3242 ct_iocb->timeout = 0;
3243 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3244 ct_iocb->cmd_byte_count =
3245 cpu_to_le32(bsg_job->request_payload.payload_len);
3247 avail_dsds = 2;
3248 cur_dsd = ct_iocb->dsd;
3249 index = 0;
3251 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3252 /* Allocate additional continuation packets? */
3253 if (avail_dsds == 0) {
3255 * Five DSDs are available in the Continuation
3256 * Type 1 IOCB.
3257 */
3258 cont_pkt = qla2x00_prep_cont_type1_iocb(
3259 vha, ha->req_q_map[0]);
3260 cur_dsd = cont_pkt->dsd;
3261 avail_dsds = 5;
3262 entry_count++;
3263 }
3265 append_dsd64(&cur_dsd, sg);
3266 avail_dsds--;
3267 }
3269 index = 0;
3271 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3272 /* Allocate additional continuation packets? */
3273 if (avail_dsds == 0) {
3275 * Five DSDs are available in the Continuation
3276 * Type 1 IOCB.
3277 */
3278 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3279 ha->req_q_map[0]);
3280 cur_dsd = cont_pkt->dsd;
3281 avail_dsds = 5;
3282 entry_count++;
3283 }
3285 append_dsd64(&cur_dsd, sg);
3286 avail_dsds--;
3287 }
3288 ct_iocb->entry_count = entry_count;
3292 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3293 * @sp: command to send to the ISP
3295 * Returns non-zero if a failure occurred, else zero.
3298 qla82xx_start_scsi(srb_t *sp)
3300 int nseg;
3301 unsigned long flags;
3302 struct scsi_cmnd *cmd;
3303 uint32_t *clr_ptr;
3304 uint32_t handle;
3305 uint16_t cnt;
3306 uint16_t req_cnt;
3307 uint16_t tot_dsds;
3308 struct device_reg_82xx __iomem *reg;
3309 uint32_t dbval;
3310 __be32 *fcp_dl;
3311 uint8_t additional_cdb_len;
3312 struct ct6_dsd *ctx;
3313 struct scsi_qla_host *vha = sp->vha;
3314 struct qla_hw_data *ha = vha->hw;
3315 struct req_que *req = NULL;
3316 struct rsp_que *rsp = NULL;
3318 /* Setup device pointers. */
3319 reg = &ha->iobase->isp82;
3320 cmd = GET_CMD_SP(sp);
3321 req = vha->req;
3322 rsp = ha->rsp_q_map[0];
3324 /* So we know we haven't pci_map'ed anything yet */
3325 tot_dsds = 0;
3327 dbval = 0x04 | (ha->portnum << 5);
3329 /* Send marker if required */
3330 if (vha->marker_needed != 0) {
3331 if (qla2x00_marker(vha, ha->base_qpair,
3332 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3333 ql_log(ql_log_warn, vha, 0x300c,
3334 "qla2x00_marker failed for cmd=%p.\n", cmd);
3335 return QLA_FUNCTION_FAILED;
3337 vha->marker_needed = 0;
3340 /* Acquire ring specific lock */
3341 spin_lock_irqsave(&ha->hardware_lock, flags);
3343 handle = qla2xxx_get_next_handle(req);
3344 if (handle == 0)
3345 goto queuing_error;
3347 /* Map the sg table so we have an accurate count of sg entries needed */
3348 if (scsi_sg_count(cmd)) {
3349 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3350 scsi_sg_count(cmd), cmd->sc_data_direction);
3351 if (unlikely(!nseg))
3352 goto queuing_error;
3353 } else
3354 nseg = 0;
3356 tot_dsds = nseg;
3358 if (tot_dsds > ql2xshiftctondsd) {
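/*
 * Editor's note: past the ql2xshiftctondsd threshold the driver switches
 * from a Command Type 7 IOCB with inline DSDs to a Command Type 6 IOCB,
 * which keeps the FCP_CMND and the DSD lists in external DMA buffers
 * chained from ha->gbl_dsd_list.
 */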
3359 struct cmd_type_6 *cmd_pkt;
3360 uint16_t more_dsd_lists = 0;
3361 struct dsd_dma *dsd_ptr;
3362 uint16_t i;
3364 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3365 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3366 ql_dbg(ql_dbg_io, vha, 0x300d,
3367 "Num of DSD list %d is more than %d for cmd=%p.\n",
3368 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3369 cmd);
3370 goto queuing_error;
3371 }
3373 if (more_dsd_lists <= ha->gbl_dsd_avail)
3374 goto sufficient_dsds;
3376 more_dsd_lists -= ha->gbl_dsd_avail;
3378 for (i = 0; i < more_dsd_lists; i++) {
3379 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3380 if (!dsd_ptr) {
3381 ql_log(ql_log_fatal, vha, 0x300e,
3382 "Failed to allocate memory for dsd_dma "
3383 "for cmd=%p.\n", cmd);
3384 goto queuing_error;
3385 }
3387 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3388 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3389 if (!dsd_ptr->dsd_addr) {
3390 kfree(dsd_ptr);
3391 ql_log(ql_log_fatal, vha, 0x300f,
3392 "Failed to allocate memory for dsd_addr "
3393 "for cmd=%p.\n", cmd);
3394 goto queuing_error;
3395 }
3396 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3397 ha->gbl_dsd_avail++;
3398 }
3400 sufficient_dsds:
3401 req_cnt = 1;
3403 if (req->cnt < (req_cnt + 2)) {
3404 cnt = (uint16_t)rd_reg_dword_relaxed(
3405 &reg->req_q_out[0]);
3406 if (req->ring_index < cnt)
3407 req->cnt = cnt - req->ring_index;
3408 else
3409 req->cnt = req->length -
3410 (req->ring_index - cnt);
3411 if (req->cnt < (req_cnt + 2))
3412 goto queuing_error;
3413 }
3415 ctx = sp->u.scmd.ct6_ctx =
3416 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3417 if (!ctx) {
3418 ql_log(ql_log_fatal, vha, 0x3010,
3419 "Failed to allocate ctx for cmd=%p.\n", cmd);
3420 goto queuing_error;
3421 }
3423 memset(ctx, 0, sizeof(struct ct6_dsd));
3424 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3425 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3426 if (!ctx->fcp_cmnd) {
3427 ql_log(ql_log_fatal, vha, 0x3011,
3428 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3432 /* Initialize the DSD list and dma handle */
3433 INIT_LIST_HEAD(&ctx->dsd_list);
3434 ctx->dsd_use_cnt = 0;
3436 if (cmd->cmd_len > 16) {
3437 additional_cdb_len = cmd->cmd_len - 16;
3438 if ((cmd->cmd_len % 4) != 0) {
3439 /* SCSI command bigger than 16 bytes must be
3440 * a multiple of 4.
3441 */
3442 ql_log(ql_log_warn, vha, 0x3012,
3443 "scsi cmd len %d not multiple of 4 "
3444 "for cmd=%p.\n", cmd->cmd_len, cmd);
3445 goto queuing_error_fcp_cmnd;
3446 }
3447 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3448 } else {
3449 additional_cdb_len = 0;
3450 ctx->fcp_cmnd_len = 12 + 16 + 4;
3451 }
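/*
 * Editor's note (illustration): the FCP_CMND IU is 8 bytes of LUN plus
 * 4 bytes of task/flags fields (12 total), then the CDB, then the
 * 4-byte FCP_DL data-length field; a 16-byte CDB thus gives a 32-byte IU.
 */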
3453 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3454 cmd_pkt->handle = make_handle(req->id, handle);
3456 /* Zero out remaining portion of packet. */
3457 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3458 clr_ptr = (uint32_t *)cmd_pkt + 2;
3459 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3460 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3462 /* Set NPORT-ID and LUN number*/
3463 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3464 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3465 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3466 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3467 cmd_pkt->vp_index = sp->vha->vp_idx;
3469 /* Build IOCB segments */
3470 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3471 goto queuing_error_fcp_cmnd;
3473 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3474 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3476 /* build FCP_CMND IU */
3477 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3478 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3480 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3481 ctx->fcp_cmnd->additional_cdb_len |= 1;
3482 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3483 ctx->fcp_cmnd->additional_cdb_len |= 2;
3485 /* Populate the FCP_PRIO. */
3486 if (ha->flags.fcp_prio_enabled)
3487 ctx->fcp_cmnd->task_attribute |=
3488 sp->fcport->fcp_prio << 3;
3490 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3492 fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
3493 additional_cdb_len);
3494 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
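/*
 * Editor's note: fcp_dl points just past the (possibly extended) CDB
 * and is stored big-endian, as the FCP_DL field of the FCP_CMND IU;
 * it carries the total data length the target should expect.
 */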
3496 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3497 put_unaligned_le64(ctx->fcp_cmnd_dma,
3498 &cmd_pkt->fcp_cmnd_dseg_address);
3500 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3501 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3502 /* Set total data segment count. */
3503 cmd_pkt->entry_count = (uint8_t)req_cnt;
3504 /* Specify response queue number where
3505 * completion should happen
3506 */
3507 cmd_pkt->entry_status = (uint8_t) rsp->id;
3508 } else {
3509 struct cmd_type_7 *cmd_pkt;
3511 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3512 if (req->cnt < (req_cnt + 2)) {
3513 cnt = (uint16_t)rd_reg_dword_relaxed(
3514 &reg->req_q_out[0]);
3515 if (req->ring_index < cnt)
3516 req->cnt = cnt - req->ring_index;
3517 else
3518 req->cnt = req->length -
3519 (req->ring_index - cnt);
3521 if (req->cnt < (req_cnt + 2))
3522 goto queuing_error;
3524 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3525 cmd_pkt->handle = make_handle(req->id, handle);
3527 /* Zero out remaining portion of packet. */
3528 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3529 clr_ptr = (uint32_t *)cmd_pkt + 2;
3530 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3531 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3533 /* Set NPORT-ID and LUN number*/
3534 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3535 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3536 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3537 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3538 cmd_pkt->vp_index = sp->vha->vp_idx;
3540 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3541 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3542 sizeof(cmd_pkt->lun));
3544 /* Populate the FCP_PRIO. */
3545 if (ha->flags.fcp_prio_enabled)
3546 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3548 /* Load SCSI command packet. */
3549 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3550 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3552 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3554 /* Build IOCB segments */
3555 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3557 /* Set total data segment count. */
3558 cmd_pkt->entry_count = (uint8_t)req_cnt;
3559 /* Specify response queue number where
3560 * completion should happen.
3561 */
3562 cmd_pkt->entry_status = (uint8_t) rsp->id;
3563 }
3565 /* Build command packet. */
3566 req->current_outstanding_cmd = handle;
3567 req->outstanding_cmds[handle] = sp;
3568 sp->handle = handle;
3569 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3570 req->cnt -= req_cnt;
3573 /* Adjust ring index. */
3574 req->ring_index++;
3575 if (req->ring_index == req->length) {
3576 req->ring_index = 0;
3577 req->ring_ptr = req->ring;
3578 } else
3579 req->ring_ptr++;
3581 sp->flags |= SRB_DMA_VALID;
3583 /* Set chip new ring index. */
3584 /* write, read and verify logic */
3585 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
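/*
 * Editor's note (illustration): the doorbell value packs
 * 0x04 | (portnum << 5) | (req->id << 8) | (ring_index << 16);
 * e.g. port 1, request queue 0, ring_index 0x2A gives 0x002A0024.
 */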
3586 if (ql2xdbwr)
3587 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3588 else {
3589 wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3590 wmb();
3591 while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
3592 wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3593 wmb();
3594 }
3595 }
3597 /* Manage unprocessed RIO/ZIO commands in response queue. */
3598 if (vha->flags.process_response_queue &&
3599 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3600 qla24xx_process_response_queue(vha, rsp);
3602 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3603 return QLA_SUCCESS;
3605 queuing_error_fcp_cmnd:
3606 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3607 queuing_error:
3608 if (tot_dsds)
3609 scsi_dma_unmap(cmd);
3611 if (sp->u.scmd.crc_ctx) {
3612 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3613 sp->u.scmd.crc_ctx = NULL;
3614 }
3615 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3617 return QLA_FUNCTION_FAILED;
3621 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3623 struct srb_iocb *aio = &sp->u.iocb_cmd;
3624 scsi_qla_host_t *vha = sp->vha;
3625 struct req_que *req = sp->qpair->req;
3626 srb_t *orig_sp = sp->cmd_sp;
3628 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3629 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3630 abt_iocb->entry_count = 1;
3631 abt_iocb->handle = make_handle(req->id, sp->handle);
3633 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3634 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3635 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3636 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3638 abt_iocb->handle_to_abort =
3639 make_handle(le16_to_cpu(aio->u.abt.req_que_no),
3640 aio->u.abt.cmd_hndl);
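/*
 * Editor's note (illustration): make_handle() packs the request-queue
 * id into the upper 16 bits and the per-queue command handle into the
 * lower 16, so queue 2 / handle 0x11 becomes 0x00020011; the firmware
 * echoes this value back so the completion can be matched to its queue.
 */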
3641 abt_iocb->vp_index = vha->vp_idx;
3642 abt_iocb->req_que_no = aio->u.abt.req_que_no;
3644 /* need to pass original sp */
3645 if (orig_sp)
3646 qla_nvme_abort_set_option(abt_iocb, orig_sp);
3648 /* Send the command to the firmware */
3649 wmb();
3653 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3655 size_t sz, i;
3657 mbx->entry_type = MBX_IOCB_TYPE;
3658 mbx->handle = sp->handle;
3659 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3661 for (i = 0; i < sz; i++)
3662 mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
3666 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3668 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3669 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3670 ct_pkt->handle = sp->handle;
3673 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3674 struct nack_to_isp *nack)
3676 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3678 nack->entry_type = NOTIFY_ACK_TYPE;
3679 nack->entry_count = 1;
3680 nack->ox_id = ntfy->ox_id;
3682 nack->u.isp24.handle = sp->handle;
3683 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3684 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3685 nack->u.isp24.flags = ntfy->u.isp24.flags &
3686 cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
3687 }
3688 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3689 nack->u.isp24.status = ntfy->u.isp24.status;
3690 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3691 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3692 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3693 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3694 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3695 nack->u.isp24.srr_flags = 0;
3696 nack->u.isp24.srr_reject_code = 0;
3697 nack->u.isp24.srr_reject_code_expl = 0;
3698 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3701 /*
3702 * Build NVME LS request
3703 */
3705 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3707 struct srb_iocb *nvme;
3709 nvme = &sp->u.iocb_cmd;
3710 cmd_pkt->entry_type = PT_LS4_REQUEST;
3711 cmd_pkt->entry_count = 1;
3712 cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
3714 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3715 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3716 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3718 cmd_pkt->tx_dseg_count = cpu_to_le16(1);
3719 cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
3720 cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
3721 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3723 cmd_pkt->rx_dseg_count = cpu_to_le16(1);
3724 cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
3725 cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
3726 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3730 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3732 int map, pos;
3734 vce->entry_type = VP_CTRL_IOCB_TYPE;
3735 vce->handle = sp->handle;
3736 vce->entry_count = 1;
3737 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3738 vce->vp_count = cpu_to_le16(1);
3740 /*
3741 * The index map in firmware starts with 1; decrement the index.
3742 * This is OK because index 0 is never used.
3743 */
3744 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3745 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3746 vce->vp_idx_map[map] |= 1 << pos;
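/*
 * Editor's note (illustration): vp_index 9 maps to byte (9 - 1) / 8 = 1
 * and bit (9 - 1) & 7 = 0, so vp_idx_map[1] |= 0x01.
 */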
3750 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3752 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3753 logio->control_flags =
3754 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3756 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3757 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3758 logio->port_id[1] = sp->fcport->d_id.b.area;
3759 logio->port_id[2] = sp->fcport->d_id.b.domain;
3760 logio->vp_index = sp->fcport->vha->vp_idx;
3764 qla2x00_start_sp(srb_t *sp)
3766 int rval = QLA_SUCCESS;
3767 scsi_qla_host_t *vha = sp->vha;
3768 struct qla_hw_data *ha = vha->hw;
3769 struct qla_qpair *qp = sp->qpair;
3770 void *pkt;
3771 unsigned long flags;
3773 if (vha->hw->flags.eeh_busy)
3774 return -EIO;
3776 spin_lock_irqsave(qp->qp_lock_ptr, flags);
3777 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3778 if (!pkt) {
3779 rval = -EAGAIN;
3780 ql_log(ql_log_warn, vha, 0x700c,
3781 "qla2x00_alloc_iocbs failed.\n");
3782 goto done;
3783 }
3785 switch (sp->type) {
3786 case SRB_LOGIN_CMD:
3787 IS_FWI2_CAPABLE(ha) ?
3788 qla24xx_login_iocb(sp, pkt) :
3789 qla2x00_login_iocb(sp, pkt);
3790 break;
3791 case SRB_PRLI_CMD:
3792 qla24xx_prli_iocb(sp, pkt);
3793 break;
3794 case SRB_LOGOUT_CMD:
3795 IS_FWI2_CAPABLE(ha) ?
3796 qla24xx_logout_iocb(sp, pkt) :
3797 qla2x00_logout_iocb(sp, pkt);
3798 break;
3799 case SRB_ELS_CMD_RPT:
3800 case SRB_ELS_CMD_HST:
3801 qla24xx_els_iocb(sp, pkt);
3802 break;
3803 case SRB_CT_CMD:
3804 IS_FWI2_CAPABLE(ha) ?
3805 qla24xx_ct_iocb(sp, pkt) :
3806 qla2x00_ct_iocb(sp, pkt);
3807 break;
3808 case SRB_ADISC_CMD:
3809 IS_FWI2_CAPABLE(ha) ?
3810 qla24xx_adisc_iocb(sp, pkt) :
3811 qla2x00_adisc_iocb(sp, pkt);
3812 break;
3813 case SRB_TM_CMD:
3814 IS_QLAFX00(ha) ?
3815 qlafx00_tm_iocb(sp, pkt) :
3816 qla24xx_tm_iocb(sp, pkt);
3817 break;
3818 case SRB_FXIOCB_DCMD:
3819 case SRB_FXIOCB_BCMD:
3820 qlafx00_fxdisc_iocb(sp, pkt);
3821 break;
3822 case SRB_NVME_LS:
3823 qla_nvme_ls(sp, pkt);
3824 break;
3825 case SRB_ABT_CMD:
3826 IS_QLAFX00(ha) ?
3827 qlafx00_abort_iocb(sp, pkt) :
3828 qla24xx_abort_iocb(sp, pkt);
3829 break;
3830 case SRB_ELS_DCMD:
3831 qla24xx_els_logo_iocb(sp, pkt);
3832 break;
3833 case SRB_CT_PTHRU_CMD:
3834 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3835 break;
3836 case SRB_MB_IOCB:
3837 qla2x00_mb_iocb(sp, pkt);
3838 break;
3839 case SRB_NACK_PLOGI:
3840 case SRB_NACK_PRLI:
3841 case SRB_NACK_LOGO:
3842 qla2x00_send_notify_ack_iocb(sp, pkt);
3843 break;
3844 case SRB_CTRL_VP:
3845 qla25xx_ctrlvp_iocb(sp, pkt);
3846 break;
3847 case SRB_PRLO_CMD:
3848 qla24xx_prlo_iocb(sp, pkt);
3849 break;
3850 default:
3851 break;
3852 }
3854 if (sp->start_timer)
3855 add_timer(&sp->u.iocb_cmd.timer);
3857 wmb();
3858 qla2x00_start_iocbs(vha, qp->req);
3859 done:
3860 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3861 return rval;
3862 }
3865 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3866 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3868 uint16_t avail_dsds;
3869 struct dsd64 *cur_dsd;
3870 uint32_t req_data_len = 0;
3871 uint32_t rsp_data_len = 0;
3872 struct scatterlist *sg;
3874 int entry_count = 1;
3875 struct bsg_job *bsg_job = sp->u.bsg_job;
3877 /*Update entry type to indicate bidir command */
3878 put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3880 /* Set the transfer direction; here both flags are set.
3881 * Also set the BD_WRAP_BACK flag; the firmware takes care of
3882 * assigning DID = SID for outgoing packets.
3883 */
3884 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3885 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3886 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3889 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3890 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3891 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3892 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3894 vha->bidi_stats.transfer_bytes += req_data_len;
3895 vha->bidi_stats.io_count++;
3897 vha->qla_stats.output_bytes += req_data_len;
3898 vha->qla_stats.output_requests++;
3900 /* Only one dsd is available for bidirectional IOCB, remaining dsds
3901 * are bundled in the continuation IOCB.
3902 */
3903 avail_dsds = 1;
3904 cur_dsd = &cmd_pkt->fcp_dsd;
3908 for_each_sg(bsg_job->request_payload.sg_list, sg,
3909 bsg_job->request_payload.sg_cnt, index) {
3910 cont_a64_entry_t *cont_pkt;
3912 /* Allocate additional continuation packets */
3913 if (avail_dsds == 0) {
3914 /* Continuation type 1 IOCB can accommodate
3915 * five DSDs.
3916 */
3917 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3918 cur_dsd = cont_pkt->dsd;
3919 avail_dsds = 5;
3920 entry_count++;
3921 }
3922 append_dsd64(&cur_dsd, sg);
3923 avail_dsds--;
3924 }
3925 /* For a read request the DSDs always go to a continuation IOCB,
3926 * following the write DSDs. If there is room on the current IOCB
3927 * they are added to that IOCB; otherwise a new continuation IOCB
3928 * is allocated.
3929 */
3930 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3931 bsg_job->reply_payload.sg_cnt, index) {
3932 cont_a64_entry_t *cont_pkt;
3934 /* Allocate additional continuation packets */
3935 if (avail_dsds == 0) {
3936 /* Continuation type 1 IOCB can accommodate
3937 * five DSDs.
3938 */
3939 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3940 cur_dsd = cont_pkt->dsd;
3941 avail_dsds = 5;
3942 entry_count++;
3943 }
3944 append_dsd64(&cur_dsd, sg);
3945 avail_dsds--;
3946 }
3947 /* This value should equal the number of IOCBs required for this cmd */
3948 cmd_pkt->entry_count = entry_count;
3952 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3955 struct qla_hw_data *ha = vha->hw;
3956 unsigned long flags;
3957 uint32_t handle;
3958 uint16_t req_cnt;
3959 uint16_t cnt;
3960 uint32_t *clr_ptr;
3961 struct cmd_bidir *cmd_pkt = NULL;
3962 struct rsp_que *rsp;
3963 struct req_que *req;
3964 int rval = EXT_STATUS_OK;
3968 rsp = ha->rsp_q_map[0];
3969 req = vha->req;
3971 /* Send marker if required */
3972 if (vha->marker_needed != 0) {
3973 if (qla2x00_marker(vha, ha->base_qpair,
3974 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3975 return EXT_STATUS_MAILBOX;
3976 vha->marker_needed = 0;
3979 /* Acquire ring specific lock */
3980 spin_lock_irqsave(&ha->hardware_lock, flags);
3982 handle = qla2xxx_get_next_handle(req);
3983 if (handle == 0) {
3984 rval = EXT_STATUS_BUSY;
3985 goto queuing_error;
3986 }
3988 /* Calculate number of IOCB required */
3989 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3991 /* Check for room on request queue. */
3992 if (req->cnt < req_cnt + 2) {
3993 if (IS_SHADOW_REG_CAPABLE(ha)) {
3994 cnt = *req->out_ptr;
3995 } else {
3996 cnt = rd_reg_dword_relaxed(req->req_q_out);
3997 if (qla2x00_check_reg16_for_disconnect(vha, cnt))
3998 goto queuing_error;
3999 }
4001 if (req->ring_index < cnt)
4002 req->cnt = cnt - req->ring_index;
4003 else
4004 req->cnt = req->length -
4005 (req->ring_index - cnt);
4007 if (req->cnt < req_cnt + 2) {
4008 rval = EXT_STATUS_BUSY;
4009 goto queuing_error;
4010 }
4012 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
4013 cmd_pkt->handle = make_handle(req->id, handle);
4015 /* Zero out remaining portion of packet. */
4016 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
4017 clr_ptr = (uint32_t *)cmd_pkt + 2;
4018 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
4020 /* Set NPORT-ID (of vha)*/
4021 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
4022 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
4023 cmd_pkt->port_id[1] = vha->d_id.b.area;
4024 cmd_pkt->port_id[2] = vha->d_id.b.domain;
4026 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
4027 cmd_pkt->entry_status = (uint8_t) rsp->id;
4028 /* Build command packet. */
4029 req->current_outstanding_cmd = handle;
4030 req->outstanding_cmds[handle] = sp;
4031 sp->handle = handle;
4032 req->cnt -= req_cnt;
4034 /* Send the command to the firmware */
4035 wmb();
4036 qla2x00_start_iocbs(vha, req);
4037 queuing_error:
4038 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4039 return rval;
4040 }