/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

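/*
 * NPIV virtual port (vport) management and ISP25xx multiqueue
 * request/response queue handling for the qla2xxx driver. These routines
 * are driven by the FC transport vport hooks and by the DPC thread.
 */
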
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
        if (vha->vp_idx && vha->timer_active) {
                del_timer_sync(&vha->timer);
                vha->timer_active = 0;
        }
}

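/*
 * Allocate the next free vp_id from ha->vp_idx_map (bit 0 is the physical
 * port) and publish the new vport: ha->vport_lock serializes bitmap
 * updates, ha->vport_slock protects vp_list, and the hardware_lock guards
 * the target-mode vp map update.
 */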
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
        uint32_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;

        /* Find an empty slot and assign a vp_id. */
        mutex_lock(&ha->vport_lock);
        vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
        if (vp_id > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa000,
                    "vp_id %d is bigger than max-supported %d.\n",
                    vp_id, ha->max_npiv_vports);
                mutex_unlock(&ha->vport_lock);
                return vp_id;
        }

        set_bit(vp_id, ha->vp_idx_map);
        ha->num_vhosts++;
        vha->vp_idx = vp_id;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_add_tail(&vha->list, &ha->vp_list);
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        spin_lock_irqsave(&ha->hardware_lock, flags);
        qlt_update_vp_map(vha, SET_VP_IDX);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        mutex_unlock(&ha->vport_lock);
        return vp_id;
}

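/*
 * Undo qla24xx_allocate_vp_id(): wait for outstanding references
 * (vref_count) to drain, unlink the vport from vp_list, and release its
 * bit in ha->vp_idx_map.
 */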
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags = 0;
        u8 i;

        mutex_lock(&ha->vport_lock);
        /*
         * Wait for all pending activities to finish before removing vport
         * from the list.
         * Lock needs to be held for safe removal from the list (it
         * ensures no active vp_list traversal while the vport is removed
         * from the queue).
         */
        for (i = 0; i < 10 && atomic_read(&vha->vref_count); i++)
                wait_event_timeout(vha->vref_waitq,
                    !atomic_read(&vha->vref_count), HZ);

        spin_lock_irqsave(&ha->vport_slock, flags);
        if (atomic_read(&vha->vref_count)) {
                ql_dbg(ql_dbg_vport, vha, 0xfffa,
                    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
                vha->vref_count = (atomic_t)ATOMIC_INIT(0);
        }
        list_del(&vha->list);
        qlt_update_vp_map(vha, RESET_VP_IDX);
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        vp_id = vha->vp_idx;
        ha->num_vhosts--;
        clear_bit(vp_id, ha->vp_idx_map);

        mutex_unlock(&ha->vport_lock);
}

static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
        scsi_qla_host_t *vha;
        struct scsi_qla_host *tvha;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        /* Locate matching device in database. */
        list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
                if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
                        spin_unlock_irqrestore(&ha->vport_slock, flags);
                        return vha;
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
        return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *      Updates fcport state when device goes offline.
 *
 * Input:
 *      ha = adapter block pointer.
 *      fcport = port structure pointer.
 *
 * Return:
 *      None.
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
        /*
         * !!! NOTE !!!
         * If this function is called in contexts other than vp create,
         * disable or delete, make sure it is synchronized with the
         * delete thread.
         */
        fc_port_t *fcport;

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                ql_dbg(ql_dbg_vport, vha, 0xa001,
                    "Marking port dead, loop_id=0x%04x : %x.\n",
                    fcport->loop_id, fcport->vha->vp_idx);

                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
        }
}

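/*
 * Disable a vport: log out all of its sessions (if the firmware is
 * running), mark its devices lost/unconfigured, and report the resulting
 * state to the FC transport.
 */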
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
        unsigned long flags;
        int ret = QLA_SUCCESS;
        fc_port_t *fcport;

        if (vha->hw->flags.fw_started)
                ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        list_for_each_entry(fcport, &vha->vp_fcports, list)
                fcport->logout_on_delete = 0;

        qla2x00_mark_all_devices_lost(vha, 0);

        /* Remove port id from vp target map */
        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        qlt_update_vp_map(vha, RESET_AL_PA);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        qla2x00_mark_vp_devices_dead(vha);
        atomic_set(&vha->vp_state, VP_FAILED);
        vha->flags.management_server_logged_in = 0;
        if (ret == QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
        } else {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                return -1;
        }
        return 0;
}

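/*
 * Enable a vport. The physical port must be up and in fabric (F-port)
 * topology; the vport is then (re)initialized with a modify-VP-config
 * mailbox command. Returns 0 on success, 1 on failure.
 */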
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        /* Check if physical ha port is Up */
        if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
            atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
            !(ha->current_topology & ISP_CFG_F)) {
                vha->vp_err_state = VP_ERR_PORTDWN;
                fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
                ql_dbg(ql_dbg_taskm, vha, 0x800b,
                    "%s skip enable. loop_state %x topo %x\n",
                    __func__, base_vha->loop_state.counter,
                    ha->current_topology);
                goto enable_failed;
        }

        /* Initialize the new vport unless it is a persistent port */
        mutex_lock(&ha->vport_lock);
        ret = qla24xx_modify_vp_config(vha);
        mutex_unlock(&ha->vport_lock);

        if (ret != QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                goto enable_failed;
        }

        ql_dbg(ql_dbg_taskm, vha, 0x801a,
            "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
        return 0;

enable_failed:
        ql_dbg(ql_dbg_taskm, vha, 0x801b,
            "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
        return 1;
}

static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
        struct fc_vport *fc_vport;
        int ret;

        fc_vport = vha->fc_vport;

        ql_dbg(ql_dbg_vport, vha, 0xa002,
            "%s: change request #3.\n", __func__);
        ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
        if (ret != QLA_SUCCESS) {
                ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
                    "receiving of RSCN requests: 0x%x.\n", ret);
                return;
        }
        /* Corresponds to SCR enabled */
        clear_bit(VP_SCR_NEEDED, &vha->vp_flags);

        vha->flags.online = 1;
        if (qla24xx_configure_vhba(vha))
                return;

        atomic_set(&vha->vp_state, VP_ACTIVE);
        fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

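/*
 * Fan an asynchronous event out to every vport. vref_count pins each
 * vport while vport_slock is dropped around the qla2x00_async_event()
 * call; PORT_UPDATE and RSCN events are forwarded only to the vport whose
 * index matches mb[3].
 */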
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha = rsp->hw;
        int i = 0;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vha, &ha->vp_list, list) {
                if (vha->vp_idx) {
                        if (test_bit(VPORT_DELETE, &vha->dpc_flags))
                                continue;

                        atomic_inc(&vha->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        switch (mb[0]) {
                        case MBA_LIP_OCCURRED:
                        case MBA_LOOP_UP:
                        case MBA_LOOP_DOWN:
                        case MBA_LIP_RESET:
                        case MBA_POINT_TO_POINT:
                        case MBA_CHG_IN_CONNECTION:
                                ql_dbg(ql_dbg_async, vha, 0x5024,
                                    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
                                    i, *mb, vha);
                                qla2x00_async_event(vha, rsp, mb);
                                break;
                        case MBA_PORT_UPDATE:
                        case MBA_RSCN_UPDATE:
                                if ((mb[3] & 0xff) == vha->vp_idx) {
                                        ql_dbg(ql_dbg_async, vha, 0x5024,
                                            "Async_event for VP[%d], mb=0x%x vha=%p\n",
                                            i, *mb, vha);
                                        qla2x00_async_event(vha, rsp, mb);
                                }
                                break;
                        }

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vha->vref_count);
                        wake_up(&vha->vref_waitq);
                }
                i++;
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}

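/*
 * Per-vport ISP abort handling. The physical port performs the actual
 * chip recovery; the vport only logs out its sessions and is treated as
 * a loop-down until it is re-enabled.
 */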
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
        fc_port_t *fcport;

        /*
         * To exclusively reset vport, we need to log it out first.
         * Note: This control_vp can fail if ISP reset is already
         * issued, this is expected, as the vp would be already
         * logged out due to ISP reset.
         */
        if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
                qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
                list_for_each_entry(fcport, &vha->vp_fcports, list)
                        fcport->logout_on_delete = 0;
        }

        /*
         * Physical port will do most of the abort and recovery work. We can
         * just treat it as a loop down.
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                qla2x00_mark_all_devices_lost(vha, 0);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        }

        ql_dbg(ql_dbg_taskm, vha, 0x801d,
            "Scheduling enable of Vport %d.\n", vha->vp_idx);

        return qla24xx_enable_vp(vha);
}

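/*
 * Per-vport DPC work: complete deferred vport configuration, fcport
 * updates, relogins, and loop resync, mirroring the base port's DPC
 * handling.
 */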
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
            "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

        /* Check if Fw is ready to configure VP first */
        if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
                if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
                        /* VP acquired. complete port configuration */
                        ql_dbg(ql_dbg_dpc, vha, 0x4014,
                            "Configure VP scheduled.\n");
                        qla24xx_configure_vp(vha);
                        ql_dbg(ql_dbg_dpc, vha, 0x4015,
                            "Configure VP end.\n");
                        return 0;
                }
        }

        if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
                ql_dbg(ql_dbg_dpc, vha, 0x4016,
                    "FCPort update scheduled.\n");
                qla2x00_update_fcports(vha);
                clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
                ql_dbg(ql_dbg_dpc, vha, 0x4017,
                    "FCPort update end.\n");
        }

        if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
            !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
            atomic_read(&vha->loop_state) != LOOP_DOWN) {

                if (!vha->relogin_jif ||
                    time_after_eq(jiffies, vha->relogin_jif)) {
                        vha->relogin_jif = jiffies + HZ;
                        clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

                        ql_dbg(ql_dbg_dpc, vha, 0x4018,
                            "Relogin needed scheduled.\n");
                        qla24xx_post_relogin_work(vha);
                }
        }

        if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
            (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
                clear_bit(RESET_ACTIVE, &vha->dpc_flags);
        }

        if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
                if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
                        ql_dbg(ql_dbg_dpc, vha, 0x401a,
                            "Loop resync scheduled.\n");
                        qla2x00_loop_resync(vha);
                        clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
                        ql_dbg(ql_dbg_dpc, vha, 0x401b,
                            "Loop resync end.\n");
                }
        }

        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
            "Exiting %s.\n", __func__);
        return 0;
}

void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
        unsigned long flags = 0;

        if (vha->vp_idx)
                return;
        if (list_empty(&ha->vp_list))
                return;

        clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

        if (!(ha->current_topology & ISP_CFG_F))
                return;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vp, &ha->vp_list, list) {
                if (vp->vp_idx) {
                        atomic_inc(&vp->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        qla2x00_do_dpc_vp(vp);

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vp->vref_count);
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        uint8_t port_name[WWN_SIZE];

        if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
                return VPCERR_UNSUPPORTED;

        /* Check that the F/W and H/W support NPIV */
        if (!ha->flags.npiv_supported)
                return VPCERR_UNSUPPORTED;

        /* Check that an NPIV-capable switch is present */
        if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
                return VPCERR_NO_FABRIC_SUPP;

        /* Check that the WWPN is unique */
        u64_to_wwn(fc_vport->port_name, port_name);
        if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
                return VPCERR_BAD_WWN;
        vha = qla24xx_find_vhost_by_name(ha, port_name);
        if (vha)
                return VPCERR_BAD_WWN;

        /* Check the max-npiv-supports limit */
        if (ha->num_vhosts > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa004,
                    "num_vhosts %u is bigger than max_npiv_vports %u.\n",
                    ha->num_vhosts, ha->max_npiv_vports);
                return VPCERR_UNSUPPORTED;
        }
        return 0;
}

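/*
 * Allocate and initialize a new vport host in response to an FC transport
 * vport-create request; the request is expected to have been validated by
 * qla24xx_vport_create_req_sanity_check() first.
 */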
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        struct scsi_host_template *sht = &qla2xxx_driver_template;
        struct Scsi_Host *host;

        vha = qla2x00_create_host(sht, ha);
        if (!vha) {
                ql_log(ql_log_warn, vha, 0xa005,
                    "scsi_host_alloc() failed for vport.\n");
                return NULL;
        }

        host = vha->host;
        fc_vport->dd_data = vha;
        /* New host info */
        u64_to_wwn(fc_vport->node_name, vha->node_name);
        u64_to_wwn(fc_vport->port_name, vha->port_name);

        vha->fc_vport = fc_vport;
        vha->device_flags = 0;
        vha->vp_idx = qla24xx_allocate_vp_id(vha);
        if (vha->vp_idx > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa006,
                    "Couldn't allocate vp_id.\n");
                goto create_vhost_failed;
        }
        vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);

        vha->dpc_flags = 0L;

        /*
         * To fix the issue of processing a parent's RSCN for the vport
         * before its SCR is complete.
         */
        set_bit(VP_SCR_NEEDED, &vha->vp_flags);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        qla2x00_start_timer(vha, WATCH_INTERVAL);

        vha->req = base_vha->req;
        vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
        host->can_queue = base_vha->req->length + 128;
        host->cmd_per_lun = 3;
        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
                host->max_cmd_len = 32;
        else
                host->max_cmd_len = MAX_CMDSZ;
        host->max_channel = MAX_BUSES - 1;
        host->max_lun = ql2xmaxlun;
        host->unique_id = host->host_no;
        host->max_id = ha->max_fibre_devices;
        host->transportt = qla2xxx_transport_vport_template;

        ql_dbg(ql_dbg_vport, vha, 0xa007,
            "Detect vport hba %ld at address = %p.\n",
            vha->host_no, vha);

        vha->flags.init_done = 1;

        mutex_lock(&ha->vport_lock);
        set_bit(vha->vp_idx, ha->vp_idx_map);
        ha->cur_vport_count++;
        mutex_unlock(&ha->vport_lock);

        return vha;

create_vhost_failed:
        return NULL;
}

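/*
 * Teardown helpers for the ISP25xx multiqueue support: free a queue's DMA
 * ring, release its queue id from the qid bitmap, and (for response
 * queues) free the associated MSI-X vector.
 */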
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = req->id;

        dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
            sizeof(request_t), req->ring, req->dma);
        req->ring = NULL;
        req->dma = 0;
        if (que_id) {
                ha->req_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(req->outstanding_cmds);
        kfree(req);
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = rsp->id;

        if (rsp->msix && rsp->msix->have_irq) {
                free_irq(rsp->msix->vector, rsp->msix->handle);
                rsp->msix->have_irq = 0;
                rsp->msix->in_use = 0;
                rsp->msix->handle = NULL;
        }
        dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
            sizeof(response_t), rsp->ring, rsp->dma);
        rsp->ring = NULL;
        rsp->dma = 0;
        if (que_id) {
                ha->rsp_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(rsp);
}

static int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        int ret = QLA_SUCCESS;

        if (req && vha->flags.qpairs_req_created) {
                req->options |= BIT_0;
                ret = qla25xx_init_req_que(vha, req);
                if (ret != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;

                qla25xx_free_req_que(vha, req);
        }

        return ret;
}

static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        int ret = QLA_SUCCESS;

        if (rsp && vha->flags.qpairs_rsp_created) {
                rsp->options |= BIT_0;
                ret = qla25xx_init_rsp_que(vha, rsp);
                if (ret != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;

                qla25xx_free_rsp_que(vha, rsp);
        }

        return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
        int cnt, ret = 0;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct qla_hw_data *ha = vha->hw;
        struct qla_qpair *qpair, *tqpair;

        if (ql2xmqsupport || ql2xnvmeenable) {
                list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
                    qp_list_elem)
                        qla2xxx_delete_qpair(vha, qpair);
        } else {
                /* Delete request queues */
                for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
                        req = ha->req_q_map[cnt];
                        if (req && test_bit(cnt, ha->req_qid_map)) {
                                ret = qla25xx_delete_req_que(vha, req);
                                if (ret != QLA_SUCCESS) {
                                        ql_log(ql_log_warn, vha, 0x00ea,
                                            "Couldn't delete req que %d.\n",
                                            req->id);
                                        return ret;
                                }
                        }
                }

                /* Delete response queues */
                for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
                        rsp = ha->rsp_q_map[cnt];
                        if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
                                ret = qla25xx_delete_rsp_que(vha, rsp);
                                if (ret != QLA_SUCCESS) {
                                        ql_log(ql_log_warn, vha, 0x00eb,
                                            "Couldn't delete rsp que %d.\n",
                                            rsp->id);
                                        return ret;
                                }
                        }
                }
        }

        return ret;
}

int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
        int ret = 0;
        struct req_que *req = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t *reg;
        uint32_t cnt;

        req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
        if (req == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00d9,
                    "Failed to allocate memory for request queue.\n");
                goto failed;
        }

        req->length = REQUEST_ENTRY_CNT_24XX;
        req->ring = dma_alloc_coherent(&ha->pdev->dev,
            (req->length + 1) * sizeof(request_t),
            &req->dma, GFP_KERNEL);
        if (req->ring == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00da,
                    "Failed to allocate memory for request_ring.\n");
                goto que_failed;
        }

        ret = qla2x00_alloc_outstanding_cmds(ha, req);
        if (ret != QLA_SUCCESS)
                goto que_failed;

        mutex_lock(&ha->mq_lock);
        que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
        if (que_id >= ha->max_req_queues) {
                mutex_unlock(&ha->mq_lock);
                ql_log(ql_log_warn, base_vha, 0x00db,
                    "No resources to create additional request queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->req_qid_map);
        ha->req_q_map[que_id] = req;
        req->rid = rid;
        req->vp_idx = vp_idx;
        req->qos = qos;

        ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
            "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
            que_id, req->rid, req->vp_idx, req->qos);
        ql_dbg(ql_dbg_init, base_vha, 0x00dc,
            "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
            que_id, req->rid, req->vp_idx, req->qos);

        if (rsp_que < 0)
                req->rsp = NULL;
        else
                req->rsp = ha->rsp_q_map[rsp_que];
        /* Use alternate PCI bus number */
        if (MSB(req->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(req->rid))
                options |= BIT_5;
        req->options = options;

        ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
            "options=0x%x.\n", req->options);
        ql_dbg(ql_dbg_init, base_vha, 0x00dd,
            "options=0x%x.\n", req->options);
        for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
                req->outstanding_cmds[cnt] = NULL;
        req->current_outstanding_cmd = 1;

        req->ring_ptr = req->ring;
        req->ring_index = 0;
        req->cnt = req->length;
        req->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        req->req_q_in = &reg->isp25mq.req_q_in;
        req->req_q_out = &reg->isp25mq.req_q_out;
        req->max_q_depth = ha->req_q_map[0]->max_q_depth;
        req->out_ptr = (void *)(req->ring + req->length);
        mutex_unlock(&ha->mq_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
            req->ring_ptr, req->ring_index,
            req->cnt, req->id, req->max_q_depth);
        ql_dbg(ql_dbg_init, base_vha, 0x00de,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
            req->ring_ptr, req->ring_index, req->cnt,
            req->id, req->max_q_depth);

        if (startqp) {
                ret = qla25xx_init_req_que(base_vha, req);
                if (ret != QLA_SUCCESS) {
                        ql_log(ql_log_fatal, base_vha, 0x00df,
                            "%s failed.\n", __func__);
                        mutex_lock(&ha->mq_lock);
                        clear_bit(que_id, ha->req_qid_map);
                        mutex_unlock(&ha->mq_lock);
                        goto que_failed;
                }
                vha->flags.qpairs_req_created = 1;
        }

        return req->id;

que_failed:
        qla25xx_free_req_que(base_vha, req);
failed:
        return 0;
}

static void qla_do_work(struct work_struct *work)
{
        unsigned long flags;
        struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha = qpair->hw;

        spin_lock_irqsave(&qpair->qp_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
        qla24xx_process_response_queue(vha, qpair->rsp);
        spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
        int ret = 0;
        struct rsp_que *rsp = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t *reg;

        rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
        if (rsp == NULL) {
                ql_log(ql_log_warn, base_vha, 0x0066,
                    "Failed to allocate memory for response queue.\n");
                goto failed;
        }

        rsp->length = RESPONSE_ENTRY_CNT_MQ;
        rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
            (rsp->length + 1) * sizeof(response_t),
            &rsp->dma, GFP_KERNEL);
        if (rsp->ring == NULL) {
                ql_log(ql_log_warn, base_vha, 0x00e1,
                    "Failed to allocate memory for response ring.\n");
                goto que_failed;
        }

        mutex_lock(&ha->mq_lock);
        que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
        if (que_id >= ha->max_rsp_queues) {
                mutex_unlock(&ha->mq_lock);
                ql_log(ql_log_warn, base_vha, 0x00e2,
                    "No resources to create additional response queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->rsp_qid_map);

        rsp->msix = qpair->msix;

        ha->rsp_q_map[que_id] = rsp;
        rsp->rid = rid;
        rsp->vp_idx = vp_idx;
        rsp->hw = ha;
        ql_dbg(ql_dbg_init, base_vha, 0x00e4,
            "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
            que_id, rsp->rid, rsp->vp_idx, rsp->hw);
        /* Use alternate PCI bus number */
        if (MSB(rsp->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(rsp->rid))
                options |= BIT_5;
        /* Enable MSIX handshake mode for adapters without NACK capability */
        if (!IS_MSIX_NACK_CAPABLE(ha))
                options |= BIT_6;

        /* Set option to indicate response queue creation */
        options |= BIT_1;

        rsp->options = options;
        rsp->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
        rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
        rsp->in_ptr = (void *)(rsp->ring + rsp->length);
        mutex_unlock(&ha->mq_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
            "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);
        ql_dbg(ql_dbg_init, base_vha, 0x00e5,
            "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);

        ret = qla25xx_request_irq(ha, qpair, qpair->msix,
            QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
        if (ret)
                goto que_failed;

        if (startqp) {
                ret = qla25xx_init_rsp_que(base_vha, rsp);
                if (ret != QLA_SUCCESS) {
                        ql_log(ql_log_fatal, base_vha, 0x00e7,
                            "%s failed.\n", __func__);
                        mutex_lock(&ha->mq_lock);
                        clear_bit(que_id, ha->rsp_qid_map);
                        mutex_unlock(&ha->mq_lock);
                        goto que_failed;
                }
                vha->flags.qpairs_rsp_created = 1;
        }
        rsp->req = NULL;

        qla2x00_init_response_q_entries(rsp);
        if (qpair->hw->wq)
                INIT_WORK(&qpair->q_work, qla_do_work);
        return rsp->id;

que_failed:
        qla25xx_free_rsp_que(base_vha, rsp);
failed:
        return 0;
}

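/*
 * Completion callback for the SRB_CTRL_VP srb issued by
 * qla24xx_control_vp(); it only signals the waiting submitter.
 */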
static void qla_ctrlvp_sp_done(srb_t *sp, int res)
{
        if (sp->comp)
                complete(sp->comp);
        /* don't free sp here. Let the caller do the free */
}

/**
 * qla24xx_control_vp() - Enable a virtual port for given host
 * @vha: adapter block pointer
 * @cmd: command type to be sent for enable virtual port
 *
 * Return: qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
        int rval = QLA_MEMORY_ALLOC_FAILED;
        struct qla_hw_data *ha = vha->hw;
        int vp_index = vha->vp_idx;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        DECLARE_COMPLETION_ONSTACK(comp);
        srb_t *sp;

        ql_dbg(ql_dbg_vport, vha, 0x10c1,
            "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

        if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
                return QLA_PARAMETER_ERROR;

        sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
        if (!sp)
                return rval;

        sp->type = SRB_CTRL_VP;
        sp->name = "ctrl_vp";
        sp->comp = &comp;
        sp->done = qla_ctrlvp_sp_done;
        sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
        sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
        sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_async, vha, 0xffff,
                    "%s: %s Failed submission. %x.\n",
                    __func__, sp->name, rval);
                goto done;
        }

        ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
            sp->name, sp->handle);

        wait_for_completion(&comp);
        sp->comp = NULL;

        rval = sp->rc;
        switch (rval) {
        case QLA_FUNCTION_TIMEOUT:
                ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
                    __func__, sp->name, rval);
                break;
        case QLA_SUCCESS:
                ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
                    __func__, sp->name);
                break;
        default:
                ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
                    __func__, sp->name, rval);
                break;
        }
done:
        sp->free(sp);
        return rval;
}