/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"
#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");
static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
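
/*
 * Note: the three timeouts above are validated against each other whenever
 * one of them is written. srp_tmo_valid(), provided by the SRP transport
 * class, rejects (among other combinations) configurations in which
 * fast_io_fail_tmo >= dev_loss_tmo or in which all of these mechanisms are
 * disabled at the same time.
 */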
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->mr_pool_size;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}
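
/*
 * While the FMR pool above belongs to the legacy registration mechanism,
 * the helpers below manage a pool of descriptors for fast registration (FR)
 * work requests. Which mechanism is used is decided per device through
 * dev->use_fast_reg and dev->use_fmr (see also the prefer_fr module
 * parameter).
 */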
/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}
/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}
/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Drain the qp before destroying it so that the receive completion
 * handler cannot access the queue pair while it is being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	ib_drain_rq(ch->qp);
	ib_destroy_qp(ch->qp);
}
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
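	/*
	 * With fast registration a single SRP command may post up to three
	 * work requests on the send queue: an IB_WR_REG_MR to map the data,
	 * the IB_WR_SEND itself, and an IB_WR_LOCAL_INV to invalidate the
	 * rkey afterwards. Hence the factor of three below.
	 */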
	const int m = dev->use_fast_reg ? 3 : 1;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
				ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
				ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}

	srp_destroy_qp(ch);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the SCSI error handler may keep trying to perform
	 * recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &ch->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = ch->qp->qp_num;
	req->param.qp_type		      = ch->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}
static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}
/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: any attributes defined in the host template that did not exist
 * before this function was invoked will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}
static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}
static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}
static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
		u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, &bad_wr);
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}
/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}
/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @ch->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}
/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}
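
/*
 * struct srp_direct_buf is the SRP wire format for a memory descriptor: a
 * 64-bit virtual address, a 32-bit rkey and a 32-bit length, all big-endian.
 * srp_map_desc() above appends one such descriptor to the table that
 * state->desc points into.
 */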
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (state->fmr.next >= state->fmr.end)
		return -ENOMEM;

	WARN_ON_ONCE(!dev->use_fmr);

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && target->global_mr) {
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     target->global_mr->rkey);
		goto reset_state;
	}

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->fmr.next++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
		     state->dma_len, fmr->fmr->rkey);

reset_state:
	state->npages = 0;
	state->dma_len = 0;

	return 0;
}
static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "FAST REG");
}

static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_request *req,
			     struct srp_rdma_ch *ch, int sg_nents)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_send_wr *bad_wr;
	struct ib_reg_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;
	int n, err;

	if (state->fr.next >= state->fr.end)
		return -ENOMEM;

	WARN_ON_ONCE(!dev->use_fast_reg);

	if (sg_nents == 1 && target->global_mr) {
		srp_map_desc(state, sg_dma_address(state->sg),
			     sg_dma_len(state->sg),
			     target->global_mr->rkey);
		return 1;
	}

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;
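
	/*
	 * Bump the key portion of the rkey before re-registering the MR so
	 * that stale requests still referring to the previous rkey trigger
	 * a protection error instead of silently accessing remapped memory.
	 */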
	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
	if (unlikely(n < 0))
		return n;

	req->reg_cqe.done = srp_reg_mr_err_done;

	wr.wr.next = NULL;
	wr.wr.opcode = IB_WR_REG_MR;
	wr.wr.wr_cqe = &req->reg_cqe;
	wr.wr.num_sge = 0;
	wr.wr.send_flags = 0;
	wr.mr = desc->mr;
	wr.key = desc->mr->rkey;
	wr.access = (IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE);

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, desc->mr->iova,
		     desc->mr->length, desc->mr->rkey);

	err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
	if (unlikely(err))
		return err;

	return n;
}
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg, int sg_index)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len = 0;
	int ret;

	WARN_ON_ONCE(!dma_len);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;
		if (state->npages == dev->max_pages_per_mr || offset != 0) {
			ret = srp_map_finish_fmr(state, ch);
			if (ret)
				return ret;
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the last entry of the MR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->mr_page_size)
		ret = srp_map_finish_fmr(state, ch);
	return ret;
}
static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct scatterlist *sg;
	int i, ret;

	state->desc = req->indirect_desc;
	state->pages = req->map_page;
	state->fmr.next = req->fmr_list;
	state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;

	for_each_sg(scat, sg, count, i) {
		ret = srp_map_sg_entry(state, ch, sg, i);
		if (ret)
			return ret;
	}

	ret = srp_map_finish_fmr(state, ch);
	if (ret)
		return ret;

	return 0;
}

static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			 struct srp_request *req, struct scatterlist *scat,
			 int count)
{
	state->desc = req->indirect_desc;
	state->fr.next = req->fr_list;
	state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
	state->sg = scat;

	while (count) {
		int i, n;

		n = srp_map_finish_fr(state, req, ch, count);
		if (unlikely(n < 0))
			return n;

		count -= n;
		for (i = 0; i < n; i++)
			state->sg = sg_next(state->sg);
	}

	return 0;
}

static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct scatterlist *sg;
	int i;

	state->desc = req->indirect_desc;
	for_each_sg(scat, sg, count, i) {
		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
			     ib_sg_dma_len(dev->dev, sg),
			     target->global_mr->rkey);
	}

	return 0;
}
/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory buffer.
 */
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
	u64 idb_pages[1];
	struct scatterlist idb_sg[1];
	int ret;

	memset(&state, 0, sizeof(state));
	memset(&idb_desc, 0, sizeof(idb_desc));
	state.gen.next = next_mr;
	state.gen.end = end_mr;
	state.desc = &idb_desc;
	state.base_dma_addr = req->indirect_dma_addr;
	state.dma_len = idb_len;

	if (dev->use_fast_reg) {
		state.sg = idb_sg;
		sg_set_buf(idb_sg, req->indirect_desc, idb_len);
		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
#endif
		ret = srp_map_finish_fr(&state, req, ch, 1);
		if (ret < 0)
			return ret;
	} else if (dev->use_fmr) {
		state.pages = idb_pages;
		state.pages[0] = (req->indirect_dma_addr &
				  dev->mr_page_mask);
		state.npages = 1;
		ret = srp_map_finish_fmr(&state, ch);
		if (ret < 0)
			return ret;
	} else {
		return -EINVAL;
	}

	*idb_rkey = idb_desc.key;

	return 0;
}
/**
 * srp_map_data() - map SCSI data buffer onto an SRP request
 * @scmnd: SCSI command to map
 * @ch: SRP RDMA channel
 * @req: SRP request
 *
 * Returns the length in bytes of the SRP_CMD IU or a negative value if
 * mapping failed.
 */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count, ret;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 idb_len, table_len;
	__be32 idb_rkey;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1 && target->global_mr) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->global_mr->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	if (dev->use_fast_reg)
		ret = srp_map_sg_fr(&state, ch, req, scat, count);
	else if (dev->use_fmr)
		ret = srp_map_sg_fmr(&state, ch, req, scat, count);
	else
		ret = srp_map_sg_dma(&state, ch, req, scat, count);
	req->nmdesc = state.nmdesc;
	if (ret < 0)
		goto unmap;

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		ret = -EIO;
		goto unmap;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);
	idb_len = sizeof(struct srp_indirect_buf) + table_len;

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	if (!target->global_mr) {
		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
				  idb_len, &idb_rkey);
		if (ret < 0)
			goto unmap;
		req->nmdesc++;
	} else {
		idb_rkey = cpu_to_be32(target->global_mr->rkey);
	}

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = idb_rkey;
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;

unmap:
	srp_unmap_data(scmnd, ch, req);
	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
		ret = -E2BIG;
	return ret;
}
/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}
/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;
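
	/*
	 * The send CQ is polled with IB_POLL_DIRECT, so no other context
	 * reaps send completions. Processing them here moves IUs whose send
	 * has completed back onto free_tx before we look for a free one.
	 */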
	ib_process_cq_direct(ch->send_cq, -1);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "SEND");
		return;
	}

	list_add(&iu->list, &ch->free_tx);
}

static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	iu->cqe.done = srp_send_done;

	wr.next       = NULL;
	wr.wr_cqe     = &iu->cqe;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	iu->cqe.done = srp_recv_done;

	wr.next     = NULL;
	wr.wr_cqe   = &iu->cqe;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(ch->qp, &wr, &bad_wr);
}
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&ch->lock, flags);

		ch->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			ch->tsk_mgmt_status = rsp->data[3];
		complete(&ch->tsk_mgmt_done);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd) {
			req = (void *)scmnd->host_scribble;
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
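
/*
 * Send a response IU (SRP_CRED_RSP or SRP_AER_RSP) back to the target,
 * crediting req_delta to the request limit first.
 */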
static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}
static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	int res;
	u8 opcode;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "RECV");
		return;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}
/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}

static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname)
{
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;

	if (ch->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %s (%d) for CQE %p\n",
			     opname, ib_wc_status_msg(wc->status), wc->status,
			     wc->wr_cqe);
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_rdma_ch *ch;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	u32 tag;
	u16 idx;
	int len, ret;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

	WARN_ON_ONCE(scmnd->request->tag < 0);
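	/*
	 * blk_mq_unique_tag() encodes the hardware queue index in the upper
	 * 16 bits and the per-queue tag in the lower 16 bits; the former
	 * selects the RDMA channel and the latter the request ring slot.
	 */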
	tag = blk_mq_unique_tag(scmnd->request);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
	cmd->tag    = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	ret = 0;

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return ret;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Make sure that the loops that iterate over the request ring do not
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	goto unlock_rport;
}
/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      target->max_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}

err_no_ring:
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}
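
/*
 * Illustrative example (the values below are hypothetical, not mandated by
 * the spec): with qp_attr->timeout == 19 and retry_cnt == 7,
 * T_tr = 4096 * 2^19 ns ~= 2.1 s and max_compl_time ~= 7 * 4 * 2.1 s ~= 60 s,
 * so rq_tmo_jiffies ends up corresponding to roughly 61 seconds.
 */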
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       const struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	ch->status = ret;
}
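
/*
 * Decode a CM REJ and translate it into ch->status: redirect rejects become
 * SRP_PORT_REDIRECT or SRP_DLID_REDIRECT so that srp_connect_ch() retries,
 * a stale connection becomes SRP_STALE_CONN, and everything else is mapped
 * to -ECONNRESET.
 */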
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		ch->path.dlid = cpi->redirect_lid;
		ch->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);

		ch->status = ch->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(ch->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
				     be64_to_cpu(ch->path.dgid.global.interface_id));

			ch->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			ch->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->sgid.raw,
					     target->orig_dgid.raw, reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		ch->status = -ECONNRESET;
	}
}
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, ch);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, ch);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		ch->connected = false;
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		ch->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}
/**
 * srp_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns queue depth.
 */
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (!sdev->tagged_supported)
		qdepth = 1;
	return scsi_change_queue_depth(sdev, qdepth);
}
static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
			     u8 func)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!ch->connected || target->qp_in_error)
		return -1;

	init_completion(&ch->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to avoid that srp_create_ch_ib() is
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);
		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	int_to_scsilun(lun, &tsk_mgmt->lun);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func	= func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);
		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req)
		return SUCCESS;
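	/*
	 * blk_mq_unique_tag() encodes the hardware queue index in the upper
	 * 16 bits of the tag and the per-queue tag in the lower 16 bits;
	 * blk_mq_unique_tag_to_hwq() extracts that queue index, which is
	 * also the index of the RDMA channel the command was issued on.
	 */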
	tag = blk_mq_unique_tag(scmnd->request);
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(ch, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	int i, j;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (ch->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Use a separate index for the request ring so the channel
		 * loop counter above is not clobbered.
		 */
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
		}
	}

	return SUCCESS;
}
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}
static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch = &target->ch[0];

	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}
	return sprintf(buf, "%d\n", req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->ch_count);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}
static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
static DEVICE_ATTR(req_lim,	    S_IRUGO, show_req_lim,	   NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count,	    S_IRUGO, show_ch_count,	   NULL);
static DEVICE_ATTR(comp_vector,	    S_IRUGO, show_comp_vector,	   NULL);
static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,	   NULL);
static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth		= srp_change_queue_depth,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs,
	.track_queue_depth		= 1,
};
static int srp_sdev_count(struct Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}

/*
 * Return values:
 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
 *    removal has been scheduled.
 * 0 and target->state != SRP_TARGET_REMOVED upon success.
 */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}
static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name        = "infiniband_srp",
	.dev_release = srp_release_dev
};
/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
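/*
 * For example (illustrative only - the device name, port number and all
 * identifiers below are hypothetical):
 *
 *     echo id_ext=200100e08b000000,ioc_guid=0002c90300004f21,\
 *     dgid=fe800000000000000002c90300004f21,pkey=ffff,\
 *     service_id=0002c90300004f21 \
 *         > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */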
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};
static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL			}
};
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;
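		/*
		 * Illustrative: a 32-character hex string such as
		 * "fe800000000000000002c90300004f21" (hypothetical value) is
		 * consumed two characters at a time above, yielding the 16
		 * raw GID bytes fe:80:00:...:4f:21.
		 */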
		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;
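		/*
		 * Worked example, assuming SRP_RSP_SQ_SIZE and
		 * SRP_TSK_MGMT_SQ_SIZE (defined in ib_srp.h) are each 1:
		 * queue_size=64 gives can_queue = 64 and target->queue_size
		 * = 66, reserving one slot for an RSP and one for a task
		 * management request.
		 */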
		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	bool multich = false;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = -1LL;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->pd->local_dma_lkey;
	target->global_mr	= host->srp_dev->global_mr;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Avoid that the SCSI host can be removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto out;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto out;
	}

	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->mr_pool_size = target->scsi_host->can_queue;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
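	/*
	 * Worked example (assuming the srp.h layouts: struct srp_cmd is 48
	 * bytes, struct srp_indirect_buf is 20 bytes and struct
	 * srp_direct_buf is 16 bytes): with the default cmd_sg_cnt of 12,
	 * max_iu_len = 48 + 20 + 12 * 16 = 260 bytes.
	 */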
	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
	if (ret)
		goto out;

	ret = -ENOMEM;
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto out;

	node_idx = 0;
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = (node_idx * ibdev->num_comp_vectors /
				      num_online_nodes() + target->comp_vector)
				     % ibdev->num_comp_vectors;
		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
				    num_online_nodes() + target->comp_vector)
				   % ibdev->num_comp_vectors;
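		/*
		 * Illustrative: on a hypothetical system with two online
		 * NUMA nodes, 16 channels, 8 completion vectors and
		 * comp_vector = 0, node 0 is assigned channels [0, 8)
		 * starting at vector 0 and node 1 channels [8, 16) starting
		 * at vector 4, spreading completion work across both nodes.
		 */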
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d failed\n",
					     ch_start + cpu_idx,
					     target->ch_count);
				if (node_idx == 0 && cpu_idx == 0) {
					goto err_disconnect;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					goto connected;
				}
			}

			multich = true;
			cpu_idx++;
		}
		node_idx++;
	}

connected:
	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be16_to_cpu(target->pkey),
			     be64_to_cpu(target->service_id),
			     target->sgid.raw, target->orig_dgid.raw);
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

	scsi_host_put(target->scsi_host);
	if (ret < 0)
		scsi_host_put(target->scsi_host);

	return ret;

err_disconnect:
	srp_disconnect_target(target);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);
	goto out;
}
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host;
	int mr_page_shift, p;
	u64 max_pages_per_mr;

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		return;

	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (device->attrs.device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));
	srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift		= max(12, ffs(device->attrs.page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
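	/*
	 * Worked example (hypothetical capability mask): for page_size_cap
	 * = 0xfffff000 (4 KiB smallest supported page), ffs() - 1 = 12, so
	 * mr_page_size = 4096 and mr_page_mask = ~0xfffULL.
	 */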
	max_pages_per_mr	= device->attrs.max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      device->attrs.max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				  srp_dev->max_pages_per_mr;
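	/*
	 * Worked example (hypothetical limits): with max_mr_size = 2^32 and
	 * a 4 KiB MR page size the device could map 2^20 pages per MR, so
	 * max_pages_per_mr is clamped to SRP_MAX_PAGES_PER_MR (and, when
	 * fast registration is used, to max_fast_reg_page_list_len as well).
	 */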
	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, device->attrs.max_mr_size,
		 device->attrs.max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
		srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
						   IB_ACCESS_LOCAL_WRITE |
						   IB_ACCESS_REMOTE_READ |
						   IB_ACCESS_REMOTE_WRITE);
		if (IS_ERR(srp_dev->global_mr))
			goto err_pd;
	} else {
		srp_dev->global_mr = NULL;
	}

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);
	return;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);
}
static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	if (srp_dev->global_mr)
		ib_dereg_mr(srp_dev->global_mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};
static int __init srp_init_module(void)
{
	int ret;

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);