2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 #include <linux/lockdep.h>
44 #include <linux/inet.h>
45 #include <rdma/ib_cache.h>
47 #include <linux/atomic.h>
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_device.h>
51 #include <scsi/scsi_dbg.h>
52 #include <scsi/scsi_tcq.h>
54 #include <scsi/scsi_transport_srp.h>
58 #define DRV_NAME "ib_srp"
59 #define PFX DRV_NAME ": "
61 MODULE_AUTHOR("Roland Dreier");
62 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63 MODULE_LICENSE("Dual BSD/GPL");
#if !defined(CONFIG_DYNAMIC_DEBUG)
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
#define DYNAMIC_DEBUG_BRANCH(descriptor) false
#endif
70 static unsigned int srp_sg_tablesize;
71 static unsigned int cmd_sg_entries;
72 static unsigned int indirect_sg_entries;
73 static bool allow_ext_sg;
74 static bool register_always = true;
75 static bool never_register;
76 static int topspin_workarounds = 1;
78 module_param(srp_sg_tablesize, uint, 0444);
79 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
81 module_param(cmd_sg_entries, uint, 0444);
82 MODULE_PARM_DESC(cmd_sg_entries,
83 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
85 module_param(indirect_sg_entries, uint, 0444);
86 MODULE_PARM_DESC(indirect_sg_entries,
87 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
89 module_param(allow_ext_sg, bool, 0444);
90 MODULE_PARM_DESC(allow_ext_sg,
91 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
93 module_param(topspin_workarounds, int, 0444);
94 MODULE_PARM_DESC(topspin_workarounds,
95 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
97 module_param(register_always, bool, 0444);
98 MODULE_PARM_DESC(register_always,
99 "Use memory registration even for contiguous memory regions");
101 module_param(never_register, bool, 0444);
102 MODULE_PARM_DESC(never_register, "Never register memory");
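/*
 * Illustrative example: the parameters above are read-only through sysfs
 * (permission mode 0444), so they are normally set on the modprobe command
 * line or in /etc/modprobe.d/, e.g.:
 *
 *   modprobe ib_srp cmd_sg_entries=255 register_always=N
 */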
104 static const struct kernel_param_ops srp_tmo_ops;
106 static int srp_reconnect_delay = 10;
107 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
109 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
111 static int srp_fast_io_fail_tmo = 15;
112 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
114 MODULE_PARM_DESC(fast_io_fail_tmo,
115 "Number of seconds between the observation of a transport"
116 " layer error and failing all I/O. \"off\" means that this"
117 " functionality is disabled.");
119 static int srp_dev_loss_tmo = 600;
120 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
122 MODULE_PARM_DESC(dev_loss_tmo,
123 "Maximum number of seconds that the SRP transport should"
124 " insulate transport layer errors. After this time has been"
125 " exceeded the SCSI host is removed. Should be"
126 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
127 " if fast_io_fail_tmo has not been set. \"off\" means that"
128 " this functionality is disabled.");
130 static bool srp_use_imm_data = true;
131 module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
132 MODULE_PARM_DESC(use_imm_data,
133 "Whether or not to request permission to use immediate data during SRP login.");
135 static unsigned int srp_max_imm_data = 8 * 1024;
136 module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
137 MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
139 static unsigned ch_count;
140 module_param(ch_count, uint, 0444);
141 MODULE_PARM_DESC(ch_count,
142 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
144 static int srp_add_one(struct ib_device *device);
145 static void srp_remove_one(struct ib_device *device, void *client_data);
146 static void srp_rename_dev(struct ib_device *device, void *client_data);
147 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
148 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
150 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
151 const struct ib_cm_event *event);
152 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
153 struct rdma_cm_event *event);
155 static struct scsi_transport_template *ib_srp_transport_template;
156 static struct workqueue_struct *srp_remove_wq;
158 static struct ib_client srp_client = {
161 .remove = srp_remove_one,
162 .rename = srp_rename_dev
165 static struct ib_sa_client srp_sa_client;
167 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
169 int tmo = *(int *)kp->arg;
172 return sysfs_emit(buffer, "%d\n", tmo);
174 return sysfs_emit(buffer, "off\n");
177 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
181 res = srp_parse_tmo(&tmo, val);
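/*
 * The branches below cross-check the new value against the other two SRP
 * transport timeouts; srp_tmo_valid() (from scsi_transport_srp) rejects
 * inconsistent combinations, such as a fast_io_fail_tmo that is not smaller
 * than dev_loss_tmo.
 */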
185 if (kp->arg == &srp_reconnect_delay)
186 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
188 else if (kp->arg == &srp_fast_io_fail_tmo)
189 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
191 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
195 *(int *)kp->arg = tmo;
201 static const struct kernel_param_ops srp_tmo_ops = {
206 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
208 return (struct srp_target_port *) host->hostdata;
211 static const char *srp_target_info(struct Scsi_Host *host)
213 return host_to_target(host)->target_name;
216 static int srp_target_is_topspin(struct srp_target_port *target)
218 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
219 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
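/*
 * The workaround only applies to targets whose IOC GUID starts with the
 * Topspin or Cisco OUI; the first three bytes of an EUI-64 GUID identify
 * the manufacturer.
 */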
221 return topspin_workarounds &&
222 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
223 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
226 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
228 enum dma_data_direction direction)
232 iu = kmalloc(sizeof *iu, gfp_mask);
236 iu->buf = kzalloc(size, gfp_mask);
240 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
242 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
246 iu->direction = direction;
258 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
263 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
269 static void srp_qp_event(struct ib_event *event, void *context)
271 pr_debug("QP event %s (%d)\n",
272 ib_event_msg(event->event), event->event);
275 static int srp_init_ib_qp(struct srp_target_port *target,
278 struct ib_qp_attr *attr;
281 attr = kmalloc(sizeof *attr, GFP_KERNEL);
285 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
286 target->srp_host->port,
287 be16_to_cpu(target->ib_cm.pkey),
292 attr->qp_state = IB_QPS_INIT;
293 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
294 IB_ACCESS_REMOTE_WRITE);
295 attr->port_num = target->srp_host->port;
297 ret = ib_modify_qp(qp, attr,
308 static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
310 struct srp_target_port *target = ch->target;
311 struct ib_cm_id *new_cm_id;
313 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
314 srp_ib_cm_handler, ch);
315 if (IS_ERR(new_cm_id))
316 return PTR_ERR(new_cm_id);
319 ib_destroy_cm_id(ch->ib_cm.cm_id);
320 ch->ib_cm.cm_id = new_cm_id;
321 if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
322 target->srp_host->port))
323 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
325 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
326 ch->ib_cm.path.sgid = target->sgid;
327 ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
328 ch->ib_cm.path.pkey = target->ib_cm.pkey;
329 ch->ib_cm.path.service_id = target->ib_cm.service_id;
334 static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
336 struct srp_target_port *target = ch->target;
337 struct rdma_cm_id *new_cm_id;
340 new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
341 RDMA_PS_TCP, IB_QPT_RC);
342 if (IS_ERR(new_cm_id)) {
343 ret = PTR_ERR(new_cm_id);
348 init_completion(&ch->done);
349 ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
350 &target->rdma_cm.src.sa : NULL,
351 &target->rdma_cm.dst.sa,
352 SRP_PATH_REC_TIMEOUT_MS);
354 pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
355 &target->rdma_cm.src, &target->rdma_cm.dst, ret);
358 ret = wait_for_completion_interruptible(&ch->done);
364 pr_err("Resolving address %pISpsc failed (%d)\n",
365 &target->rdma_cm.dst, ret);
369 swap(ch->rdma_cm.cm_id, new_cm_id);
373 rdma_destroy_id(new_cm_id);
378 static int srp_new_cm_id(struct srp_rdma_ch *ch)
380 struct srp_target_port *target = ch->target;
382 return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
383 srp_new_ib_cm_id(ch);
387 * srp_destroy_fr_pool() - free the resources owned by a pool
388 * @pool: Fast registration pool to be destroyed.
390 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
393 struct srp_fr_desc *d;
398 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
406 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
407 * @device: IB device to allocate fast registration descriptors for.
408 * @pd: Protection domain associated with the FR descriptors.
409 * @pool_size: Number of descriptors to allocate.
410 * @max_page_list_len: Maximum fast registration work request page list length.
412 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
413 struct ib_pd *pd, int pool_size,
414 int max_page_list_len)
416 struct srp_fr_pool *pool;
417 struct srp_fr_desc *d;
419 int i, ret = -EINVAL;
420 enum ib_mr_type mr_type;
425 pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
428 pool->size = pool_size;
429 pool->max_page_list_len = max_page_list_len;
430 spin_lock_init(&pool->lock);
431 INIT_LIST_HEAD(&pool->free_list);
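/*
 * Prefer gap-tolerant MRs when the HCA supports them: an IB_MR_TYPE_SG_GAPS
 * MR can cover an sg-list whose elements are not page aligned, which a
 * regular IB_MR_TYPE_MEM_REG MR cannot.
 */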
433 if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
434 mr_type = IB_MR_TYPE_SG_GAPS;
436 mr_type = IB_MR_TYPE_MEM_REG;
438 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
439 mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
443 pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
444 dev_name(&device->dev));
448 list_add_tail(&d->entry, &pool->free_list);
455 srp_destroy_fr_pool(pool);
463 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
464 * @pool: Pool to obtain descriptor from.
466 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
468 struct srp_fr_desc *d = NULL;
471 spin_lock_irqsave(&pool->lock, flags);
472 if (!list_empty(&pool->free_list)) {
473 d = list_first_entry(&pool->free_list, typeof(*d), entry);
476 spin_unlock_irqrestore(&pool->lock, flags);
482 * srp_fr_pool_put() - put an FR descriptor back in the free list
483 * @pool: Pool the descriptor was allocated from.
484 * @desc: Pointer to an array of fast registration descriptor pointers.
485 * @n: Number of descriptors to put back.
487 * Note: The caller must already have queued an invalidation request for
488 * desc->mr->rkey before calling this function.
490 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
496 spin_lock_irqsave(&pool->lock, flags);
497 for (i = 0; i < n; i++)
498 list_add(&desc[i]->entry, &pool->free_list);
499 spin_unlock_irqrestore(&pool->lock, flags);
502 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
504 struct srp_device *dev = target->srp_host->srp_dev;
506 return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
507 dev->max_pages_per_mr);
511 * srp_destroy_qp() - destroy an RDMA queue pair
512 * @ch: SRP RDMA channel.
 * Drain the QP before destroying it. This prevents the receive completion
 * handler from accessing the queue pair while it is being destroyed.
 */
518 static void srp_destroy_qp(struct srp_rdma_ch *ch)
520 spin_lock_irq(&ch->lock);
521 ib_process_cq_direct(ch->send_cq, -1);
522 spin_unlock_irq(&ch->lock);
525 ib_destroy_qp(ch->qp);
528 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
530 struct srp_target_port *target = ch->target;
531 struct srp_device *dev = target->srp_host->srp_dev;
532 const struct ib_device_attr *attr = &dev->dev->attrs;
533 struct ib_qp_init_attr *init_attr;
534 struct ib_cq *recv_cq, *send_cq;
536 struct srp_fr_pool *fr_pool = NULL;
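/*
 * Upper bound on the number of send work requests per SCSI command: one for
 * the SRP_CMD itself plus, when memory registration is used, one
 * IB_WR_REG_MR and one IB_WR_LOCAL_INV per memory region.
 */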
537 const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
540 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
544 /* queue_size + 1 for ib_drain_rq() */
545 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
546 ch->comp_vector, IB_POLL_SOFTIRQ);
547 if (IS_ERR(recv_cq)) {
548 ret = PTR_ERR(recv_cq);
552 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
553 ch->comp_vector, IB_POLL_DIRECT);
554 if (IS_ERR(send_cq)) {
555 ret = PTR_ERR(send_cq);
559 init_attr->event_handler = srp_qp_event;
560 init_attr->cap.max_send_wr = m * target->queue_size;
561 init_attr->cap.max_recv_wr = target->queue_size + 1;
562 init_attr->cap.max_recv_sge = 1;
563 init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge);
564 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
565 init_attr->qp_type = IB_QPT_RC;
566 init_attr->send_cq = send_cq;
567 init_attr->recv_cq = recv_cq;
569 ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
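/*
 * The first send SGE always carries the SRP IU itself; only the remaining
 * SGEs are available for immediate data.
 */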
571 if (target->using_rdma_cm) {
572 ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
573 qp = ch->rdma_cm.cm_id->qp;
575 qp = ib_create_qp(dev->pd, init_attr);
577 ret = srp_init_ib_qp(target, qp);
585 pr_err("QP creation failed for dev %s: %d\n",
586 dev_name(&dev->dev->dev), ret);
590 if (dev->use_fast_reg) {
591 fr_pool = srp_alloc_fr_pool(target);
592 if (IS_ERR(fr_pool)) {
593 ret = PTR_ERR(fr_pool);
594 shost_printk(KERN_WARNING, target->scsi_host, PFX
595 "FR pool allocation failed (%d)\n", ret);
603 ib_free_cq(ch->recv_cq);
605 ib_free_cq(ch->send_cq);
608 ch->recv_cq = recv_cq;
609 ch->send_cq = send_cq;
611 if (dev->use_fast_reg) {
613 srp_destroy_fr_pool(ch->fr_pool);
614 ch->fr_pool = fr_pool;
621 if (target->using_rdma_cm)
622 rdma_destroy_qp(ch->rdma_cm.cm_id);
638 * Note: this function may be called without srp_alloc_iu_bufs() having been
639 * invoked. Hence the ch->[rt]x_ring checks.
641 static void srp_free_ch_ib(struct srp_target_port *target,
642 struct srp_rdma_ch *ch)
644 struct srp_device *dev = target->srp_host->srp_dev;
650 if (target->using_rdma_cm) {
651 if (ch->rdma_cm.cm_id) {
652 rdma_destroy_id(ch->rdma_cm.cm_id);
653 ch->rdma_cm.cm_id = NULL;
656 if (ch->ib_cm.cm_id) {
657 ib_destroy_cm_id(ch->ib_cm.cm_id);
658 ch->ib_cm.cm_id = NULL;
/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
666 if (dev->use_fast_reg) {
668 srp_destroy_fr_pool(ch->fr_pool);
672 ib_free_cq(ch->send_cq);
673 ib_free_cq(ch->recv_cq);
 * Prevent the SCSI error handler from using this channel after it has been
 * freed: the SCSI error handler can continue trying to perform recovery
 * actions after scsi_remove_host() has returned.
 */
684 ch->send_cq = ch->recv_cq = NULL;
687 for (i = 0; i < target->queue_size; ++i)
688 srp_free_iu(target->srp_host, ch->rx_ring[i]);
693 for (i = 0; i < target->queue_size; ++i)
694 srp_free_iu(target->srp_host, ch->tx_ring[i]);
700 static void srp_path_rec_completion(int status,
701 struct sa_path_rec *pathrec,
704 struct srp_rdma_ch *ch = ch_ptr;
705 struct srp_target_port *target = ch->target;
709 shost_printk(KERN_ERR, target->scsi_host,
710 PFX "Got failed path rec status %d\n", status);
712 ch->ib_cm.path = *pathrec;
716 static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
718 struct srp_target_port *target = ch->target;
721 ch->ib_cm.path.numb_path = 1;
723 init_completion(&ch->done);
725 ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
726 target->srp_host->srp_dev->dev,
727 target->srp_host->port,
729 IB_SA_PATH_REC_SERVICE_ID |
730 IB_SA_PATH_REC_DGID |
731 IB_SA_PATH_REC_SGID |
732 IB_SA_PATH_REC_NUMB_PATH |
734 SRP_PATH_REC_TIMEOUT_MS,
736 srp_path_rec_completion,
737 ch, &ch->ib_cm.path_query);
738 if (ch->ib_cm.path_query_id < 0)
739 return ch->ib_cm.path_query_id;
741 ret = wait_for_completion_interruptible(&ch->done);
746 shost_printk(KERN_WARNING, target->scsi_host,
747 PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
748 ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
749 be16_to_cpu(target->ib_cm.pkey),
750 be64_to_cpu(target->ib_cm.service_id));
755 static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
757 struct srp_target_port *target = ch->target;
760 init_completion(&ch->done);
762 ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
766 wait_for_completion_interruptible(&ch->done);
769 shost_printk(KERN_WARNING, target->scsi_host,
770 PFX "Path resolution failed\n");
775 static int srp_lookup_path(struct srp_rdma_ch *ch)
777 struct srp_target_port *target = ch->target;
779 return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
780 srp_ib_lookup_path(ch);
783 static u8 srp_get_subnet_timeout(struct srp_host *host)
785 struct ib_port_attr attr;
787 u8 subnet_timeout = 18;
789 ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
791 subnet_timeout = attr.subnet_timeout;
793 if (unlikely(subnet_timeout < 15))
794 pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
795 dev_name(&host->srp_dev->dev->dev), subnet_timeout);
797 return subnet_timeout;
800 static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
803 struct srp_target_port *target = ch->target;
805 struct rdma_conn_param rdma_param;
806 struct srp_login_req_rdma rdma_req;
807 struct ib_cm_req_param ib_param;
808 struct srp_login_req ib_req;
813 req = kzalloc(sizeof *req, GFP_KERNEL);
817 req->ib_param.flow_control = 1;
818 req->ib_param.retry_count = target->tl_retry_count;
821 * Pick some arbitrary defaults here; we could make these
822 * module parameters if anyone cared about setting them.
824 req->ib_param.responder_resources = 4;
825 req->ib_param.rnr_retry_count = 7;
826 req->ib_param.max_cm_retries = 15;
828 req->ib_req.opcode = SRP_LOGIN_REQ;
830 req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
831 req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
832 SRP_BUF_FORMAT_INDIRECT);
833 req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
834 SRP_MULTICHAN_SINGLE);
835 if (srp_use_imm_data) {
836 req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
837 req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
840 if (target->using_rdma_cm) {
841 req->rdma_param.flow_control = req->ib_param.flow_control;
842 req->rdma_param.responder_resources =
843 req->ib_param.responder_resources;
844 req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
845 req->rdma_param.retry_count = req->ib_param.retry_count;
846 req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
847 req->rdma_param.private_data = &req->rdma_req;
848 req->rdma_param.private_data_len = sizeof(req->rdma_req);
850 req->rdma_req.opcode = req->ib_req.opcode;
851 req->rdma_req.tag = req->ib_req.tag;
852 req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
853 req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
854 req->rdma_req.req_flags = req->ib_req.req_flags;
855 req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
857 ipi = req->rdma_req.initiator_port_id;
858 tpi = req->rdma_req.target_port_id;
862 subnet_timeout = srp_get_subnet_timeout(target->srp_host);
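/*
 * Both the subnet timeout and the CM response timeouts below are encoded as
 * 4.096 us * 2^n, so using subnet_timeout + 2 gives the remote CM roughly
 * four times the maximum expected packet lifetime to respond.
 */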
864 req->ib_param.primary_path = &ch->ib_cm.path;
865 req->ib_param.alternate_path = NULL;
866 req->ib_param.service_id = target->ib_cm.service_id;
867 get_random_bytes(&req->ib_param.starting_psn, 4);
868 req->ib_param.starting_psn &= 0xffffff;
869 req->ib_param.qp_num = ch->qp->qp_num;
870 req->ib_param.qp_type = ch->qp->qp_type;
871 req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
872 req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
873 req->ib_param.private_data = &req->ib_req;
874 req->ib_param.private_data_len = sizeof(req->ib_req);
876 ipi = req->ib_req.initiator_port_id;
877 tpi = req->ib_req.target_port_id;
881 * In the published SRP specification (draft rev. 16a), the
882 * port identifier format is 8 bytes of ID extension followed
883 * by 8 bytes of GUID. Older drafts put the two halves in the
884 * opposite order, so that the GUID comes first.
886 * Targets conforming to these obsolete drafts can be
887 * recognized by the I/O Class they report.
889 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
890 memcpy(ipi, &target->sgid.global.interface_id, 8);
891 memcpy(ipi + 8, &target->initiator_ext, 8);
892 memcpy(tpi, &target->ioc_guid, 8);
893 memcpy(tpi + 8, &target->id_ext, 8);
895 memcpy(ipi, &target->initiator_ext, 8);
896 memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
897 memcpy(tpi, &target->id_ext, 8);
898 memcpy(tpi + 8, &target->ioc_guid, 8);
902 * Topspin/Cisco SRP targets will reject our login unless we
903 * zero out the first 8 bytes of our initiator port ID and set
904 * the second 8 bytes to the local node GUID.
906 if (srp_target_is_topspin(target)) {
907 shost_printk(KERN_DEBUG, target->scsi_host,
908 PFX "Topspin/Cisco initiator port ID workaround "
909 "activated for target GUID %016llx\n",
910 be64_to_cpu(target->ioc_guid));
912 memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
915 if (target->using_rdma_cm)
916 status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
918 status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
925 static bool srp_queue_remove_work(struct srp_target_port *target)
927 bool changed = false;
929 spin_lock_irq(&target->lock);
930 if (target->state != SRP_TARGET_REMOVED) {
931 target->state = SRP_TARGET_REMOVED;
934 spin_unlock_irq(&target->lock);
937 queue_work(srp_remove_wq, &target->remove_work);
942 static void srp_disconnect_target(struct srp_target_port *target)
944 struct srp_rdma_ch *ch;
947 /* XXX should send SRP_I_LOGOUT request */
949 for (i = 0; i < target->ch_count; i++) {
951 ch->connected = false;
953 if (target->using_rdma_cm) {
954 if (ch->rdma_cm.cm_id)
955 rdma_disconnect(ch->rdma_cm.cm_id);
958 ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
962 shost_printk(KERN_DEBUG, target->scsi_host,
963 PFX "Sending CM DREQ failed\n");
968 static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
970 struct srp_target_port *target = host_to_target(shost);
971 struct srp_device *dev = target->srp_host->srp_dev;
972 struct ib_device *ibdev = dev->dev;
973 struct srp_request *req = scsi_cmd_priv(cmd);
976 if (req->indirect_dma_addr) {
977 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
978 target->indirect_size,
981 kfree(req->indirect_desc);
986 static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
988 struct srp_target_port *target = host_to_target(shost);
989 struct srp_device *srp_dev = target->srp_host->srp_dev;
990 struct ib_device *ibdev = srp_dev->dev;
991 struct srp_request *req = scsi_cmd_priv(cmd);
995 if (srp_dev->use_fast_reg) {
996 req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
1001 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
1002 if (!req->indirect_desc)
1005 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
1006 target->indirect_size,
1008 if (ib_dma_mapping_error(ibdev, dma_addr)) {
1009 srp_exit_cmd_priv(shost, cmd);
1013 req->indirect_dma_addr = dma_addr;
1021 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1022 * @shost: SCSI host whose attributes to remove from sysfs.
 * Note: Any attributes that are defined in the host template but that did
 * not exist before this function was invoked are ignored.
1027 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
1029 const struct attribute_group **g;
1030 struct attribute **attr;
1032 for (g = shost->hostt->shost_groups; *g; ++g) {
1033 for (attr = (*g)->attrs; *attr; ++attr) {
1034 struct device_attribute *dev_attr =
1035 container_of(*attr, typeof(*dev_attr), attr);
1037 device_remove_file(&shost->shost_dev, dev_attr);
1042 static void srp_remove_target(struct srp_target_port *target)
1044 struct srp_rdma_ch *ch;
1047 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1049 srp_del_scsi_host_attr(target->scsi_host);
1050 srp_rport_get(target->rport);
1051 srp_remove_host(target->scsi_host);
1052 scsi_remove_host(target->scsi_host);
1053 srp_stop_rport_timers(target->rport);
1054 srp_disconnect_target(target);
1055 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
1056 for (i = 0; i < target->ch_count; i++) {
1057 ch = &target->ch[i];
1058 srp_free_ch_ib(target, ch);
1060 cancel_work_sync(&target->tl_err_work);
1061 srp_rport_put(target->rport);
1065 spin_lock(&target->srp_host->target_lock);
1066 list_del(&target->list);
1067 spin_unlock(&target->srp_host->target_lock);
1069 scsi_host_put(target->scsi_host);
1072 static void srp_remove_work(struct work_struct *work)
1074 struct srp_target_port *target =
1075 container_of(work, struct srp_target_port, remove_work);
1077 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1079 srp_remove_target(target);
1082 static void srp_rport_delete(struct srp_rport *rport)
1084 struct srp_target_port *target = rport->lld_data;
1086 srp_queue_remove_work(target);
1090 * srp_connected_ch() - number of connected channels
1091 * @target: SRP target port.
1093 static int srp_connected_ch(struct srp_target_port *target)
1097 for (i = 0; i < target->ch_count; i++)
1098 c += target->ch[i].connected;
1103 static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
1106 struct srp_target_port *target = ch->target;
1109 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
1111 ret = srp_lookup_path(ch);
1116 init_completion(&ch->done);
1117 ret = srp_send_req(ch, max_iu_len, multich);
1120 ret = wait_for_completion_interruptible(&ch->done);
1125 * The CM event handling code will set status to
1126 * SRP_PORT_REDIRECT if we get a port redirect REJ
1127 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1128 * redirect REJ back.
1133 ch->connected = true;
1136 case SRP_PORT_REDIRECT:
1137 ret = srp_lookup_path(ch);
1142 case SRP_DLID_REDIRECT:
1145 case SRP_STALE_CONN:
1146 shost_printk(KERN_ERR, target->scsi_host, PFX
1147 "giving up on stale connection\n");
1157 return ret <= 0 ? ret : -ENODEV;
1160 static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1162 srp_handle_qp_err(cq, wc, "INV RKEY");
1165 static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1168 struct ib_send_wr wr = {
1169 .opcode = IB_WR_LOCAL_INV,
1173 .ex.invalidate_rkey = rkey,
1176 wr.wr_cqe = &req->reg_cqe;
1177 req->reg_cqe.done = srp_inv_rkey_err_done;
1178 return ib_post_send(ch->qp, &wr, NULL);
1181 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1182 struct srp_rdma_ch *ch,
1183 struct srp_request *req)
1185 struct srp_target_port *target = ch->target;
1186 struct srp_device *dev = target->srp_host->srp_dev;
1187 struct ib_device *ibdev = dev->dev;
1190 if (!scsi_sglist(scmnd) ||
1191 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1192 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1195 if (dev->use_fast_reg) {
1196 struct srp_fr_desc **pfr;
1198 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1199 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
1201 shost_printk(KERN_ERR, target->scsi_host, PFX
1202 "Queueing INV WR for rkey %#x failed (%d)\n",
1203 (*pfr)->mr->rkey, res);
1204 queue_work(system_long_wq,
1205 &target->tl_err_work);
1209 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1213 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1214 scmnd->sc_data_direction);
1218 * srp_claim_req - Take ownership of the scmnd associated with a request.
1219 * @ch: SRP RDMA channel.
1220 * @req: SRP request.
1221 * @sdev: If not NULL, only take ownership for this SCSI device.
1222 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1223 * ownership of @req->scmnd if it equals @scmnd.
 * Return value:
 * Either NULL or a pointer to the SCSI command of which the caller became
 * the owner.
1228 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1229 struct srp_request *req,
1230 struct scsi_device *sdev,
1231 struct scsi_cmnd *scmnd)
1233 unsigned long flags;
1235 spin_lock_irqsave(&ch->lock, flags);
1237 (!sdev || req->scmnd->device == sdev) &&
1238 (!scmnd || req->scmnd == scmnd)) {
1244 spin_unlock_irqrestore(&ch->lock, flags);
1250 * srp_free_req() - Unmap data and adjust ch->req_lim.
1251 * @ch: SRP RDMA channel.
1252 * @req: Request to be freed.
1253 * @scmnd: SCSI command associated with @req.
1254 * @req_lim_delta: Amount to be added to @target->req_lim.
1256 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1257 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1259 unsigned long flags;
1261 srp_unmap_data(scmnd, ch, req);
1263 spin_lock_irqsave(&ch->lock, flags);
1264 ch->req_lim += req_lim_delta;
1265 spin_unlock_irqrestore(&ch->lock, flags);
1268 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1269 struct scsi_device *sdev, int result)
1271 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1274 srp_free_req(ch, req, scmnd, 0);
1275 scmnd->result = result;
1280 struct srp_terminate_context {
1281 struct srp_target_port *srp_target;
1285 static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr,
1288 struct srp_terminate_context *context = context_ptr;
1289 struct srp_target_port *target = context->srp_target;
1290 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
1291 struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
1292 struct srp_request *req = scsi_cmd_priv(scmnd);
1294 srp_finish_req(ch, req, NULL, context->scsi_result);
1299 static void srp_terminate_io(struct srp_rport *rport)
1301 struct srp_target_port *target = rport->lld_data;
1302 struct srp_terminate_context context = { .srp_target = target,
1303 .scsi_result = DID_TRANSPORT_FAILFAST << 16 };
1305 scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context);
1308 /* Calculate maximum initiator to target information unit length. */
1309 static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
1310 uint32_t max_it_iu_size)
1312 uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
1313 sizeof(struct srp_indirect_buf) +
1314 cmd_sg_cnt * sizeof(struct srp_direct_buf);
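/*
 * The sum above is the worst case without immediate data: an SRP_CMD header
 * with a maximum-size additional CDB, followed by an indirect descriptor
 * table holding one srp_direct_buf per supported scatter/gather entry.
 */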
1317 max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
1321 max_iu_len = min(max_iu_len, max_it_iu_size);
1323 pr_debug("max_iu_len = %d\n", max_iu_len);
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls occur while this function
 * is in progress. One way to achieve this is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls to this one via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
1337 static int srp_rport_reconnect(struct srp_rport *rport)
1339 struct srp_target_port *target = rport->lld_data;
1340 struct srp_rdma_ch *ch;
1341 uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
1343 target->max_it_iu_size);
1345 bool multich = false;
1347 srp_disconnect_target(target);
1349 if (target->state == SRP_TARGET_SCANNING)
1353 * Now get a new local CM ID so that we avoid confusing the target in
1354 * case things are really fouled up. Doing so also ensures that all CM
1355 * callbacks will have finished before a new QP is allocated.
1357 for (i = 0; i < target->ch_count; i++) {
1358 ch = &target->ch[i];
1359 ret += srp_new_cm_id(ch);
1362 struct srp_terminate_context context = {
1363 .srp_target = target, .scsi_result = DID_RESET << 16};
1365 scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd,
1368 for (i = 0; i < target->ch_count; i++) {
1369 ch = &target->ch[i];
1371 * Whether or not creating a new CM ID succeeded, create a new
1372 * QP. This guarantees that all completion callback function
1373 * invocations have finished before request resetting starts.
1375 ret += srp_create_ch_ib(ch);
1377 INIT_LIST_HEAD(&ch->free_tx);
1378 for (j = 0; j < target->queue_size; ++j)
1379 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1382 target->qp_in_error = false;
1384 for (i = 0; i < target->ch_count; i++) {
1385 ch = &target->ch[i];
1388 ret = srp_connect_ch(ch, max_iu_len, multich);
1393 shost_printk(KERN_INFO, target->scsi_host,
1394 PFX "reconnect succeeded\n");
1399 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1400 unsigned int dma_len, u32 rkey)
1402 struct srp_direct_buf *desc = state->desc;
1404 WARN_ON_ONCE(!dma_len);
1406 desc->va = cpu_to_be64(dma_addr);
1407 desc->key = cpu_to_be32(rkey);
1408 desc->len = cpu_to_be32(dma_len);
1410 state->total_len += dma_len;
1415 static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1417 srp_handle_qp_err(cq, wc, "FAST REG");
 * Map up to sg_nents elements of state->sg, where *sg_offset_p is the offset
 * at which to start in the first element. If sg_offset_p != NULL then
 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
 * byte that has not yet been mapped.
1426 static int srp_map_finish_fr(struct srp_map_state *state,
1427 struct srp_request *req,
1428 struct srp_rdma_ch *ch, int sg_nents,
1429 unsigned int *sg_offset_p)
1431 struct srp_target_port *target = ch->target;
1432 struct srp_device *dev = target->srp_host->srp_dev;
1433 struct ib_reg_wr wr;
1434 struct srp_fr_desc *desc;
1438 if (state->fr.next >= state->fr.end) {
1439 shost_printk(KERN_ERR, ch->target->scsi_host,
1440 PFX "Out of MRs (mr_per_cmd = %d)\n",
1441 ch->target->mr_per_cmd);
1445 WARN_ON_ONCE(!dev->use_fast_reg);
1447 if (sg_nents == 1 && target->global_rkey) {
1448 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1450 srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1451 sg_dma_len(state->sg) - sg_offset,
1452 target->global_rkey);
1458 desc = srp_fr_pool_get(ch->fr_pool);
1462 rkey = ib_inc_rkey(desc->mr->rkey);
1463 ib_update_fast_reg_key(desc->mr, rkey);
1465 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
1467 if (unlikely(n < 0)) {
1468 srp_fr_pool_put(ch->fr_pool, &desc, 1);
1469 pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
1470 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
1471 sg_offset_p ? *sg_offset_p : -1, n);
1475 WARN_ON_ONCE(desc->mr->length == 0);
1477 req->reg_cqe.done = srp_reg_mr_err_done;
1480 wr.wr.opcode = IB_WR_REG_MR;
1481 wr.wr.wr_cqe = &req->reg_cqe;
1483 wr.wr.send_flags = 0;
1485 wr.key = desc->mr->rkey;
1486 wr.access = (IB_ACCESS_LOCAL_WRITE |
1487 IB_ACCESS_REMOTE_READ |
1488 IB_ACCESS_REMOTE_WRITE);
1490 *state->fr.next++ = desc;
1493 srp_map_desc(state, desc->mr->iova,
1494 desc->mr->length, desc->mr->rkey);
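/*
 * The IB_WR_REG_MR work request is posted on the same send queue as the
 * SRP_CMD that will reference the new rkey, so the HCA is guaranteed to
 * process the registration before the command.
 */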
1496 err = ib_post_send(ch->qp, &wr.wr, NULL);
1497 if (unlikely(err)) {
1498 WARN_ON_ONCE(err == -ENOMEM);
1505 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1506 struct srp_request *req, struct scatterlist *scat,
1509 unsigned int sg_offset = 0;
1511 state->fr.next = req->fr_list;
1512 state->fr.end = req->fr_list + ch->target->mr_per_cmd;
1521 n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
1522 if (unlikely(n < 0))
1526 for (i = 0; i < n; i++)
1527 state->sg = sg_next(state->sg);
1533 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1534 struct srp_request *req, struct scatterlist *scat,
1537 struct srp_target_port *target = ch->target;
1538 struct scatterlist *sg;
1541 for_each_sg(scat, sg, count, i) {
1542 srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
1543 target->global_rkey);
1550 * Register the indirect data buffer descriptor with the HCA.
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory region.
 */
1556 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1557 void **next_mr, void **end_mr, u32 idb_len,
1560 struct srp_target_port *target = ch->target;
1561 struct srp_device *dev = target->srp_host->srp_dev;
1562 struct srp_map_state state;
1563 struct srp_direct_buf idb_desc;
1564 struct scatterlist idb_sg[1];
1567 memset(&state, 0, sizeof(state));
1568 memset(&idb_desc, 0, sizeof(idb_desc));
1569 state.gen.next = next_mr;
1570 state.gen.end = end_mr;
1571 state.desc = &idb_desc;
1572 state.base_dma_addr = req->indirect_dma_addr;
1573 state.dma_len = idb_len;
1575 if (dev->use_fast_reg) {
1577 sg_init_one(idb_sg, req->indirect_desc, idb_len);
1578 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1579 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1580 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1582 ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
1585 WARN_ON_ONCE(ret < 1);
1590 *idb_rkey = idb_desc.key;
1595 static void srp_check_mapping(struct srp_map_state *state,
1596 struct srp_rdma_ch *ch, struct srp_request *req,
1597 struct scatterlist *scat, int count)
1599 struct srp_device *dev = ch->target->srp_host->srp_dev;
1600 struct srp_fr_desc **pfr;
1601 u64 desc_len = 0, mr_len = 0;
1604 for (i = 0; i < state->ndesc; i++)
1605 desc_len += be32_to_cpu(req->indirect_desc[i].len);
1606 if (dev->use_fast_reg)
1607 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1608 mr_len += (*pfr)->mr->length;
1609 if (desc_len != scsi_bufflen(req->scmnd) ||
1610 mr_len > scsi_bufflen(req->scmnd))
1611 pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1612 scsi_bufflen(req->scmnd), desc_len, mr_len,
1613 state->ndesc, state->nmdesc);
1617 * srp_map_data() - map SCSI data buffer onto an SRP request
1618 * @scmnd: SCSI command to map
1619 * @ch: SRP RDMA channel
 * Returns the length in bytes of the SRP_CMD IU or a negative value if
 * mapping failed. The size of any immediate data is not included in the
 * returned length.
 */
1626 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1627 struct srp_request *req)
1629 struct srp_target_port *target = ch->target;
1630 struct scatterlist *scat, *sg;
1631 struct srp_cmd *cmd = req->cmd->buf;
1632 int i, len, nents, count, ret;
1633 struct srp_device *dev;
1634 struct ib_device *ibdev;
1635 struct srp_map_state state;
1636 struct srp_indirect_buf *indirect_hdr;
1638 u32 idb_len, table_len;
1642 req->cmd->num_sge = 1;
1644 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1645 return sizeof(struct srp_cmd) + cmd->add_cdb_len;
1647 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1648 scmnd->sc_data_direction != DMA_TO_DEVICE) {
1649 shost_printk(KERN_WARNING, target->scsi_host,
1650 PFX "Unhandled data direction %d\n",
1651 scmnd->sc_data_direction);
1655 nents = scsi_sg_count(scmnd);
1656 scat = scsi_sglist(scmnd);
1657 data_len = scsi_bufflen(scmnd);
1659 dev = target->srp_host->srp_dev;
1662 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1663 if (unlikely(count == 0))
1666 if (ch->use_imm_data &&
1667 count <= ch->max_imm_sge &&
1668 SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
1669 scmnd->sc_data_direction == DMA_TO_DEVICE) {
1670 struct srp_imm_buf *buf;
1671 struct ib_sge *sge = &req->cmd->sge[1];
1673 fmt = SRP_DATA_DESC_IMM;
1674 len = SRP_IMM_DATA_OFFSET;
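/*
 * Immediate data is carried in additional gather entries of the send work
 * request (sge[1] and up), so it needs neither memory registration nor a
 * descriptor table.
 */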
1676 buf = (void *)cmd->add_data + cmd->add_cdb_len;
1677 buf->len = cpu_to_be32(data_len);
1678 WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
1679 for_each_sg(scat, sg, count, i) {
1680 sge[i].addr = sg_dma_address(sg);
1681 sge[i].length = sg_dma_len(sg);
1682 sge[i].lkey = target->lkey;
1684 req->cmd->num_sge += count;
1688 fmt = SRP_DATA_DESC_DIRECT;
1689 len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1690 sizeof(struct srp_direct_buf);
1692 if (count == 1 && target->global_rkey) {
1694 * The midlayer only generated a single gather/scatter
1695 * entry, or DMA mapping coalesced everything to a
1696 * single entry. So a direct descriptor along with
1697 * the DMA MR suffices.
1699 struct srp_direct_buf *buf;
1701 buf = (void *)cmd->add_data + cmd->add_cdb_len;
1702 buf->va = cpu_to_be64(sg_dma_address(scat));
1703 buf->key = cpu_to_be32(target->global_rkey);
1704 buf->len = cpu_to_be32(sg_dma_len(scat));
1711 * We have more than one scatter/gather entry, so build our indirect
1712 * descriptor table, trying to merge as many entries as we can.
1714 indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
1716 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1717 target->indirect_size, DMA_TO_DEVICE);
1719 memset(&state, 0, sizeof(state));
1720 state.desc = req->indirect_desc;
1721 if (dev->use_fast_reg)
1722 ret = srp_map_sg_fr(&state, ch, req, scat, count);
1724 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1725 req->nmdesc = state.nmdesc;
1730 DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1731 "Memory mapping consistency check");
1732 if (DYNAMIC_DEBUG_BRANCH(ddm))
1733 srp_check_mapping(&state, ch, req, scat, count);
1736 /* We've mapped the request, now pull as much of the indirect
1737 * descriptor table as we can into the command buffer. If this
1738 * target is not using an external indirect table, we are
1739 * guaranteed to fit into the command, as the SCSI layer won't
1740 * give us more S/G entries than we allow.
1742 if (state.ndesc == 1) {
1744 * Memory registration collapsed the sg-list into one entry,
1745 * so use a direct descriptor.
1747 struct srp_direct_buf *buf;
1749 buf = (void *)cmd->add_data + cmd->add_cdb_len;
1750 *buf = req->indirect_desc[0];
1754 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1755 !target->allow_ext_sg)) {
1756 shost_printk(KERN_ERR, target->scsi_host,
1757 "Could not fit S/G list into SRP_CMD\n");
1762 count = min(state.ndesc, target->cmd_sg_cnt);
1763 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1764 idb_len = sizeof(struct srp_indirect_buf) + table_len;
1766 fmt = SRP_DATA_DESC_INDIRECT;
1767 len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1768 sizeof(struct srp_indirect_buf);
1769 len += count * sizeof (struct srp_direct_buf);
1771 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1772 count * sizeof (struct srp_direct_buf));
1774 if (!target->global_rkey) {
1775 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1776 idb_len, &idb_rkey);
1781 idb_rkey = cpu_to_be32(target->global_rkey);
1784 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1785 indirect_hdr->table_desc.key = idb_rkey;
1786 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1787 indirect_hdr->len = cpu_to_be32(state.total_len);
1789 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1790 cmd->data_out_desc_cnt = count;
1792 cmd->data_in_desc_cnt = count;
1794 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1798 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1799 cmd->buf_fmt = fmt << 4;
1806 srp_unmap_data(scmnd, ch, req);
1807 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
 * Return an IU and possibly a credit to the free pool
1815 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1816 enum srp_iu_type iu_type)
1818 unsigned long flags;
1820 spin_lock_irqsave(&ch->lock, flags);
1821 list_add(&iu->list, &ch->free_tx);
1822 if (iu_type != SRP_IU_RSP)
1824 spin_unlock_irqrestore(&ch->lock, flags);
1828 * Must be called with ch->lock held to protect req_lim and free_tx.
1829 * If IU is not sent, it must be returned using srp_put_tx_iu().
 * An upper limit for the number of allocated information units for each
 * request type is:
1834 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1835 * more than Scsi_Host.can_queue requests.
1836 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1837 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1838 * one unanswered SRP request to an initiator.
1840 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1841 enum srp_iu_type iu_type)
1843 struct srp_target_port *target = ch->target;
1844 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
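/*
 * Regular commands must leave SRP_TSK_MGMT_SQ_SIZE request-limit credits
 * unused so that a task management function (e.g. an abort for a stuck
 * command) can always be sent.
 */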
1847 lockdep_assert_held(&ch->lock);
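/*
 * The send CQ was created with IB_POLL_DIRECT, so send completions are
 * reaped here rather than from interrupt context; srp_send_done() returns
 * the completed IUs to ch->free_tx.
 */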
1849 ib_process_cq_direct(ch->send_cq, -1);
1851 if (list_empty(&ch->free_tx))
1854 /* Initiator responses to target requests do not consume credits */
1855 if (iu_type != SRP_IU_RSP) {
1856 if (ch->req_lim <= rsv) {
1857 ++target->zero_req_lim;
1864 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1865 list_del(&iu->list);
1870 * Note: if this function is called from inside ib_drain_sq() then it will
1871 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1872 * with status IB_WC_SUCCESS then that's a bug.
1874 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1876 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1877 struct srp_rdma_ch *ch = cq->cq_context;
1879 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1880 srp_handle_qp_err(cq, wc, "SEND");
1884 lockdep_assert_held(&ch->lock);
1886 list_add(&iu->list, &ch->free_tx);
1890 * srp_post_send() - send an SRP information unit
1891 * @ch: RDMA channel over which to send the information unit.
1892 * @iu: Information unit to send.
1893 * @len: Length of the information unit excluding immediate data.
1895 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1897 struct srp_target_port *target = ch->target;
1898 struct ib_send_wr wr;
1900 if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
1903 iu->sge[0].addr = iu->dma;
1904 iu->sge[0].length = len;
1905 iu->sge[0].lkey = target->lkey;
1907 iu->cqe.done = srp_send_done;
1910 wr.wr_cqe = &iu->cqe;
1911 wr.sg_list = &iu->sge[0];
1912 wr.num_sge = iu->num_sge;
1913 wr.opcode = IB_WR_SEND;
1914 wr.send_flags = IB_SEND_SIGNALED;
1916 return ib_post_send(ch->qp, &wr, NULL);
1919 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1921 struct srp_target_port *target = ch->target;
1922 struct ib_recv_wr wr;
1925 list.addr = iu->dma;
1926 list.length = iu->size;
1927 list.lkey = target->lkey;
1929 iu->cqe.done = srp_recv_done;
1932 wr.wr_cqe = &iu->cqe;
1936 return ib_post_recv(ch->qp, &wr, NULL);
1939 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1941 struct srp_target_port *target = ch->target;
1942 struct srp_request *req;
1943 struct scsi_cmnd *scmnd;
1944 unsigned long flags;
1946 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1947 spin_lock_irqsave(&ch->lock, flags);
1948 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1949 if (rsp->tag == ch->tsk_mgmt_tag) {
1950 ch->tsk_mgmt_status = -1;
1951 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1952 ch->tsk_mgmt_status = rsp->data[3];
1953 complete(&ch->tsk_mgmt_done);
1955 shost_printk(KERN_ERR, target->scsi_host,
1956 "Received tsk mgmt response too late for tag %#llx\n",
1959 spin_unlock_irqrestore(&ch->lock, flags);
1961 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1963 req = scsi_cmd_priv(scmnd);
1964 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1966 shost_printk(KERN_ERR, target->scsi_host,
1967 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1968 rsp->tag, ch - target->ch, ch->qp->qp_num);
1970 spin_lock_irqsave(&ch->lock, flags);
1971 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1972 spin_unlock_irqrestore(&ch->lock, flags);
1976 scmnd->result = rsp->status;
1978 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1979 memcpy(scmnd->sense_buffer, rsp->data +
1980 be32_to_cpu(rsp->resp_data_len),
1981 min_t(int, be32_to_cpu(rsp->sense_data_len),
1982 SCSI_SENSE_BUFFERSIZE));
1985 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1986 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1987 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1988 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1989 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1990 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1991 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1992 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1994 srp_free_req(ch, req, scmnd,
1995 be32_to_cpu(rsp->req_lim_delta));
2001 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
2004 struct srp_target_port *target = ch->target;
2005 struct ib_device *dev = target->srp_host->srp_dev->dev;
2006 unsigned long flags;
2010 spin_lock_irqsave(&ch->lock, flags);
2011 ch->req_lim += req_delta;
2012 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2013 spin_unlock_irqrestore(&ch->lock, flags);
2016 shost_printk(KERN_ERR, target->scsi_host, PFX
2017 "no IU available to send response\n");
2022 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2023 memcpy(iu->buf, rsp, len);
2024 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2026 err = srp_post_send(ch, iu, len);
2028 shost_printk(KERN_ERR, target->scsi_host, PFX
2029 "unable to post response: %d\n", err);
2030 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
2036 static void srp_process_cred_req(struct srp_rdma_ch *ch,
2037 struct srp_cred_req *req)
2039 struct srp_cred_rsp rsp = {
2040 .opcode = SRP_CRED_RSP,
2043 s32 delta = be32_to_cpu(req->req_lim_delta);
2045 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2046 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
2047 "problems processing SRP_CRED_REQ\n");
2050 static void srp_process_aer_req(struct srp_rdma_ch *ch,
2051 struct srp_aer_req *req)
2053 struct srp_target_port *target = ch->target;
2054 struct srp_aer_rsp rsp = {
2055 .opcode = SRP_AER_RSP,
2058 s32 delta = be32_to_cpu(req->req_lim_delta);
2060 shost_printk(KERN_ERR, target->scsi_host, PFX
2061 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
2063 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2064 shost_printk(KERN_ERR, target->scsi_host, PFX
2065 "problems processing SRP_AER_REQ\n");
2068 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2070 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2071 struct srp_rdma_ch *ch = cq->cq_context;
2072 struct srp_target_port *target = ch->target;
2073 struct ib_device *dev = target->srp_host->srp_dev->dev;
2077 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2078 srp_handle_qp_err(cq, wc, "RECV");
2082 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2085 opcode = *(u8 *) iu->buf;
2088 shost_printk(KERN_ERR, target->scsi_host,
2089 PFX "recv completion, opcode 0x%02x\n", opcode);
2090 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2091 iu->buf, wc->byte_len, true);
2096 srp_process_rsp(ch, iu->buf);
2100 srp_process_cred_req(ch, iu->buf);
2104 srp_process_aer_req(ch, iu->buf);
2108 /* XXX Handle target logout */
2109 shost_printk(KERN_WARNING, target->scsi_host,
2110 PFX "Got target logout request\n");
2114 shost_printk(KERN_WARNING, target->scsi_host,
2115 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
2119 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2122 res = srp_post_recv(ch, iu);
2124 shost_printk(KERN_ERR, target->scsi_host,
2125 PFX "Recv failed with error code %d\n", res);
2129 * srp_tl_err_work() - handle a transport layer error
2130 * @work: Work structure embedded in an SRP target port.
2132 * Note: This function may get invoked before the rport has been created,
2133 * hence the target->rport test.
2135 static void srp_tl_err_work(struct work_struct *work)
2137 struct srp_target_port *target;
2139 target = container_of(work, struct srp_target_port, tl_err_work);
2141 srp_start_tl_fail_timers(target->rport);
2144 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2147 struct srp_rdma_ch *ch = cq->cq_context;
2148 struct srp_target_port *target = ch->target;
2150 if (ch->connected && !target->qp_in_error) {
2151 shost_printk(KERN_ERR, target->scsi_host,
2152 PFX "failed %s status %s (%d) for CQE %p\n",
2153 opname, ib_wc_status_msg(wc->status), wc->status,
2155 queue_work(system_long_wq, &target->tl_err_work);
2157 target->qp_in_error = true;
2160 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2162 struct request *rq = scsi_cmd_to_rq(scmnd);
2163 struct srp_target_port *target = host_to_target(shost);
2164 struct srp_rdma_ch *ch;
2165 struct srp_request *req = scsi_cmd_priv(scmnd);
2167 struct srp_cmd *cmd;
2168 struct ib_device *dev;
2169 unsigned long flags;
2173 scmnd->result = srp_chkready(target->rport);
2174 if (unlikely(scmnd->result))
2177 WARN_ON_ONCE(rq->tag < 0);
2178 tag = blk_mq_unique_tag(rq);
2179 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
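/*
 * The blk-mq hardware queue index embedded in the tag selects the RDMA
 * channel, so all commands of a given hardware queue share the same QP and
 * completion vector.
 */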
2181 spin_lock_irqsave(&ch->lock, flags);
2182 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2183 spin_unlock_irqrestore(&ch->lock, flags);
2188 dev = target->srp_host->srp_dev->dev;
2189 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2193 memset(cmd, 0, sizeof *cmd);
2195 cmd->opcode = SRP_CMD;
2196 int_to_scsilun(scmnd->device->lun, &cmd->lun);
2198 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2199 if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2200 cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2202 if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2209 len = srp_map_data(scmnd, ch, req);
2211 shost_printk(KERN_ERR, target->scsi_host,
2212 PFX "Failed to map data (%d)\n", len);
2214 * If we ran out of memory descriptors (-ENOMEM) because an
2215 * application is queuing many requests with more than
2216 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2217 * to reduce queue depth temporarily.
2219 scmnd->result = len == -ENOMEM ?
2220 DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
2224 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2227 if (srp_post_send(ch, iu, len)) {
2228 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2229 scmnd->result = DID_ERROR << 16;
2236 srp_unmap_data(scmnd, ch, req);
2239 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
 * Prevent the loops that iterate over the request ring from encountering a
 * dangling SCSI command pointer.
2248 if (scmnd->result) {
2252 ret = SCSI_MLQUEUE_HOST_BUSY;
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
2262 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2264 struct srp_target_port *target = ch->target;
2267 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2271 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2276 for (i = 0; i < target->queue_size; ++i) {
2277 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2279 GFP_KERNEL, DMA_FROM_DEVICE);
2280 if (!ch->rx_ring[i])
2284 for (i = 0; i < target->queue_size; ++i) {
2285 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2287 GFP_KERNEL, DMA_TO_DEVICE);
2288 if (!ch->tx_ring[i])
2291 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2297 for (i = 0; i < target->queue_size; ++i) {
2298 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2299 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2312 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2314 uint64_t T_tr_ns, max_compl_time_ms;
2315 uint32_t rq_tmo_jiffies;
2318 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2319 * table 91), both the QP timeout and the retry count have to be set
 * for RC QPs during the RTR to RTS transition.
2322 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2323 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2326 * Set target->rq_tmo_jiffies to one second more than the largest time
2327 * it can take before an error completion is generated. See also
2328 * C9-140..142 in the IBTA spec for more information about how to
2329 * convert the QP Local ACK Timeout value to nanoseconds.
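 *
 * Worked example (illustrative values only): with qp_attr->timeout = 14 and
 * qp_attr->retry_cnt = 7, T_tr_ns = 4096 * 2^14 ns ~= 67 ms, the worst-case
 * completion time is 7 * 4 * 67 ms ~= 1.9 s, and rq_tmo_jiffies therefore
 * corresponds to roughly 2.9 seconds.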
2331 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2332 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2333 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2334 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2336 return rq_tmo_jiffies;
2339 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2340 const struct srp_login_rsp *lrsp,
2341 struct srp_rdma_ch *ch)
2343 struct srp_target_port *target = ch->target;
2344 struct ib_qp_attr *qp_attr = NULL;
2349 if (lrsp->opcode == SRP_LOGIN_RSP) {
2350 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2351 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2352 ch->use_imm_data = srp_use_imm_data &&
2353 (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
2354 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
2356 target->max_it_iu_size);
2357 WARN_ON_ONCE(ch->max_it_iu_len >
2358 be32_to_cpu(lrsp->max_it_iu_len));
2360 if (ch->use_imm_data)
2361 shost_printk(KERN_DEBUG, target->scsi_host,
2362 PFX "using immediate data\n");
2365 * Reserve credits for task management so we don't
2366 * bounce requests back to the SCSI mid-layer.
2368 target->scsi_host->can_queue
2369 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2370 target->scsi_host->can_queue);
2371 target->scsi_host->cmd_per_lun
2372 = min_t(int, target->scsi_host->can_queue,
2373 target->scsi_host->cmd_per_lun);
2375 shost_printk(KERN_WARNING, target->scsi_host,
2376 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2382 ret = srp_alloc_iu_bufs(ch);
2387 for (i = 0; i < target->queue_size; i++) {
2388 struct srp_iu *iu = ch->rx_ring[i];
2390 ret = srp_post_recv(ch, iu);
2395 if (!target->using_rdma_cm) {
2397 qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2401 qp_attr->qp_state = IB_QPS_RTR;
2402 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2406 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2410 qp_attr->qp_state = IB_QPS_RTS;
2411 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2415 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2417 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2421 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2431 static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
2432 const struct ib_cm_event *event,
2433 struct srp_rdma_ch *ch)
2435 struct srp_target_port *target = ch->target;
2436 struct Scsi_Host *shost = target->scsi_host;
2437 struct ib_class_port_info *cpi;
2441 switch (event->param.rej_rcvd.reason) {
2442 case IB_CM_REJ_PORT_CM_REDIRECT:
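		/*
		 * The target's CM redirected us: pick up the new LID, P_Key,
		 * remote CM QPN and GID from the class port info so that the
		 * caller can retry the login against the redirected port.
		 */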
2443 cpi = event->param.rej_rcvd.ari;
2444 dlid = be16_to_cpu(cpi->redirect_lid);
2445 sa_path_set_dlid(&ch->ib_cm.path, dlid);
2446 ch->ib_cm.path.pkey = cpi->redirect_pkey;
2447 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2448 memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
2450 ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2453 case IB_CM_REJ_PORT_REDIRECT:
2454 if (srp_target_is_topspin(target)) {
2455 union ib_gid *dgid = &ch->ib_cm.path.dgid;
2458 * Topspin/Cisco SRP gateways incorrectly send
2459 * reject reason code 25 when they mean 24 (port redirect).
2462 memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
2464 shost_printk(KERN_DEBUG, shost,
2465 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2466 be64_to_cpu(dgid->global.subnet_prefix),
2467 be64_to_cpu(dgid->global.interface_id));
2469 ch->status = SRP_PORT_REDIRECT;
2471 shost_printk(KERN_WARNING, shost,
2472 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2473 ch->status = -ECONNRESET;
2477 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2478 shost_printk(KERN_WARNING, shost,
2479 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2480 ch->status = -ECONNRESET;
2483 case IB_CM_REJ_CONSUMER_DEFINED:
2484 opcode = *(u8 *) event->private_data;
2485 if (opcode == SRP_LOGIN_REJ) {
2486 struct srp_login_rej *rej = event->private_data;
2487 u32 reason = be32_to_cpu(rej->reason);
2489 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2490 shost_printk(KERN_WARNING, shost,
2491 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2493 shost_printk(KERN_WARNING, shost, PFX
2494 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2496 target->ib_cm.orig_dgid.raw,
2499 shost_printk(KERN_WARNING, shost,
2500 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2501 " opcode 0x%02x\n", opcode);
2502 ch->status = -ECONNRESET;
2505 case IB_CM_REJ_STALE_CONN:
2506 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2507 ch->status = SRP_STALE_CONN;
2511 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2512 event->param.rej_rcvd.reason);
2513 ch->status = -ECONNRESET;
2517 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
2518 const struct ib_cm_event *event)
2520 struct srp_rdma_ch *ch = cm_id->context;
2521 struct srp_target_port *target = ch->target;
2524 switch (event->event) {
2525 case IB_CM_REQ_ERROR:
2526 shost_printk(KERN_DEBUG, target->scsi_host,
2527 PFX "Sending CM REQ failed\n");
2529 ch->status = -ECONNRESET;
2532 case IB_CM_REP_RECEIVED:
2534 srp_cm_rep_handler(cm_id, event->private_data, ch);
2537 case IB_CM_REJ_RECEIVED:
2538 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2541 srp_ib_cm_rej_handler(cm_id, event, ch);
2544 case IB_CM_DREQ_RECEIVED:
2545 shost_printk(KERN_WARNING, target->scsi_host,
2546 PFX "DREQ received - connection closed\n");
2547 ch->connected = false;
2548 if (ib_send_cm_drep(cm_id, NULL, 0))
2549 shost_printk(KERN_ERR, target->scsi_host,
2550 PFX "Sending CM DREP failed\n");
2551 queue_work(system_long_wq, &target->tl_err_work);
2554 case IB_CM_TIMEWAIT_EXIT:
2555 shost_printk(KERN_ERR, target->scsi_host,
2556 PFX "connection closed\n");
2562 case IB_CM_MRA_RECEIVED:
2563 case IB_CM_DREQ_ERROR:
2564 case IB_CM_DREP_RECEIVED:
2568 shost_printk(KERN_WARNING, target->scsi_host,
2569 PFX "Unhandled CM event %d\n", event->event);
2574 complete(&ch->done);
2579 static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2580 struct rdma_cm_event *event)
2582 struct srp_target_port *target = ch->target;
2583 struct Scsi_Host *shost = target->scsi_host;
2586 switch (event->status) {
2587 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2588 shost_printk(KERN_WARNING, shost,
2589 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2590 ch->status = -ECONNRESET;
2593 case IB_CM_REJ_CONSUMER_DEFINED:
2594 opcode = *(u8 *) event->param.conn.private_data;
2595 if (opcode == SRP_LOGIN_REJ) {
2596 struct srp_login_rej *rej =
2597 (struct srp_login_rej *)
2598 event->param.conn.private_data;
2599 u32 reason = be32_to_cpu(rej->reason);
2601 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2602 shost_printk(KERN_WARNING, shost,
2603 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2605 shost_printk(KERN_WARNING, shost,
2606 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
2608 shost_printk(KERN_WARNING, shost,
2609 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2612 ch->status = -ECONNRESET;
2615 case IB_CM_REJ_STALE_CONN:
2616 shost_printk(KERN_WARNING, shost,
2617 " REJ reason: stale connection\n");
2618 ch->status = SRP_STALE_CONN;
2622 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2624 ch->status = -ECONNRESET;
2629 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
2630 struct rdma_cm_event *event)
2632 struct srp_rdma_ch *ch = cm_id->context;
2633 struct srp_target_port *target = ch->target;
2636 switch (event->event) {
2637 case RDMA_CM_EVENT_ADDR_RESOLVED:
2642 case RDMA_CM_EVENT_ADDR_ERROR:
2643 ch->status = -ENXIO;
2647 case RDMA_CM_EVENT_ROUTE_RESOLVED:
2652 case RDMA_CM_EVENT_ROUTE_ERROR:
2653 case RDMA_CM_EVENT_UNREACHABLE:
2654 ch->status = -EHOSTUNREACH;
2658 case RDMA_CM_EVENT_CONNECT_ERROR:
2659 shost_printk(KERN_DEBUG, target->scsi_host,
2660 PFX "Sending CM REQ failed\n");
2662 ch->status = -ECONNRESET;
2665 case RDMA_CM_EVENT_ESTABLISHED:
2667 srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2670 case RDMA_CM_EVENT_REJECTED:
2671 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2674 srp_rdma_cm_rej_handler(ch, event);
2677 case RDMA_CM_EVENT_DISCONNECTED:
2678 if (ch->connected) {
2679 shost_printk(KERN_WARNING, target->scsi_host,
2680 PFX "received DREQ\n");
2681 rdma_disconnect(ch->rdma_cm.cm_id);
2684 queue_work(system_long_wq, &target->tl_err_work);
2688 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2689 shost_printk(KERN_ERR, target->scsi_host,
2690 PFX "connection closed\n");
2697 shost_printk(KERN_WARNING, target->scsi_host,
2698 PFX "Unhandled CM event %d\n", event->event);
2703 complete(&ch->done);
2709 * srp_change_queue_depth - set the queue depth of a SCSI device
2710 * @sdev: scsi device struct
2711 * @qdepth: requested queue depth
2713 * Returns queue depth.
2716 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2718 if (!sdev->tagged_supported)
2720 return scsi_change_queue_depth(sdev, qdepth);
2723 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2724 u8 func, u8 *status)
2726 struct srp_target_port *target = ch->target;
2727 struct srp_rport *rport = target->rport;
2728 struct ib_device *dev = target->srp_host->srp_dev->dev;
2730 struct srp_tsk_mgmt *tsk_mgmt;
2733 if (!ch->connected || target->qp_in_error)
2737 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2738 * invoked while a task management function is being sent.
2740 mutex_lock(&rport->mutex);
2741 spin_lock_irq(&ch->lock);
2742 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2743 spin_unlock_irq(&ch->lock);
2746 mutex_unlock(&rport->mutex);
2753 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2756 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2758 tsk_mgmt->opcode = SRP_TSK_MGMT;
2759 int_to_scsilun(lun, &tsk_mgmt->lun);
2760 tsk_mgmt->tsk_mgmt_func = func;
2761 tsk_mgmt->task_tag = req_tag;
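	/*
	 * Allocate a tag for this task management request under the channel
	 * lock; OR-ing in SRP_TAG_TSK_MGMT keeps these tags disjoint from the
	 * block layer tags used for regular SCSI commands.
	 */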
2763 spin_lock_irq(&ch->lock);
2764 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2765 tsk_mgmt->tag = ch->tsk_mgmt_tag;
2766 spin_unlock_irq(&ch->lock);
2768 init_completion(&ch->tsk_mgmt_done);
2770 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2772 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2773 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2774 mutex_unlock(&rport->mutex);
2778 res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2779 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2780 if (res > 0 && status)
2781 *status = ch->tsk_mgmt_status;
2782 mutex_unlock(&rport->mutex);
2784 WARN_ON_ONCE(res < 0);
2786 return res > 0 ? 0 : -1;
2789 static int srp_abort(struct scsi_cmnd *scmnd)
2791 struct srp_target_port *target = host_to_target(scmnd->device->host);
2792 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2795 struct srp_rdma_ch *ch;
2798 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2802 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
2803 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2804 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2806 ch = &target->ch[ch_idx];
2807 if (!srp_claim_req(ch, req, NULL, scmnd))
2809 shost_printk(KERN_ERR, target->scsi_host,
2810 "Sending SRP abort for tag %#x\n", tag);
2811 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2812 SRP_TSK_ABORT_TASK, NULL) == 0)
2814 else if (target->rport->state == SRP_RPORT_LOST)
2818 if (ret == SUCCESS) {
2819 srp_free_req(ch, req, scmnd, 0);
2820 scmnd->result = DID_ABORT << 16;
2827 static int srp_reset_device(struct scsi_cmnd *scmnd)
2829 struct srp_target_port *target = host_to_target(scmnd->device->host);
2830 struct srp_rdma_ch *ch;
2833 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2835 ch = &target->ch[0];
2836 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2837 SRP_TSK_LUN_RESET, &status))
2845 static int srp_reset_host(struct scsi_cmnd *scmnd)
2847 struct srp_target_port *target = host_to_target(scmnd->device->host);
2849 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2851 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2854 static int srp_target_alloc(struct scsi_target *starget)
2856 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2857 struct srp_target_port *target = host_to_target(shost);
2859 if (target->target_can_queue)
2860 starget->can_queue = target->target_can_queue;
2864 static int srp_slave_configure(struct scsi_device *sdev)
2866 struct Scsi_Host *shost = sdev->host;
2867 struct srp_target_port *target = host_to_target(shost);
2868 struct request_queue *q = sdev->request_queue;
2869 unsigned long timeout;
2871 if (sdev->type == TYPE_DISK) {
2872 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2873 blk_queue_rq_timeout(q, timeout);
2879 static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr,
2882 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2884 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2887 static DEVICE_ATTR_RO(id_ext);
2889 static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr,
2892 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2894 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2897 static DEVICE_ATTR_RO(ioc_guid);
2899 static ssize_t service_id_show(struct device *dev,
2900 struct device_attribute *attr, char *buf)
2902 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2904 if (target->using_rdma_cm)
2906 return sysfs_emit(buf, "0x%016llx\n",
2907 be64_to_cpu(target->ib_cm.service_id));
2910 static DEVICE_ATTR_RO(service_id);
2912 static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
2915 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2917 if (target->using_rdma_cm)
2920 return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
2923 static DEVICE_ATTR_RO(pkey);
2925 static ssize_t sgid_show(struct device *dev, struct device_attribute *attr,
2928 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2930 return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
2933 static DEVICE_ATTR_RO(sgid);
2935 static ssize_t dgid_show(struct device *dev, struct device_attribute *attr,
2938 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2939 struct srp_rdma_ch *ch = &target->ch[0];
2941 if (target->using_rdma_cm)
2944 return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
2947 static DEVICE_ATTR_RO(dgid);
2949 static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr,
2952 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2954 if (target->using_rdma_cm)
2957 return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
2960 static DEVICE_ATTR_RO(orig_dgid);
2962 static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr,
2965 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2966 struct srp_rdma_ch *ch;
2967 int i, req_lim = INT_MAX;
2969 for (i = 0; i < target->ch_count; i++) {
2970 ch = &target->ch[i];
2971 req_lim = min(req_lim, ch->req_lim);
2974 return sysfs_emit(buf, "%d\n", req_lim);
2977 static DEVICE_ATTR_RO(req_lim);
2979 static ssize_t zero_req_lim_show(struct device *dev,
2980 struct device_attribute *attr, char *buf)
2982 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2984 return sysfs_emit(buf, "%d\n", target->zero_req_lim);
2987 static DEVICE_ATTR_RO(zero_req_lim);
2989 static ssize_t local_ib_port_show(struct device *dev,
2990 struct device_attribute *attr, char *buf)
2992 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2994 return sysfs_emit(buf, "%d\n", target->srp_host->port);
2997 static DEVICE_ATTR_RO(local_ib_port);
2999 static ssize_t local_ib_device_show(struct device *dev,
3000 struct device_attribute *attr, char *buf)
3002 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3004 return sysfs_emit(buf, "%s\n",
3005 dev_name(&target->srp_host->srp_dev->dev->dev));
3008 static DEVICE_ATTR_RO(local_ib_device);
3010 static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr,
3013 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3015 return sysfs_emit(buf, "%d\n", target->ch_count);
3018 static DEVICE_ATTR_RO(ch_count);
3020 static ssize_t comp_vector_show(struct device *dev,
3021 struct device_attribute *attr, char *buf)
3023 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3025 return sysfs_emit(buf, "%d\n", target->comp_vector);
3028 static DEVICE_ATTR_RO(comp_vector);
3030 static ssize_t tl_retry_count_show(struct device *dev,
3031 struct device_attribute *attr, char *buf)
3033 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3035 return sysfs_emit(buf, "%d\n", target->tl_retry_count);
3038 static DEVICE_ATTR_RO(tl_retry_count);
3040 static ssize_t cmd_sg_entries_show(struct device *dev,
3041 struct device_attribute *attr, char *buf)
3043 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3045 return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
3048 static DEVICE_ATTR_RO(cmd_sg_entries);
3050 static ssize_t allow_ext_sg_show(struct device *dev,
3051 struct device_attribute *attr, char *buf)
3053 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3055 return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
3058 static DEVICE_ATTR_RO(allow_ext_sg);
3060 static struct attribute *srp_host_attrs[] = {
3061 &dev_attr_id_ext.attr,
3062 &dev_attr_ioc_guid.attr,
3063 &dev_attr_service_id.attr,
3064 &dev_attr_pkey.attr,
3065 &dev_attr_sgid.attr,
3066 &dev_attr_dgid.attr,
3067 &dev_attr_orig_dgid.attr,
3068 &dev_attr_req_lim.attr,
3069 &dev_attr_zero_req_lim.attr,
3070 &dev_attr_local_ib_port.attr,
3071 &dev_attr_local_ib_device.attr,
3072 &dev_attr_ch_count.attr,
3073 &dev_attr_comp_vector.attr,
3074 &dev_attr_tl_retry_count.attr,
3075 &dev_attr_cmd_sg_entries.attr,
3076 &dev_attr_allow_ext_sg.attr,
3080 ATTRIBUTE_GROUPS(srp_host);
3082 static struct scsi_host_template srp_template = {
3083 .module = THIS_MODULE,
3084 .name = "InfiniBand SRP initiator",
3085 .proc_name = DRV_NAME,
3086 .target_alloc = srp_target_alloc,
3087 .slave_configure = srp_slave_configure,
3088 .info = srp_target_info,
3089 .init_cmd_priv = srp_init_cmd_priv,
3090 .exit_cmd_priv = srp_exit_cmd_priv,
3091 .queuecommand = srp_queuecommand,
3092 .change_queue_depth = srp_change_queue_depth,
3093 .eh_timed_out = srp_timed_out,
3094 .eh_abort_handler = srp_abort,
3095 .eh_device_reset_handler = srp_reset_device,
3096 .eh_host_reset_handler = srp_reset_host,
3097 .skip_settle_delay = true,
3098 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
3099 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
3101 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
3102 .shost_groups = srp_host_groups,
3103 .track_queue_depth = 1,
3104 .cmd_size = sizeof(struct srp_request),
3107 static int srp_sdev_count(struct Scsi_Host *host)
3109 struct scsi_device *sdev;
3112 shost_for_each_device(sdev, host)
3120 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3121 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3122 * removal has been scheduled.
3123 * 0 and target->state != SRP_TARGET_REMOVED upon success.
3125 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
3127 struct srp_rport_identifiers ids;
3128 struct srp_rport *rport;
3130 target->state = SRP_TARGET_SCANNING;
3131 sprintf(target->target_name, "SRP.T10:%016llX",
3132 be64_to_cpu(target->id_ext));
3134 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
3137 memcpy(ids.port_id, &target->id_ext, 8);
3138 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
3139 ids.roles = SRP_RPORT_ROLE_TARGET;
3140 rport = srp_rport_add(target->scsi_host, &ids);
3141 if (IS_ERR(rport)) {
3142 scsi_remove_host(target->scsi_host);
3143 return PTR_ERR(rport);
3146 rport->lld_data = target;
3147 target->rport = rport;
3149 spin_lock(&host->target_lock);
3150 list_add_tail(&target->list, &host->target_list);
3151 spin_unlock(&host->target_lock);
3153 scsi_scan_target(&target->scsi_host->shost_gendev,
3154 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
3156 if (srp_connected_ch(target) < target->ch_count ||
3157 target->qp_in_error) {
3158 shost_printk(KERN_INFO, target->scsi_host,
3159 PFX "SCSI scan failed - removing SCSI host\n");
3160 srp_queue_remove_work(target);
3164 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
3165 dev_name(&target->scsi_host->shost_gendev),
3166 srp_sdev_count(target->scsi_host));
3168 spin_lock_irq(&target->lock);
3169 if (target->state == SRP_TARGET_SCANNING)
3170 target->state = SRP_TARGET_LIVE;
3171 spin_unlock_irq(&target->lock);
3177 static void srp_release_dev(struct device *dev)
3179 struct srp_host *host =
3180 container_of(dev, struct srp_host, dev);
3182 complete(&host->released);
3185 static struct class srp_class = {
3186 .name = "infiniband_srp",
3187 .dev_release = srp_release_dev
3191 * srp_conn_unique() - check whether the connection to a target is unique
3193 * @target: SRP target port.
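 *
 * A target port counts as a duplicate if an existing (non-removed) target
 * port on the same SRP host matches it in id_ext, ioc_guid and
 * initiator_ext.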
3195 static bool srp_conn_unique(struct srp_host *host,
3196 struct srp_target_port *target)
3198 struct srp_target_port *t;
3201 if (target->state == SRP_TARGET_REMOVED)
3206 spin_lock(&host->target_lock);
3207 list_for_each_entry(t, &host->target_list, list) {
3209 target->id_ext == t->id_ext &&
3210 target->ioc_guid == t->ioc_guid &&
3211 target->initiator_ext == t->initiator_ext) {
3216 spin_unlock(&host->target_lock);
3223 * Target ports are added by writing
3225 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3226 * pkey=<P_Key>,service_id=<service ID>
3228 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
3229 * [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
3231 * to the add_target sysfs attribute.
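 *
 * For example (hypothetical HCA name and identifiers):
 *
 *   echo "id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4" > /sys/class/infiniband_srp/srp-mlx5_0-1/add_target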
3235 SRP_OPT_ID_EXT = 1 << 0,
3236 SRP_OPT_IOC_GUID = 1 << 1,
3237 SRP_OPT_DGID = 1 << 2,
3238 SRP_OPT_PKEY = 1 << 3,
3239 SRP_OPT_SERVICE_ID = 1 << 4,
3240 SRP_OPT_MAX_SECT = 1 << 5,
3241 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
3242 SRP_OPT_IO_CLASS = 1 << 7,
3243 SRP_OPT_INITIATOR_EXT = 1 << 8,
3244 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
3245 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3246 SRP_OPT_SG_TABLESIZE = 1 << 11,
3247 SRP_OPT_COMP_VECTOR = 1 << 12,
3248 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
3249 SRP_OPT_QUEUE_SIZE = 1 << 14,
3250 SRP_OPT_IP_SRC = 1 << 15,
3251 SRP_OPT_IP_DEST = 1 << 16,
3252 SRP_OPT_TARGET_CAN_QUEUE = 1 << 17,
3253 SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
3254 SRP_OPT_CH_COUNT = 1 << 19,
3257 static unsigned int srp_opt_mandatory[] = {
3268 static const match_table_t srp_opt_tokens = {
3269 { SRP_OPT_ID_EXT, "id_ext=%s" },
3270 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3271 { SRP_OPT_DGID, "dgid=%s" },
3272 { SRP_OPT_PKEY, "pkey=%x" },
3273 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3274 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3275 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
3276 { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" },
3277 { SRP_OPT_IO_CLASS, "io_class=%x" },
3278 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
3279 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
3280 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3281 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
3282 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
3283 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
3284 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
3285 { SRP_OPT_IP_SRC, "src=%s" },
3286 { SRP_OPT_IP_DEST, "dest=%s" },
3287 { SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
3288 { SRP_OPT_CH_COUNT, "ch_count=%u" },
3289 { SRP_OPT_ERR, NULL }
3293 * srp_parse_in - parse an IP address and port number combination
3294 * @net: [in] Network namespace.
3295 * @sa: [out] Address family, IP address and port number.
3296 * @addr_port_str: [in] IP address and port number.
3297 * @has_port: [out] Whether or not @addr_port_str includes a port number.
3299 * Parse the following address formats:
3300 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3301 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3303 static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3304 const char *addr_port_str, bool *has_port)
3306 char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3312 port_str = strrchr(addr, ':');
3313 if (port_str && strchr(port_str, ']'))
3318 *has_port = port_str != NULL;
3319 ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3320 if (ret && addr[0]) {
3321 addr_end = addr + strlen(addr) - 1;
3322 if (addr[0] == '[' && *addr_end == ']') {
3324 ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3329 pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
3333 static int srp_parse_options(struct net *net, const char *buf,
3334 struct srp_target_port *target)
3336 char *options, *sep_opt;
3338 substring_t args[MAX_OPT_ARGS];
3339 unsigned long long ull;
3346 options = kstrdup(buf, GFP_KERNEL);
3351 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3355 token = match_token(p, srp_opt_tokens, args);
3359 case SRP_OPT_ID_EXT:
3360 p = match_strdup(args);
3365 ret = kstrtoull(p, 16, &ull);
3367 pr_warn("invalid id_ext parameter '%s'\n", p);
3371 target->id_ext = cpu_to_be64(ull);
3375 case SRP_OPT_IOC_GUID:
3376 p = match_strdup(args);
3381 ret = kstrtoull(p, 16, &ull);
3383 pr_warn("invalid ioc_guid parameter '%s'\n", p);
3387 target->ioc_guid = cpu_to_be64(ull);
3392 p = match_strdup(args);
3397 if (strlen(p) != 32) {
3398 pr_warn("bad dest GID parameter '%s'\n", p);
3403 ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
3410 if (match_hex(args, &token)) {
3411 pr_warn("bad P_Key parameter '%s'\n", p);
3414 target->ib_cm.pkey = cpu_to_be16(token);
3417 case SRP_OPT_SERVICE_ID:
3418 p = match_strdup(args);
3423 ret = kstrtoull(p, 16, &ull);
3425 pr_warn("bad service_id parameter '%s'\n", p);
3429 target->ib_cm.service_id = cpu_to_be64(ull);
3433 case SRP_OPT_IP_SRC:
3434 p = match_strdup(args);
3439 ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
3442 pr_warn("bad source parameter '%s'\n", p);
3446 target->rdma_cm.src_specified = true;
3450 case SRP_OPT_IP_DEST:
3451 p = match_strdup(args);
3456 ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
3461 pr_warn("bad dest parameter '%s'\n", p);
3465 target->using_rdma_cm = true;
3469 case SRP_OPT_MAX_SECT:
3470 if (match_int(args, &token)) {
3471 pr_warn("bad max sect parameter '%s'\n", p);
3474 target->scsi_host->max_sectors = token;
3477 case SRP_OPT_QUEUE_SIZE:
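		/*
		 * queue_size sets the SCSI command queue depth; the channel
		 * queue is sized larger to leave headroom for SRP_RSP and
		 * task management IUs.
		 */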
3478 if (match_int(args, &token) || token < 1) {
3479 pr_warn("bad queue_size parameter '%s'\n", p);
3482 target->scsi_host->can_queue = token;
3483 target->queue_size = token + SRP_RSP_SQ_SIZE +
3484 SRP_TSK_MGMT_SQ_SIZE;
3485 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3486 target->scsi_host->cmd_per_lun = token;
3489 case SRP_OPT_MAX_CMD_PER_LUN:
3490 if (match_int(args, &token) || token < 1) {
3491 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3495 target->scsi_host->cmd_per_lun = token;
3498 case SRP_OPT_TARGET_CAN_QUEUE:
3499 if (match_int(args, &token) || token < 1) {
3500 pr_warn("bad max target_can_queue parameter '%s'\n",
3504 target->target_can_queue = token;
3507 case SRP_OPT_IO_CLASS:
3508 if (match_hex(args, &token)) {
3509 pr_warn("bad IO class parameter '%s'\n", p);
3512 if (token != SRP_REV10_IB_IO_CLASS &&
3513 token != SRP_REV16A_IB_IO_CLASS) {
3514 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3515 token, SRP_REV10_IB_IO_CLASS,
3516 SRP_REV16A_IB_IO_CLASS);
3519 target->io_class = token;
3522 case SRP_OPT_INITIATOR_EXT:
3523 p = match_strdup(args);
3528 ret = kstrtoull(p, 16, &ull);
3530 pr_warn("bad initiator_ext value '%s'\n", p);
3534 target->initiator_ext = cpu_to_be64(ull);
3538 case SRP_OPT_CMD_SG_ENTRIES:
3539 if (match_int(args, &token) || token < 1 || token > 255) {
3540 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3544 target->cmd_sg_cnt = token;
3547 case SRP_OPT_ALLOW_EXT_SG:
3548 if (match_int(args, &token)) {
3549 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3552 target->allow_ext_sg = !!token;
3555 case SRP_OPT_SG_TABLESIZE:
3556 if (match_int(args, &token) || token < 1 ||
3557 token > SG_MAX_SEGMENTS) {
3558 pr_warn("bad max sg_tablesize parameter '%s'\n",
3562 target->sg_tablesize = token;
3565 case SRP_OPT_COMP_VECTOR:
3566 if (match_int(args, &token) || token < 0) {
3567 pr_warn("bad comp_vector parameter '%s'\n", p);
3570 target->comp_vector = token;
3573 case SRP_OPT_TL_RETRY_COUNT:
3574 if (match_int(args, &token) || token < 2 || token > 7) {
3575 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3579 target->tl_retry_count = token;
3582 case SRP_OPT_MAX_IT_IU_SIZE:
3583 if (match_int(args, &token) || token < 0) {
3584 pr_warn("bad maximum initiator to target IU size '%s'\n", p);
3587 target->max_it_iu_size = token;
3590 case SRP_OPT_CH_COUNT:
3591 if (match_int(args, &token) || token < 1) {
3592 pr_warn("bad channel count %s\n", p);
3595 target->ch_count = token;
3599 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3605 for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3606 if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3612 pr_warn("target creation request is missing one or more parameters\n");
3614 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3615 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3616 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3617 target->scsi_host->cmd_per_lun,
3618 target->scsi_host->can_queue);
3625 static ssize_t add_target_store(struct device *dev,
3626 struct device_attribute *attr, const char *buf,
3629 struct srp_host *host =
3630 container_of(dev, struct srp_host, dev);
3631 struct Scsi_Host *target_host;
3632 struct srp_target_port *target;
3633 struct srp_rdma_ch *ch;
3634 struct srp_device *srp_dev = host->srp_dev;
3635 struct ib_device *ibdev = srp_dev->dev;
3637 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3638 bool multich = false;
3639 uint32_t max_iu_len;
3641 target_host = scsi_host_alloc(&srp_template,
3642 sizeof (struct srp_target_port));
3646 target_host->transportt = ib_srp_transport_template;
3647 target_host->max_channel = 0;
3648 target_host->max_id = 1;
3649 target_host->max_lun = -1LL;
3650 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3651 target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
3653 if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
3654 target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
3656 target = host_to_target(target_host);
3658 target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
3659 target->io_class = SRP_REV16A_IB_IO_CLASS;
3660 target->scsi_host = target_host;
3661 target->srp_host = host;
3662 target->lkey = host->srp_dev->pd->local_dma_lkey;
3663 target->global_rkey = host->srp_dev->global_rkey;
3664 target->cmd_sg_cnt = cmd_sg_entries;
3665 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3666 target->allow_ext_sg = allow_ext_sg;
3667 target->tl_retry_count = 7;
3668 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3671 * Prevent the SCSI host from being removed by srp_remove_target()
3672 * before this function returns.
3674 scsi_host_get(target->scsi_host);
3676 ret = mutex_lock_interruptible(&host->add_target_mutex);
3680 ret = srp_parse_options(target->net, buf, target);
3684 if (!srp_conn_unique(target->srp_host, target)) {
3685 if (target->using_rdma_cm) {
3686 shost_printk(KERN_INFO, target->scsi_host,
3687 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
3688 be64_to_cpu(target->id_ext),
3689 be64_to_cpu(target->ioc_guid),
3690 &target->rdma_cm.dst);
3692 shost_printk(KERN_INFO, target->scsi_host,
3693 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3694 be64_to_cpu(target->id_ext),
3695 be64_to_cpu(target->ioc_guid),
3696 be64_to_cpu(target->initiator_ext));
3702 if (!srp_dev->has_fr && !target->allow_ext_sg &&
3703 target->cmd_sg_cnt < target->sg_tablesize) {
3704 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3705 target->sg_tablesize = target->cmd_sg_cnt;
3708 if (srp_dev->use_fast_reg) {
3709 bool gaps_reg = (ibdev->attrs.device_cap_flags &
3710 IB_DEVICE_SG_GAPS_REG);
3712 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3713 (ilog2(srp_dev->mr_page_size) - 9);
3716 * FR can only map one HCA page per entry. If the start
3717 * address is not aligned on a HCA page boundary two
3718 * entries will be used for the head and the tail
3719 * although these two entries combined contain at most
3720 * one HCA page of data. Hence the "+ 1" in the
3721 * calculation below.
3723 * The indirect data buffer descriptor is contiguous
3724 * so the memory for that buffer will only be
3725 * registered if register_always is true. Hence add
3726 * one to mr_per_cmd if register_always has been set.
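 *
 * Illustrative example (assumed values): with mr_page_size = 4096 and
 * max_pages_per_mr = 512, max_sectors_per_mr = 512 << (12 - 9) = 4096
 * sectors (2 MiB per MR). For max_sectors = 1024 and register_always ==
 * true this yields mr_per_cmd = 1 + DIV_ROUND_UP(1024 + 1, 4096) = 2.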
3728 mr_per_cmd = register_always +
3729 (target->scsi_host->max_sectors + 1 +
3730 max_sectors_per_mr - 1) / max_sectors_per_mr;
3732 mr_per_cmd = register_always +
3733 (target->sg_tablesize +
3734 srp_dev->max_pages_per_mr - 1) /
3735 srp_dev->max_pages_per_mr;
3737 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3738 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3739 max_sectors_per_mr, mr_per_cmd);
3742 target_host->sg_tablesize = target->sg_tablesize;
3743 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3744 target->mr_per_cmd = mr_per_cmd;
3745 target->indirect_size = target->sg_tablesize *
3746 sizeof (struct srp_direct_buf);
3747 max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
3749 target->max_it_iu_size);
3751 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3752 INIT_WORK(&target->remove_work, srp_remove_work);
3753 spin_lock_init(&target->lock);
3754 ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
3759 if (target->ch_count == 0) {
3762 max(4 * num_online_nodes(),
3763 ibdev->num_comp_vectors),
3767 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3772 for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
3773 ch = &target->ch[ch_idx];
3774 ch->target = target;
3775 ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
3776 spin_lock_init(&ch->lock);
3777 INIT_LIST_HEAD(&ch->free_tx);
3778 ret = srp_new_cm_id(ch);
3780 goto err_disconnect;
3782 ret = srp_create_ch_ib(ch);
3784 goto err_disconnect;
3786 ret = srp_connect_ch(ch, max_iu_len, multich);
3790 if (target->using_rdma_cm)
3791 snprintf(dst, sizeof(dst), "%pIS",
3792 &target->rdma_cm.dst);
3794 snprintf(dst, sizeof(dst), "%pI6",
3795 target->ib_cm.orig_dgid.raw);
3796 shost_printk(KERN_ERR, target->scsi_host,
3797 PFX "Connection %d/%d to %s failed\n",
3799 target->ch_count, dst);
3803 srp_free_ch_ib(target, ch);
3804 target->ch_count = ch - target->ch;
3812 target->scsi_host->nr_hw_queues = target->ch_count;
3814 ret = srp_add_target(host, target);
3816 goto err_disconnect;
3818 if (target->state != SRP_TARGET_REMOVED) {
3819 if (target->using_rdma_cm) {
3820 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3821 "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
3822 be64_to_cpu(target->id_ext),
3823 be64_to_cpu(target->ioc_guid),
3824 target->sgid.raw, &target->rdma_cm.dst);
3826 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3827 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3828 be64_to_cpu(target->id_ext),
3829 be64_to_cpu(target->ioc_guid),
3830 be16_to_cpu(target->ib_cm.pkey),
3831 be64_to_cpu(target->ib_cm.service_id),
3833 target->ib_cm.orig_dgid.raw);
3840 mutex_unlock(&host->add_target_mutex);
3843 scsi_host_put(target->scsi_host);
3846 * If a call to srp_remove_target() has not been scheduled,
3847 * drop the network namespace reference that was obtained
3848 * earlier in this function.
3850 if (target->state != SRP_TARGET_REMOVED)
3851 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
3852 scsi_host_put(target->scsi_host);
3858 srp_disconnect_target(target);
3861 for (i = 0; i < target->ch_count; i++) {
3862 ch = &target->ch[i];
3863 srp_free_ch_ib(target, ch);
3870 static DEVICE_ATTR_WO(add_target);
3872 static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
3875 struct srp_host *host = container_of(dev, struct srp_host, dev);
3877 return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
3880 static DEVICE_ATTR_RO(ibdev);
3882 static ssize_t port_show(struct device *dev, struct device_attribute *attr,
3885 struct srp_host *host = container_of(dev, struct srp_host, dev);
3887 return sysfs_emit(buf, "%d\n", host->port);
3890 static DEVICE_ATTR_RO(port);
3892 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3894 struct srp_host *host;
3896 host = kzalloc(sizeof *host, GFP_KERNEL);
3900 INIT_LIST_HEAD(&host->target_list);
3901 spin_lock_init(&host->target_lock);
3902 init_completion(&host->released);
3903 mutex_init(&host->add_target_mutex);
3904 host->srp_dev = device;
3907 host->dev.class = &srp_class;
3908 host->dev.parent = device->dev->dev.parent;
3909 dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
3912 if (device_register(&host->dev))
3914 if (device_create_file(&host->dev, &dev_attr_add_target))
3916 if (device_create_file(&host->dev, &dev_attr_ibdev))
3918 if (device_create_file(&host->dev, &dev_attr_port))
3924 device_unregister(&host->dev);
3932 static void srp_rename_dev(struct ib_device *device, void *client_data)
3934 struct srp_device *srp_dev = client_data;
3935 struct srp_host *host, *tmp_host;
3937 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3938 char name[IB_DEVICE_NAME_MAX + 8];
3940 snprintf(name, sizeof(name), "srp-%s-%d",
3941 dev_name(&device->dev), host->port);
3942 device_rename(&host->dev, name);
3946 static int srp_add_one(struct ib_device *device)
3948 struct srp_device *srp_dev;
3949 struct ib_device_attr *attr = &device->attrs;
3950 struct srp_host *host;
3953 u64 max_pages_per_mr;
3954 unsigned int flags = 0;
3956 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
3961 * Use the smallest page size supported by the HCA, down to a
3962 * minimum of 4096 bytes. We're unlikely to build large sglists
3963 * out of smaller entries.
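 *
 * For example, a page_size_cap whose lowest set bit is 2^12 yields
 * mr_page_shift = 12, i.e. mr_page_size = 4096 and
 * mr_page_mask = ~0xfffULL.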
3965 mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
3966 srp_dev->mr_page_size = 1 << mr_page_shift;
3967 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3968 max_pages_per_mr = attr->max_mr_size;
3969 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3970 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
3971 attr->max_mr_size, srp_dev->mr_page_size,
3972 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
3973 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3976 srp_dev->has_fr = (attr->device_cap_flags &
3977 IB_DEVICE_MEM_MGT_EXTENSIONS);
3978 if (!never_register && !srp_dev->has_fr)
3979 dev_warn(&device->dev, "FR is not supported\n");
3980 else if (!never_register &&
3981 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
3982 srp_dev->use_fast_reg = srp_dev->has_fr;
3984 if (never_register || !register_always || !srp_dev->has_fr)
3985 flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
3987 if (srp_dev->use_fast_reg) {
3988 srp_dev->max_pages_per_mr =
3989 min_t(u32, srp_dev->max_pages_per_mr,
3990 attr->max_fast_reg_page_list_len);
3992 srp_dev->mr_max_size = srp_dev->mr_page_size *
3993 srp_dev->max_pages_per_mr;
3994 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3995 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
3996 attr->max_fast_reg_page_list_len,
3997 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3999 INIT_LIST_HEAD(&srp_dev->dev_list);
4001 srp_dev->dev = device;
4002 srp_dev->pd = ib_alloc_pd(device, flags);
4003 if (IS_ERR(srp_dev->pd)) {
4004 int ret = PTR_ERR(srp_dev->pd);
4010 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
4011 srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4012 WARN_ON_ONCE(srp_dev->global_rkey == 0);
4015 rdma_for_each_port (device, p) {
4016 host = srp_add_port(srp_dev, p);
4018 list_add_tail(&host->list, &srp_dev->dev_list);
4021 ib_set_client_data(device, &srp_client, srp_dev);
4025 static void srp_remove_one(struct ib_device *device, void *client_data)
4027 struct srp_device *srp_dev;
4028 struct srp_host *host, *tmp_host;
4029 struct srp_target_port *target;
4031 srp_dev = client_data;
4033 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4034 device_unregister(&host->dev);
4036 * Wait for the sysfs entry to go away, so that no new
4037 * target ports can be created.
4039 wait_for_completion(&host->released);
4042 * Remove all target ports.
4044 spin_lock(&host->target_lock);
4045 list_for_each_entry(target, &host->target_list, list)
4046 srp_queue_remove_work(target);
4047 spin_unlock(&host->target_lock);
4050 * Wait for tl_err and target port removal tasks.
4052 flush_workqueue(system_long_wq);
4053 flush_workqueue(srp_remove_wq);
4058 ib_dealloc_pd(srp_dev->pd);
4063 static struct srp_function_template ib_srp_transport_functions = {
4064 .has_rport_state = true,
4065 .reset_timer_if_blocked = true,
4066 .reconnect_delay = &srp_reconnect_delay,
4067 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
4068 .dev_loss_tmo = &srp_dev_loss_tmo,
4069 .reconnect = srp_rport_reconnect,
4070 .rport_delete = srp_rport_delete,
4071 .terminate_rport_io = srp_terminate_io,
4074 static int __init srp_init_module(void)
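	/*
	 * Compile-time checks that the SRP wire-format structures have the
	 * sizes mandated by the SRP specification.
	 */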
4078 BUILD_BUG_ON(sizeof(struct srp_aer_req) != 36);
4079 BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
4080 BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
4081 BUILD_BUG_ON(sizeof(struct srp_indirect_buf) != 20);
4082 BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
4083 BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
4084 BUILD_BUG_ON(sizeof(struct srp_rsp) != 36);
4086 if (srp_sg_tablesize) {
4087 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
4088 if (!cmd_sg_entries)
4089 cmd_sg_entries = srp_sg_tablesize;
4092 if (!cmd_sg_entries)
4093 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
4095 if (cmd_sg_entries > 255) {
4096 pr_warn("Clamping cmd_sg_entries to 255\n");
4097 cmd_sg_entries = 255;
4100 if (!indirect_sg_entries)
4101 indirect_sg_entries = cmd_sg_entries;
4102 else if (indirect_sg_entries < cmd_sg_entries) {
4103 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
4105 indirect_sg_entries = cmd_sg_entries;
4108 if (indirect_sg_entries > SG_MAX_SEGMENTS) {
4109 pr_warn("Clamping indirect_sg_entries to %u\n",
4111 indirect_sg_entries = SG_MAX_SEGMENTS;
4114 srp_remove_wq = create_workqueue("srp_remove");
4115 if (!srp_remove_wq) {
4121 ib_srp_transport_template =
4122 srp_attach_transport(&ib_srp_transport_functions);
4123 if (!ib_srp_transport_template)
4126 ret = class_register(&srp_class);
4128 pr_err("couldn't register class infiniband_srp\n");
4132 ib_sa_register_client(&srp_sa_client);
4134 ret = ib_register_client(&srp_client);
4136 pr_err("couldn't register IB client\n");
4144 ib_sa_unregister_client(&srp_sa_client);
4145 class_unregister(&srp_class);
4148 srp_release_transport(ib_srp_transport_template);
4151 destroy_workqueue(srp_remove_wq);
4155 static void __exit srp_cleanup_module(void)
4157 ib_unregister_client(&srp_client);
4158 ib_sa_unregister_client(&srp_sa_client);
4159 class_unregister(&srp_class);
4160 srp_release_transport(ib_srp_transport_template);
4161 destroy_workqueue(srp_remove_wq);
4164 module_init(srp_init_module);
4165 module_exit(srp_cleanup_module);