1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
11 #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
13 #include <linux/module.h>
17 #include <rdma/ib_cm.h>
18 #include <rdma/ib_verbs.h>
19 #include "rtrs-srv-trace.h"
21 MODULE_DESCRIPTION("RDMA Transport Server");
22 MODULE_LICENSE("GPL");
24 /* Must be power of 2, see mask from mr->page_size in ib_sg_to_pages() */
25 #define DEFAULT_MAX_CHUNK_SIZE (128 << 10)
26 #define DEFAULT_SESS_QUEUE_DEPTH 512
27 #define MAX_HDR_SIZE PAGE_SIZE
29 static struct rtrs_rdma_dev_pd dev_pd;
30 const struct class rtrs_dev_class = {
31 .name = "rtrs-server",
33 static struct rtrs_srv_ib_ctx ib_ctx;
35 static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
36 static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
38 static bool always_invalidate = true;
39 module_param(always_invalidate, bool, 0444);
40 MODULE_PARM_DESC(always_invalidate,
41 "Invalidate memory registration for contiguous memory regions before accessing.");
43 module_param_named(max_chunk_size, max_chunk_size, int, 0444);
44 MODULE_PARM_DESC(max_chunk_size,
45 "Max size for each IO request, when change the unit is in byte (default: "
46 __stringify(DEFAULT_MAX_CHUNK_SIZE) "KB)");
48 module_param_named(sess_queue_depth, sess_queue_depth, int, 0444);
49 MODULE_PARM_DESC(sess_queue_depth,
50 "Number of buffers for pending I/O requests to allocate per session. Maximum: "
51 __stringify(MAX_SESS_QUEUE_DEPTH) " (default: "
52 __stringify(DEFAULT_SESS_QUEUE_DEPTH) ")");
54 static cpumask_t cq_affinity_mask = { CPU_BITS_ALL };
56 static struct workqueue_struct *rtrs_wq;
58 static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
60 return container_of(c, struct rtrs_srv_con, c);
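/*
 * Advance the per-path state machine under state_lock. Only the forward
 * transitions CONNECTING -> CONNECTED, CONNECTING/CONNECTED -> CLOSING and
 * CLOSING -> CLOSED are accepted; the return value tells the caller whether
 * the state was actually changed.
 */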
63 static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
64 enum rtrs_srv_state new_state)
66 enum rtrs_srv_state old_state;
70 spin_lock_irqsave(&srv_path->state_lock, flags);
71 old_state = srv_path->state;
73 case RTRS_SRV_CONNECTED:
74 if (old_state == RTRS_SRV_CONNECTING)
77 case RTRS_SRV_CLOSING:
78 if (old_state == RTRS_SRV_CONNECTING ||
79 old_state == RTRS_SRV_CONNECTED)
83 if (old_state == RTRS_SRV_CLOSING)
90 srv_path->state = new_state;
91 spin_unlock_irqrestore(&srv_path->state_lock, flags);
96 static void free_id(struct rtrs_srv_op *id)
103 static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path)
105 struct rtrs_srv_sess *srv = srv_path->srv;
108 if (srv_path->ops_ids) {
109 for (i = 0; i < srv->queue_depth; i++)
110 free_id(srv_path->ops_ids[i]);
111 kfree(srv_path->ops_ids);
112 srv_path->ops_ids = NULL;
116 static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
118 static struct ib_cqe io_comp_cqe = {
119 .done = rtrs_srv_rdma_done
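/*
 * Called once the per-path inflight percpu_ref drops to zero: all
 * outstanding ops ids have been put, so the reference counter can be torn
 * down and close_work, which waits on complete_done, may proceed.
 */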
122 static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref)
124 struct rtrs_srv_path *srv_path = container_of(ref,
125 struct rtrs_srv_path,
128 percpu_ref_exit(&srv_path->ids_inflight_ref);
129 complete(&srv_path->complete_done);
132 static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path)
134 struct rtrs_srv_sess *srv = srv_path->srv;
135 struct rtrs_srv_op *id;
138 srv_path->ops_ids = kcalloc(srv->queue_depth,
139 sizeof(*srv_path->ops_ids),
141 if (!srv_path->ops_ids)
144 for (i = 0; i < srv->queue_depth; ++i) {
145 id = kzalloc(sizeof(*id), GFP_KERNEL);
149 srv_path->ops_ids[i] = id;
152 ret = percpu_ref_init(&srv_path->ids_inflight_ref,
153 rtrs_srv_inflight_ref_release, 0, GFP_KERNEL);
155 pr_err("Percpu reference init failed\n");
158 init_completion(&srv_path->complete_done);
163 rtrs_srv_free_ops_ids(srv_path);
167 static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path)
169 percpu_ref_get(&srv_path->ids_inflight_ref);
172 static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path)
174 percpu_ref_put(&srv_path->ids_inflight_ref);
177 static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
179 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
180 struct rtrs_path *s = con->c.path;
181 struct rtrs_srv_path *srv_path = to_srv_path(s);
183 if (wc->status != IB_WC_SUCCESS) {
184 rtrs_err(s, "REG MR failed: %s\n",
185 ib_wc_status_msg(wc->status));
186 close_path(srv_path);
191 static struct ib_cqe local_reg_cqe = {
192 .done = rtrs_srv_reg_mr_done
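/*
 * Push READ response data back to the client: an RDMA WRITE to the sge
 * advertised in the read request, optionally chained with a local MR
 * re-registration (always_invalidate) and/or a SEND_WITH_INV, and finished
 * with an immediate-carrying WR so the client gets a completion for msg_id.
 */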
195 static int rdma_write_sg(struct rtrs_srv_op *id)
197 struct rtrs_path *s = id->con->c.path;
198 struct rtrs_srv_path *srv_path = to_srv_path(s);
199 dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id];
200 struct rtrs_srv_mr *srv_mr;
201 struct ib_send_wr inv_wr;
202 struct ib_rdma_wr imm_wr;
203 struct ib_rdma_wr *wr = NULL;
204 enum ib_send_flags flags;
209 struct ib_reg_wr rwr;
210 struct ib_sge *plist;
213 sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt);
214 need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F;
222 plist->addr = dma_addr + offset;
223 plist->length = le32_to_cpu(id->rd_msg->desc[0].len);
225 /* The WR will fail with a length error if the sg length is 0 */
228 if (plist->length == 0) {
229 rtrs_err(s, "Invalid RDMA-Write sg list length 0\n");
233 plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
234 offset += plist->length;
236 wr->wr.sg_list = plist;
238 wr->remote_addr = le64_to_cpu(id->rd_msg->desc[0].addr);
239 wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key);
243 /* Only one key is actually used */
244 WARN_ON_ONCE(rkey != wr->rkey);
246 wr->wr.opcode = IB_WR_RDMA_WRITE;
247 wr->wr.wr_cqe = &io_comp_cqe;
248 wr->wr.ex.imm_data = 0;
249 wr->wr.send_flags = 0;
251 if (need_inval && always_invalidate) {
252 wr->wr.next = &rwr.wr;
253 rwr.wr.next = &inv_wr;
254 inv_wr.next = &imm_wr.wr;
255 } else if (always_invalidate) {
256 wr->wr.next = &rwr.wr;
257 rwr.wr.next = &imm_wr.wr;
258 } else if (need_inval) {
259 wr->wr.next = &inv_wr;
260 inv_wr.next = &imm_wr.wr;
262 wr->wr.next = &imm_wr.wr;
265 * From time to time we have to post signaled sends,
266 * or the send queue will fill up and only a QP reset can help.
268 flags = (atomic_inc_return(&id->con->c.wr_cnt) % s->signal_interval) ?
269 0 : IB_SEND_SIGNALED;
272 inv_wr.sg_list = NULL;
274 inv_wr.opcode = IB_WR_SEND_WITH_INV;
275 inv_wr.wr_cqe = &io_comp_cqe;
276 inv_wr.send_flags = 0;
277 inv_wr.ex.invalidate_rkey = rkey;
280 imm_wr.wr.next = NULL;
281 if (always_invalidate) {
282 struct rtrs_msg_rkey_rsp *msg;
284 srv_mr = &srv_path->mrs[id->msg_id];
285 rwr.wr.opcode = IB_WR_REG_MR;
286 rwr.wr.wr_cqe = &local_reg_cqe;
289 rwr.wr.send_flags = 0;
290 rwr.key = srv_mr->mr->rkey;
291 rwr.access = (IB_ACCESS_LOCAL_WRITE |
292 IB_ACCESS_REMOTE_WRITE);
293 msg = srv_mr->iu->buf;
294 msg->buf_id = cpu_to_le16(id->msg_id);
295 msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
296 msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
298 list.addr = srv_mr->iu->dma_addr;
299 list.length = sizeof(*msg);
300 list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
301 imm_wr.wr.sg_list = &list;
302 imm_wr.wr.num_sge = 1;
303 imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
304 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
305 srv_mr->iu->dma_addr,
306 srv_mr->iu->size, DMA_TO_DEVICE);
308 imm_wr.wr.sg_list = NULL;
309 imm_wr.wr.num_sge = 0;
310 imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
312 imm_wr.wr.send_flags = flags;
313 imm_wr.wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
316 imm_wr.wr.wr_cqe = &io_comp_cqe;
317 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr,
318 offset, DMA_BIDIRECTIONAL);
320 err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
323 "Posting RDMA-Write-Request to QP failed, err: %d\n",
330 * send_io_resp_imm() - respond to client with empty IMM on failed READ/WRITE
331 * requests or on successful WRITE request.
332 * @con: the connection to send back result
333 * @id: the id associated with the IO
334 * @errno: the error number of the IO.
336 * Return 0 on success, errno otherwise.
338 static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
341 struct rtrs_path *s = con->c.path;
342 struct rtrs_srv_path *srv_path = to_srv_path(s);
343 struct ib_send_wr inv_wr, *wr = NULL;
344 struct ib_rdma_wr imm_wr;
345 struct ib_reg_wr rwr;
346 struct rtrs_srv_mr *srv_mr;
347 bool need_inval = false;
348 enum ib_send_flags flags;
352 if (id->dir == READ) {
353 struct rtrs_msg_rdma_read *rd_msg = id->rd_msg;
356 need_inval = le16_to_cpu(rd_msg->flags) &
357 RTRS_MSG_NEED_INVAL_F;
358 sg_cnt = le16_to_cpu(rd_msg->sg_cnt);
362 inv_wr.wr_cqe = &io_comp_cqe;
363 inv_wr.sg_list = NULL;
365 inv_wr.opcode = IB_WR_SEND_WITH_INV;
366 inv_wr.send_flags = 0;
367 /* Only one key is actually used */
368 inv_wr.ex.invalidate_rkey =
369 le32_to_cpu(rd_msg->desc[0].key);
377 trace_send_io_resp_imm(id, need_inval, always_invalidate, errno);
379 if (need_inval && always_invalidate) {
381 inv_wr.next = &rwr.wr;
382 rwr.wr.next = &imm_wr.wr;
383 } else if (always_invalidate) {
385 rwr.wr.next = &imm_wr.wr;
386 } else if (need_inval) {
388 inv_wr.next = &imm_wr.wr;
393 * From time to time we have to post signaled sends,
394 * or the send queue will fill up and only a QP reset can help.
396 flags = (atomic_inc_return(&con->c.wr_cnt) % s->signal_interval) ?
397 0 : IB_SEND_SIGNALED;
398 imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
399 imm_wr.wr.next = NULL;
400 if (always_invalidate) {
402 struct rtrs_msg_rkey_rsp *msg;
404 srv_mr = &srv_path->mrs[id->msg_id];
405 rwr.wr.next = &imm_wr.wr;
406 rwr.wr.opcode = IB_WR_REG_MR;
407 rwr.wr.wr_cqe = &local_reg_cqe;
409 rwr.wr.send_flags = 0;
411 rwr.key = srv_mr->mr->rkey;
412 rwr.access = (IB_ACCESS_LOCAL_WRITE |
413 IB_ACCESS_REMOTE_WRITE);
414 msg = srv_mr->iu->buf;
415 msg->buf_id = cpu_to_le16(id->msg_id);
416 msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
417 msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
419 list.addr = srv_mr->iu->dma_addr;
420 list.length = sizeof(*msg);
421 list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
422 imm_wr.wr.sg_list = &list;
423 imm_wr.wr.num_sge = 1;
424 imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
425 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
426 srv_mr->iu->dma_addr,
427 srv_mr->iu->size, DMA_TO_DEVICE);
429 imm_wr.wr.sg_list = NULL;
430 imm_wr.wr.num_sge = 0;
431 imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
433 imm_wr.wr.send_flags = flags;
434 imm_wr.wr.wr_cqe = &io_comp_cqe;
436 imm_wr.wr.ex.imm_data = cpu_to_be32(imm);
438 err = ib_post_send(id->con->c.qp, wr, NULL);
440 rtrs_err_rl(s, "Posting RDMA-Reply to QP failed, err: %d\n",
446 void close_path(struct rtrs_srv_path *srv_path)
448 if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING))
449 queue_work(rtrs_wq, &srv_path->close_work);
450 WARN_ON(srv_path->state != RTRS_SRV_CLOSING);
453 static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
456 case RTRS_SRV_CONNECTING:
457 return "RTRS_SRV_CONNECTING";
458 case RTRS_SRV_CONNECTED:
459 return "RTRS_SRV_CONNECTED";
460 case RTRS_SRV_CLOSING:
461 return "RTRS_SRV_CLOSING";
462 case RTRS_SRV_CLOSED:
463 return "RTRS_SRV_CLOSED";
470 * rtrs_srv_resp_rdma() - Finish an RDMA request
472 * @id: Internal RTRS operation identifier
473 * @status: Response Code sent to the other side for this operation.
474 0 = success, < 0 error
477 * Finish an RDMA operation. A message is sent to the client and the
478 * corresponding memory areas will be released.
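 *
 * Typically called by the user module registered through rtrs_srv_open()
 * once it has processed the request handed to it via its ->rdma_ev()
 * callback, possibly from a different context.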
480 bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
482 struct rtrs_srv_path *srv_path;
483 struct rtrs_srv_con *con;
492 srv_path = to_srv_path(s);
496 if (srv_path->state != RTRS_SRV_CONNECTED) {
498 "Sending I/O response failed, server path %s is disconnected, path state %s\n",
499 kobject_name(&srv_path->kobj),
500 rtrs_srv_state_str(srv_path->state));
503 if (always_invalidate) {
504 struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id];
506 ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
508 if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) {
509 rtrs_err(s, "IB send queue full: srv_path=%s cid=%d\n",
510 kobject_name(&srv_path->kobj),
512 atomic_add(1, &con->c.sq_wr_avail);
513 spin_lock(&con->rsp_wr_wait_lock);
514 list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
515 spin_unlock(&con->rsp_wr_wait_lock);
519 if (status || id->dir == WRITE || !id->rd_msg->sg_cnt)
520 err = send_io_resp_imm(con, id, status);
522 err = rdma_write_sg(id);
525 rtrs_err_rl(s, "IO response failed: %d: srv_path=%s\n", err,
526 kobject_name(&srv_path->kobj));
527 close_path(srv_path);
530 rtrs_srv_put_ops_ids(srv_path);
533 EXPORT_SYMBOL(rtrs_srv_resp_rdma);
536 * rtrs_srv_set_sess_priv() - Set private pointer in rtrs_srv.
537 * @srv: Session pointer
538 * @priv: The private pointer that is associated with the session.
540 void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv)
544 EXPORT_SYMBOL(rtrs_srv_set_sess_priv);
546 static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
550 for (i = 0; i < srv_path->mrs_num; i++) {
551 struct rtrs_srv_mr *srv_mr;
553 srv_mr = &srv_path->mrs[i];
554 rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
555 ib_dereg_mr(srv_mr->mr);
556 ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
557 srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
558 sg_free_table(&srv_mr->sgt);
560 kfree(srv_path->mrs);
563 static int map_cont_bufs(struct rtrs_srv_path *srv_path)
565 struct rtrs_srv_sess *srv = srv_path->srv;
566 struct rtrs_path *ss = &srv_path->s;
568 unsigned int chunk_bits;
569 int chunks_per_mr = 1;
571 struct sg_table *sgt;
574 * Here we map queue_depth chunks to MR. Firstly we have to
575 * figure out how many chunks can we map per MR.
577 if (always_invalidate) {
579 * in order to do an invalidate for each chunk of memory, we need
580 * more memory regions.
582 mrs_num = srv->queue_depth;
585 srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
586 mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
587 chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
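/*
 * Illustrative example (values are assumptions, not taken from this file):
 * queue_depth = 512 and a device reporting max_fast_reg_page_list_len = 256
 * give mrs_num = 2 and chunks_per_mr = 256, whereas with always_invalidate
 * every one of the 512 chunks gets its own MR.
 */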
590 srv_path->mrs = kcalloc(mrs_num, sizeof(*srv_path->mrs), GFP_KERNEL);
594 for (srv_path->mrs_num = 0; srv_path->mrs_num < mrs_num;
595 srv_path->mrs_num++) {
596 struct rtrs_srv_mr *srv_mr = &srv_path->mrs[srv_path->mrs_num];
597 struct scatterlist *s;
598 int nr, nr_sgt, chunks;
601 chunks = chunks_per_mr * srv_path->mrs_num;
602 if (!always_invalidate)
603 chunks_per_mr = min_t(int, chunks_per_mr,
604 srv->queue_depth - chunks);
606 err = sg_alloc_table(sgt, chunks_per_mr, GFP_KERNEL);
610 for_each_sg(sgt->sgl, s, chunks_per_mr, i)
611 sg_set_page(s, srv->chunks[chunks + i],
614 nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
615 sgt->nents, DMA_BIDIRECTIONAL);
620 mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
626 nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
627 NULL, max_chunk_size);
629 err = nr < 0 ? nr : -EINVAL;
633 if (always_invalidate) {
634 srv_mr->iu = rtrs_iu_alloc(1,
635 sizeof(struct rtrs_msg_rkey_rsp),
636 GFP_KERNEL, srv_path->s.dev->ib_dev,
637 DMA_TO_DEVICE, rtrs_srv_rdma_done);
640 rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err);
644 /* Eventually dma addr for each chunk can be cached */
645 for_each_sg(sgt->sgl, s, nr_sgt, i)
646 srv_path->dma_addr[chunks + i] = sg_dma_address(s);
648 ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
652 chunk_bits = ilog2(srv->queue_depth - 1) + 1;
653 srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
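/*
 * Illustrative example (assuming MAX_IMM_PAYL_BITS is 28, as defined in
 * rtrs-pri.h): queue_depth = 512 gives chunk_bits = 9 and mem_bits = 19,
 * i.e. the immediate payload carries the buffer id in its top 9 bits and
 * the offset into the chunk in the low 19 bits, matching the unpacking in
 * rtrs_srv_rdma_done().
 */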
660 ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl,
661 sgt->nents, DMA_BIDIRECTIONAL);
665 unmap_cont_bufs(srv_path);
670 static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
672 close_path(to_srv_path(c->path));
675 static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path)
677 rtrs_init_hb(&srv_path->s, &io_comp_cqe,
680 rtrs_srv_hb_err_handler,
684 static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path)
686 rtrs_start_hb(&srv_path->s);
689 static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path)
691 rtrs_stop_hb(&srv_path->s);
694 static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
696 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
697 struct rtrs_path *s = con->c.path;
698 struct rtrs_srv_path *srv_path = to_srv_path(s);
701 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
702 rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
704 if (wc->status != IB_WC_SUCCESS) {
705 rtrs_err(s, "Sess info response send failed: %s\n",
706 ib_wc_status_msg(wc->status));
707 close_path(srv_path);
710 WARN_ON(wc->opcode != IB_WC_SEND);
713 static int rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
715 struct rtrs_srv_sess *srv = srv_path->srv;
716 struct rtrs_srv_ctx *ctx = srv->ctx;
719 mutex_lock(&srv->paths_ev_mutex);
720 up = ++srv->paths_up;
722 ret = ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
723 mutex_unlock(&srv->paths_ev_mutex);
725 /* Mark session as established */
727 srv_path->established = true;
732 static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
734 struct rtrs_srv_sess *srv = srv_path->srv;
735 struct rtrs_srv_ctx *ctx = srv->ctx;
737 if (!srv_path->established)
740 srv_path->established = false;
741 mutex_lock(&srv->paths_ev_mutex);
742 WARN_ON(!srv->paths_up);
743 if (--srv->paths_up == 0)
744 ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv);
745 mutex_unlock(&srv->paths_ev_mutex);
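/*
 * Check whether @pathname is already used by a different client session
 * (one with another uuid); used by process_info_req() to reject duplicated
 * path names.
 */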
748 static bool exist_pathname(struct rtrs_srv_ctx *ctx,
749 const char *pathname, const uuid_t *path_uuid)
751 struct rtrs_srv_sess *srv;
752 struct rtrs_srv_path *srv_path;
755 mutex_lock(&ctx->srv_mutex);
756 list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
757 mutex_lock(&srv->paths_mutex);
759 /* skip our own session: the same client (same uuid) may add a path with the same sessname */
760 if (uuid_equal(&srv->paths_uuid, path_uuid)) {
761 mutex_unlock(&srv->paths_mutex);
765 list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
766 if (strlen(srv_path->s.sessname) == strlen(pathname) &&
767 !strcmp(srv_path->s.sessname, pathname)) {
772 mutex_unlock(&srv->paths_mutex);
776 mutex_unlock(&ctx->srv_mutex);
780 static int post_recv_path(struct rtrs_srv_path *srv_path);
781 static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno);
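/*
 * Handle the client's info request: validate the requested pathname, post
 * the IO receive buffers, chain REG_MR WRs for all server MRs and reply
 * with an info response advertising each MR's iova/rkey/length so the
 * client can address the server chunks directly.
 */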
783 static int process_info_req(struct rtrs_srv_con *con,
784 struct rtrs_msg_info_req *msg)
786 struct rtrs_path *s = con->c.path;
787 struct rtrs_srv_path *srv_path = to_srv_path(s);
788 struct ib_send_wr *reg_wr = NULL;
789 struct rtrs_msg_info_rsp *rsp;
790 struct rtrs_iu *tx_iu;
791 struct ib_reg_wr *rwr;
795 err = post_recv_path(srv_path);
797 rtrs_err(s, "post_recv_path(), err: %d\n", err);
801 if (strchr(msg->pathname, '/') || strchr(msg->pathname, '.')) {
802 rtrs_err(s, "pathname cannot contain / and .\n");
806 if (exist_pathname(srv_path->srv->ctx,
807 msg->pathname, &srv_path->srv->paths_uuid)) {
808 rtrs_err(s, "pathname is duplicated: %s\n", msg->pathname);
811 strscpy(srv_path->s.sessname, msg->pathname,
812 sizeof(srv_path->s.sessname));
814 rwr = kcalloc(srv_path->mrs_num, sizeof(*rwr), GFP_KERNEL);
818 tx_sz = sizeof(*rsp);
819 tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num;
820 tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev,
821 DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
828 rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP);
829 rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num);
831 for (mri = 0; mri < srv_path->mrs_num; mri++) {
832 struct ib_mr *mr = srv_path->mrs[mri].mr;
834 rsp->desc[mri].addr = cpu_to_le64(mr->iova);
835 rsp->desc[mri].key = cpu_to_le32(mr->rkey);
836 rsp->desc[mri].len = cpu_to_le32(mr->length);
839 * Fill in reg MR request and chain them *backwards*
841 rwr[mri].wr.next = mri ? &rwr[mri - 1].wr : NULL;
842 rwr[mri].wr.opcode = IB_WR_REG_MR;
843 rwr[mri].wr.wr_cqe = &local_reg_cqe;
844 rwr[mri].wr.num_sge = 0;
845 rwr[mri].wr.send_flags = 0;
847 rwr[mri].key = mr->rkey;
848 rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
849 IB_ACCESS_REMOTE_WRITE);
850 reg_wr = &rwr[mri].wr;
853 err = rtrs_srv_create_path_files(srv_path);
856 kobject_get(&srv_path->kobj);
857 get_device(&srv_path->srv->dev);
858 err = rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
860 rtrs_err(s, "rtrs_srv_change_state(), err: %d\n", err);
864 rtrs_srv_start_hb(srv_path);
867 * We do not account for the number of established connections at the
868 * moment; we rely on the client, which should send the info request
869 * only when all connections are successfully established. Thus, simply
870 * notify the listener with a proper event if we are the first path.
872 err = rtrs_srv_path_up(srv_path);
874 rtrs_err(s, "rtrs_srv_path_up(), err: %d\n", err);
878 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
880 tx_iu->size, DMA_TO_DEVICE);
882 /* Send info response */
883 err = rtrs_iu_post_send(&con->c, tx_iu, tx_sz, reg_wr);
885 rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
887 rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1);
895 static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
897 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
898 struct rtrs_path *s = con->c.path;
899 struct rtrs_srv_path *srv_path = to_srv_path(s);
900 struct rtrs_msg_info_req *msg;
906 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
907 if (wc->status != IB_WC_SUCCESS) {
908 rtrs_err(s, "Sess info request receive failed: %s\n",
909 ib_wc_status_msg(wc->status));
912 WARN_ON(wc->opcode != IB_WC_RECV);
914 if (wc->byte_len < sizeof(*msg)) {
915 rtrs_err(s, "Sess info request is malformed: size %d\n",
919 ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr,
920 iu->size, DMA_FROM_DEVICE);
922 if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) {
923 rtrs_err(s, "Sess info request is malformed: type %d\n",
924 le16_to_cpu(msg->type));
927 err = process_info_req(con, msg);
932 rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
935 close_path(srv_path);
939 static int post_recv_info_req(struct rtrs_srv_con *con)
941 struct rtrs_path *s = con->c.path;
942 struct rtrs_srv_path *srv_path = to_srv_path(s);
943 struct rtrs_iu *rx_iu;
946 rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
947 GFP_KERNEL, srv_path->s.dev->ib_dev,
948 DMA_FROM_DEVICE, rtrs_srv_info_req_done);
951 /* Prepare for receiving the info request from the client */
952 err = rtrs_iu_post_recv(&con->c, rx_iu);
954 rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
955 rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1);
962 static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
966 for (i = 0; i < q_size; i++) {
967 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
975 static int post_recv_path(struct rtrs_srv_path *srv_path)
977 struct rtrs_srv_sess *srv = srv_path->srv;
978 struct rtrs_path *s = &srv_path->s;
982 for (cid = 0; cid < srv_path->s.con_num; cid++) {
984 q_size = SERVICE_CON_QUEUE_DEPTH;
986 q_size = srv->queue_depth;
988 err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
990 rtrs_err(s, "post_recv_io(), err: %d\n", err);
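/*
 * Hand a client READ request to the upper layer via ctx->ops.rdma_ev();
 * the response data is sent back later through rtrs_srv_resp_rdma(), which
 * uses rdma_write_sg() on success.
 */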
998 static void process_read(struct rtrs_srv_con *con,
999 struct rtrs_msg_rdma_read *msg,
1000 u32 buf_id, u32 off)
1002 struct rtrs_path *s = con->c.path;
1003 struct rtrs_srv_path *srv_path = to_srv_path(s);
1004 struct rtrs_srv_sess *srv = srv_path->srv;
1005 struct rtrs_srv_ctx *ctx = srv->ctx;
1006 struct rtrs_srv_op *id;
1008 size_t usr_len, data_len;
1012 if (srv_path->state != RTRS_SRV_CONNECTED) {
1014 "Processing read request failed, session is disconnected, sess state %s\n",
1015 rtrs_srv_state_str(srv_path->state));
1018 if (msg->sg_cnt != 1 && msg->sg_cnt != 0) {
1020 "Processing read request failed, invalid message\n");
1023 rtrs_srv_get_ops_ids(srv_path);
1024 rtrs_srv_update_rdma_stats(srv_path->stats, off, READ);
1025 id = srv_path->ops_ids[buf_id];
1028 id->msg_id = buf_id;
1030 usr_len = le16_to_cpu(msg->usr_len);
1031 data_len = off - usr_len;
1032 data = page_address(srv->chunks[buf_id]);
1033 ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
1034 data + data_len, usr_len);
1038 "Processing read request failed, user module cb reported for msg_id %d, err: %d\n",
1046 ret = send_io_resp_imm(con, id, ret);
1049 "Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n",
1051 close_path(srv_path);
1053 rtrs_srv_put_ops_ids(srv_path);
1056 static void process_write(struct rtrs_srv_con *con,
1057 struct rtrs_msg_rdma_write *req,
1058 u32 buf_id, u32 off)
1060 struct rtrs_path *s = con->c.path;
1061 struct rtrs_srv_path *srv_path = to_srv_path(s);
1062 struct rtrs_srv_sess *srv = srv_path->srv;
1063 struct rtrs_srv_ctx *ctx = srv->ctx;
1064 struct rtrs_srv_op *id;
1066 size_t data_len, usr_len;
1070 if (srv_path->state != RTRS_SRV_CONNECTED) {
1072 "Processing write request failed, session is disconnected, sess state %s\n",
1073 rtrs_srv_state_str(srv_path->state));
1076 rtrs_srv_get_ops_ids(srv_path);
1077 rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE);
1078 id = srv_path->ops_ids[buf_id];
1081 id->msg_id = buf_id;
1083 usr_len = le16_to_cpu(req->usr_len);
1084 data_len = off - usr_len;
1085 data = page_address(srv->chunks[buf_id]);
1086 ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
1087 data + data_len, usr_len);
1090 "Processing write request failed, user module callback reports err: %d\n",
1098 ret = send_io_resp_imm(con, id, ret);
1101 "Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n",
1103 close_path(srv_path);
1105 rtrs_srv_put_ops_ids(srv_path);
1108 static void process_io_req(struct rtrs_srv_con *con, void *msg,
1111 struct rtrs_path *s = con->c.path;
1112 struct rtrs_srv_path *srv_path = to_srv_path(s);
1113 struct rtrs_msg_rdma_hdr *hdr;
1116 ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev,
1117 srv_path->dma_addr[id],
1118 max_chunk_size, DMA_BIDIRECTIONAL);
1120 type = le16_to_cpu(hdr->type);
1123 case RTRS_MSG_WRITE:
1124 process_write(con, msg, id, off);
1127 process_read(con, msg, id, off);
1131 "Processing I/O request failed, unknown message type received: 0x%02x\n",
1139 close_path(srv_path);
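/*
 * With always_invalidate the chunk's rkey is invalidated locally
 * (IB_WR_LOCAL_INV) before the data is touched; the actual IO request is
 * then processed from this completion handler.
 */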
1142 static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
1144 struct rtrs_srv_mr *mr =
1145 container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
1146 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
1147 struct rtrs_path *s = con->c.path;
1148 struct rtrs_srv_path *srv_path = to_srv_path(s);
1149 struct rtrs_srv_sess *srv = srv_path->srv;
1153 if (wc->status != IB_WC_SUCCESS) {
1154 rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
1155 ib_wc_status_msg(wc->status));
1156 close_path(srv_path);
1158 msg_id = mr->msg_id;
1160 data = page_address(srv->chunks[msg_id]) + off;
1161 process_io_req(con, data, msg_id, off);
1164 static int rtrs_srv_inv_rkey(struct rtrs_srv_con *con,
1165 struct rtrs_srv_mr *mr)
1167 struct ib_send_wr wr = {
1168 .opcode = IB_WR_LOCAL_INV,
1169 .wr_cqe = &mr->inv_cqe,
1170 .send_flags = IB_SEND_SIGNALED,
1171 .ex.invalidate_rkey = mr->mr->rkey,
1173 mr->inv_cqe.done = rtrs_srv_inv_rkey_done;
1175 return ib_post_send(con->c.qp, &wr, NULL);
1178 static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
1180 spin_lock(&con->rsp_wr_wait_lock);
1181 while (!list_empty(&con->rsp_wr_wait_list)) {
1182 struct rtrs_srv_op *id;
1185 id = list_entry(con->rsp_wr_wait_list.next,
1186 struct rtrs_srv_op, wait_list);
1187 list_del(&id->wait_list);
1189 spin_unlock(&con->rsp_wr_wait_lock);
1190 ret = rtrs_srv_resp_rdma(id, id->status);
1191 spin_lock(&con->rsp_wr_wait_lock);
1194 list_add(&id->wait_list, &con->rsp_wr_wait_list);
1198 spin_unlock(&con->rsp_wr_wait_lock);
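/*
 * Data-path completion handler: receives with immediate data carry either
 * an IO request (buffer id and offset packed into the immediate), a
 * heartbeat or a heartbeat ack; RDMA write send completions replenish the
 * send queue budget and drain the response wait list.
 */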
1201 static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
1203 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
1204 struct rtrs_path *s = con->c.path;
1205 struct rtrs_srv_path *srv_path = to_srv_path(s);
1206 struct rtrs_srv_sess *srv = srv_path->srv;
1207 u32 imm_type, imm_payload;
1210 if (wc->status != IB_WC_SUCCESS) {
1211 if (wc->status != IB_WC_WR_FLUSH_ERR) {
1213 "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
1214 ib_wc_status_msg(wc->status), wc->wr_cqe,
1215 wc->opcode, wc->vendor_err, wc->byte_len);
1216 close_path(srv_path);
1221 switch (wc->opcode) {
1222 case IB_WC_RECV_RDMA_WITH_IMM:
1224 * post_recv() RDMA write completions of IO reqs (read/write)
1227 if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
1229 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
1231 rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
1232 close_path(srv_path);
1235 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
1236 &imm_type, &imm_payload);
1237 if (imm_type == RTRS_IO_REQ_IMM) {
1241 msg_id = imm_payload >> srv_path->mem_bits;
1242 off = imm_payload & ((1 << srv_path->mem_bits) - 1);
1243 if (msg_id >= srv->queue_depth || off >= max_chunk_size) {
1244 rtrs_err(s, "Wrong msg_id %u, off %u\n",
1246 close_path(srv_path);
1249 if (always_invalidate) {
1250 struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id];
1253 mr->msg_id = msg_id;
1254 err = rtrs_srv_inv_rkey(con, mr);
1256 rtrs_err(s, "rtrs_post_recv(), err: %d\n",
1258 close_path(srv_path);
1262 data = page_address(srv->chunks[msg_id]) + off;
1263 process_io_req(con, data, msg_id, off);
1265 } else if (imm_type == RTRS_HB_MSG_IMM) {
1266 WARN_ON(con->c.cid);
1267 rtrs_send_hb_ack(&srv_path->s);
1268 } else if (imm_type == RTRS_HB_ACK_IMM) {
1269 WARN_ON(con->c.cid);
1270 srv_path->s.hb_missed_cnt = 0;
1272 rtrs_wrn(s, "Unknown IMM type %u\n", imm_type);
1275 case IB_WC_RDMA_WRITE:
1278 * post_send() RDMA write completions of IO reqs (read/write)
1281 atomic_add(s->signal_interval, &con->c.sq_wr_avail);
1283 if (!list_empty_careful(&con->rsp_wr_wait_list))
1284 rtrs_rdma_process_wr_wait_list(con);
1288 rtrs_wrn(s, "Unexpected WC type: %d\n", wc->opcode);
1294 * rtrs_srv_get_path_name() - Get the name of a connected rtrs_srv path.
1296 * @pathname: Pathname buffer
1297 * @len: Length of the pathname buffer
1299 int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname,
1302 struct rtrs_srv_path *srv_path;
1303 int err = -ENOTCONN;
1305 mutex_lock(&srv->paths_mutex);
1306 list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
1307 if (srv_path->state != RTRS_SRV_CONNECTED)
1309 strscpy(pathname, srv_path->s.sessname,
1310 min_t(size_t, sizeof(srv_path->s.sessname), len));
1314 mutex_unlock(&srv->paths_mutex);
1318 EXPORT_SYMBOL(rtrs_srv_get_path_name);
1321 * rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth.
1324 int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv)
1326 return srv->queue_depth;
1328 EXPORT_SYMBOL(rtrs_srv_get_queue_depth);
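/*
 * Pick the completion vector for a new connection by walking
 * cq_affinity_mask round-robin, wrapping around when either the CPU mask
 * or the device's number of completion vectors is exhausted.
 */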
1330 static int find_next_bit_ring(struct rtrs_srv_path *srv_path)
1332 struct ib_device *ib_dev = srv_path->s.dev->ib_dev;
1335 v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask);
1336 if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
1337 v = cpumask_first(&cq_affinity_mask);
1341 static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path)
1343 srv_path->cur_cq_vector = find_next_bit_ring(srv_path);
1345 return srv_path->cur_cq_vector;
1348 static void rtrs_srv_dev_release(struct device *dev)
1350 struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess,
1356 static void free_srv(struct rtrs_srv_sess *srv)
1360 WARN_ON(refcount_read(&srv->refcount));
1361 for (i = 0; i < srv->queue_depth; i++)
1362 __free_pages(srv->chunks[i], get_order(max_chunk_size));
1364 mutex_destroy(&srv->paths_mutex);
1365 mutex_destroy(&srv->paths_ev_mutex);
1366 /* last put to release the srv structure */
1367 put_device(&srv->dev);
1370 static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
1371 const uuid_t *paths_uuid,
1374 struct rtrs_srv_sess *srv;
1377 mutex_lock(&ctx->srv_mutex);
1378 list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
1379 if (uuid_equal(&srv->paths_uuid, paths_uuid) &&
1380 refcount_inc_not_zero(&srv->refcount)) {
1381 mutex_unlock(&ctx->srv_mutex);
1385 mutex_unlock(&ctx->srv_mutex);
1387 * If this request is not the first connection request from the
1388 * client for this session then fail and return error.
1391 pr_err_ratelimited("Error: Not the first connection request for this session\n");
1392 return ERR_PTR(-ENXIO);
1395 /* need to allocate a new srv */
1396 srv = kzalloc(sizeof(*srv), GFP_KERNEL);
1398 return ERR_PTR(-ENOMEM);
1400 INIT_LIST_HEAD(&srv->paths_list);
1401 mutex_init(&srv->paths_mutex);
1402 mutex_init(&srv->paths_ev_mutex);
1403 uuid_copy(&srv->paths_uuid, paths_uuid);
1404 srv->queue_depth = sess_queue_depth;
1406 device_initialize(&srv->dev);
1407 srv->dev.release = rtrs_srv_dev_release;
1409 srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
1414 for (i = 0; i < srv->queue_depth; i++) {
1415 srv->chunks[i] = alloc_pages(GFP_KERNEL,
1416 get_order(max_chunk_size));
1417 if (!srv->chunks[i])
1418 goto err_free_chunks;
1420 refcount_set(&srv->refcount, 1);
1421 mutex_lock(&ctx->srv_mutex);
1422 list_add(&srv->ctx_list, &ctx->srv_list);
1423 mutex_unlock(&ctx->srv_mutex);
1429 __free_pages(srv->chunks[i], get_order(max_chunk_size));
1434 return ERR_PTR(-ENOMEM);
1437 static void put_srv(struct rtrs_srv_sess *srv)
1439 if (refcount_dec_and_test(&srv->refcount)) {
1440 struct rtrs_srv_ctx *ctx = srv->ctx;
1442 WARN_ON(srv->dev.kobj.state_in_sysfs);
1444 mutex_lock(&ctx->srv_mutex);
1445 list_del(&srv->ctx_list);
1446 mutex_unlock(&ctx->srv_mutex);
1451 static void __add_path_to_srv(struct rtrs_srv_sess *srv,
1452 struct rtrs_srv_path *srv_path)
1454 list_add_tail(&srv_path->s.entry, &srv->paths_list);
1456 WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
1459 static void del_path_from_srv(struct rtrs_srv_path *srv_path)
1461 struct rtrs_srv_sess *srv = srv_path->srv;
1466 mutex_lock(&srv->paths_mutex);
1467 list_del(&srv_path->s.entry);
1468 WARN_ON(!srv->paths_num);
1470 mutex_unlock(&srv->paths_mutex);
1473 /* Returns 0 (memcmp-style) when the addresses compare equal */
1474 static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
1476 switch (a->sa_family) {
1478 return memcmp(&((struct sockaddr_ib *)a)->sib_addr,
1479 &((struct sockaddr_ib *)b)->sib_addr,
1480 sizeof(struct ib_addr)) &&
1481 (b->sa_family == AF_IB);
1483 return memcmp(&((struct sockaddr_in *)a)->sin_addr,
1484 &((struct sockaddr_in *)b)->sin_addr,
1485 sizeof(struct in_addr)) &&
1486 (b->sa_family == AF_INET);
1488 return memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
1489 &((struct sockaddr_in6 *)b)->sin6_addr,
1490 sizeof(struct in6_addr)) &&
1491 (b->sa_family == AF_INET6);
1497 static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv,
1498 struct rdma_addr *addr)
1500 struct rtrs_srv_path *srv_path;
1502 list_for_each_entry(srv_path, &srv->paths_list, s.entry)
1503 if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr,
1504 (struct sockaddr *)&addr->dst_addr) &&
1505 !sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr,
1506 (struct sockaddr *)&addr->src_addr))
1512 static void free_path(struct rtrs_srv_path *srv_path)
1514 if (srv_path->kobj.state_in_sysfs) {
1515 kobject_del(&srv_path->kobj);
1516 kobject_put(&srv_path->kobj);
1518 free_percpu(srv_path->stats->rdma_stats);
1519 kfree(srv_path->stats);
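/*
 * Tear a path down from close_work context: disconnect and drain all QPs,
 * kill the inflight percpu_ref and wait for outstanding ops, notify the
 * upper layer that the path is down, then release MRs, ops ids, QPs/CQs,
 * the device reference and finally the path itself.
 */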
1524 static void rtrs_srv_close_work(struct work_struct *work)
1526 struct rtrs_srv_path *srv_path;
1527 struct rtrs_srv_con *con;
1530 srv_path = container_of(work, typeof(*srv_path), close_work);
1532 rtrs_srv_destroy_path_files(srv_path);
1533 rtrs_srv_stop_hb(srv_path);
1535 for (i = 0; i < srv_path->s.con_num; i++) {
1536 if (!srv_path->s.con[i])
1538 con = to_srv_con(srv_path->s.con[i]);
1539 rdma_disconnect(con->c.cm_id);
1540 ib_drain_qp(con->c.qp);
1544 * Degrade ref count to the usual model with a single shared
1547 percpu_ref_kill(&srv_path->ids_inflight_ref);
1549 /* Wait for all inflight operations to complete */
1550 wait_for_completion(&srv_path->complete_done);
1552 /* Notify upper layer if we are the last path */
1553 rtrs_srv_path_down(srv_path);
1555 unmap_cont_bufs(srv_path);
1556 rtrs_srv_free_ops_ids(srv_path);
1558 for (i = 0; i < srv_path->s.con_num; i++) {
1559 if (!srv_path->s.con[i])
1561 con = to_srv_con(srv_path->s.con[i]);
1562 rtrs_cq_qp_destroy(&con->c);
1563 rdma_destroy_id(con->c.cm_id);
1566 rtrs_ib_dev_put(srv_path->s.dev);
1568 del_path_from_srv(srv_path);
1569 put_srv(srv_path->srv);
1570 srv_path->srv = NULL;
1571 rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED);
1573 kfree(srv_path->dma_addr);
1574 kfree(srv_path->s.con);
1575 free_path(srv_path);
1578 static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path,
1579 struct rdma_cm_id *cm_id)
1581 struct rtrs_srv_sess *srv = srv_path->srv;
1582 struct rtrs_msg_conn_rsp msg;
1583 struct rdma_conn_param param;
1586 param = (struct rdma_conn_param) {
1587 .rnr_retry_count = 7,
1588 .private_data = &msg,
1589 .private_data_len = sizeof(msg),
1592 msg = (struct rtrs_msg_conn_rsp) {
1593 .magic = cpu_to_le16(RTRS_MAGIC),
1594 .version = cpu_to_le16(RTRS_PROTO_VER),
1595 .queue_depth = cpu_to_le16(srv->queue_depth),
1596 .max_io_size = cpu_to_le32(max_chunk_size - MAX_HDR_SIZE),
1597 .max_hdr_size = cpu_to_le32(MAX_HDR_SIZE),
1600 if (always_invalidate)
1601 msg.flags = cpu_to_le32(RTRS_MSG_NEW_RKEY_F);
1603 err = rdma_accept(cm_id, ¶m);
1605 pr_err("rdma_accept(), err: %d\n", err);
1610 static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
1612 struct rtrs_msg_conn_rsp msg;
1615 msg = (struct rtrs_msg_conn_rsp) {
1616 .magic = cpu_to_le16(RTRS_MAGIC),
1617 .version = cpu_to_le16(RTRS_PROTO_VER),
1618 .errno = cpu_to_le16(errno),
1621 err = rdma_reject(cm_id, &msg, sizeof(msg), IB_CM_REJ_CONSUMER_DEFINED);
1623 pr_err("rdma_reject(), err: %d\n", err);
1625 /* Bounce errno back */
1629 static struct rtrs_srv_path *
1630 __find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid)
1632 struct rtrs_srv_path *srv_path;
1634 list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
1635 if (uuid_equal(&srv_path->s.uuid, sess_uuid))
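/*
 * Create the per-cid connection: size the send/receive WR budgets (the
 * service connection, cid 0, only carries info and heartbeat traffic, and
 * IO connections need more WRs when always_invalidate is enabled), create
 * the CQ/QP pair and, for cid 0, post the receive for the info request.
 */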
1642 static int create_con(struct rtrs_srv_path *srv_path,
1643 struct rdma_cm_id *cm_id,
1646 struct rtrs_srv_sess *srv = srv_path->srv;
1647 struct rtrs_path *s = &srv_path->s;
1648 struct rtrs_srv_con *con;
1650 u32 cq_num, max_send_wr, max_recv_wr, wr_limit;
1653 con = kzalloc(sizeof(*con), GFP_KERNEL);
1659 spin_lock_init(&con->rsp_wr_wait_lock);
1660 INIT_LIST_HEAD(&con->rsp_wr_wait_list);
1661 con->c.cm_id = cm_id;
1662 con->c.path = &srv_path->s;
1664 atomic_set(&con->c.wr_cnt, 1);
1665 wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr;
1667 if (con->c.cid == 0) {
1669 * All receive and all send (each requiring invalidate)
1670 * + 2 for drain and heartbeat
1672 max_send_wr = min_t(int, wr_limit,
1673 SERVICE_CON_QUEUE_DEPTH * 2 + 2);
1674 max_recv_wr = max_send_wr;
1675 s->signal_interval = min_not_zero(srv->queue_depth,
1676 (size_t)SERVICE_CON_QUEUE_DEPTH);
1678 /* when always_invalidate is enabled, we need linv+rinv+mr+imm */
1679 if (always_invalidate)
1681 min_t(int, wr_limit,
1682 srv->queue_depth * (1 + 4) + 1);
1685 min_t(int, wr_limit,
1686 srv->queue_depth * (1 + 2) + 1);
1688 max_recv_wr = srv->queue_depth + 1;
1690 cq_num = max_send_wr + max_recv_wr;
1691 atomic_set(&con->c.sq_wr_avail, max_send_wr);
1692 cq_vector = rtrs_srv_get_next_cq_vector(srv_path);
1694 /* TODO: SOFTIRQ can be faster, but be careful with softirq context */
1695 err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num,
1696 max_send_wr, max_recv_wr,
1699 rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
1702 if (con->c.cid == 0) {
1703 err = post_recv_info_req(con);
1707 WARN_ON(srv_path->s.con[cid]);
1708 srv_path->s.con[cid] = &con->c;
1711 * Change context from server to current connection. The other
1712 * way is to use cm_id->qp->qp_context, which does not work on OFED.
1714 cm_id->context = &con->c;
1719 rtrs_cq_qp_destroy(&con->c);
1727 static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv,
1728 struct rdma_cm_id *cm_id,
1729 unsigned int con_num,
1730 unsigned int recon_cnt,
1733 struct rtrs_srv_path *srv_path;
1736 struct rtrs_addr path;
1738 if (srv->paths_num >= MAX_PATHS_NUM) {
1742 if (__is_path_w_addr_exists(srv, &cm_id->route.addr)) {
1744 pr_err("Path with same addr exists\n");
1747 srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL);
1751 srv_path->stats = kzalloc(sizeof(*srv_path->stats), GFP_KERNEL);
1752 if (!srv_path->stats)
1755 srv_path->stats->rdma_stats = alloc_percpu(struct rtrs_srv_stats_rdma_stats);
1756 if (!srv_path->stats->rdma_stats)
1757 goto err_free_stats;
1759 srv_path->stats->srv_path = srv_path;
1761 srv_path->dma_addr = kcalloc(srv->queue_depth,
1762 sizeof(*srv_path->dma_addr),
1764 if (!srv_path->dma_addr)
1765 goto err_free_percpu;
1767 srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con),
1769 if (!srv_path->s.con)
1770 goto err_free_dma_addr;
1772 srv_path->state = RTRS_SRV_CONNECTING;
1773 srv_path->srv = srv;
1774 srv_path->cur_cq_vector = -1;
1775 srv_path->s.dst_addr = cm_id->route.addr.dst_addr;
1776 srv_path->s.src_addr = cm_id->route.addr.src_addr;
1778 /* temporary until receiving session-name from client */
1779 path.src = &srv_path->s.src_addr;
1780 path.dst = &srv_path->s.dst_addr;
1781 rtrs_addr_to_str(&path, str, sizeof(str));
1782 strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname));
1784 srv_path->s.con_num = con_num;
1785 srv_path->s.irq_con_num = con_num;
1786 srv_path->s.recon_cnt = recon_cnt;
1787 uuid_copy(&srv_path->s.uuid, uuid);
1788 spin_lock_init(&srv_path->state_lock);
1789 INIT_WORK(&srv_path->close_work, rtrs_srv_close_work);
1790 rtrs_srv_init_hb(srv_path);
1792 srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
1793 if (!srv_path->s.dev) {
1797 err = map_cont_bufs(srv_path);
1801 err = rtrs_srv_alloc_ops_ids(srv_path);
1803 goto err_unmap_bufs;
1805 __add_path_to_srv(srv, srv_path);
1810 unmap_cont_bufs(srv_path);
1812 rtrs_ib_dev_put(srv_path->s.dev);
1814 kfree(srv_path->s.con);
1816 kfree(srv_path->dma_addr);
1818 free_percpu(srv_path->stats->rdma_stats);
1820 kfree(srv_path->stats);
1824 return ERR_PTR(err);
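/*
 * Handle an RDMA_CM_EVENT_CONNECT_REQUEST: validate the rtrs_msg_conn_req,
 * look up or create the server session and path, create the connection for
 * the requested cid and either accept or reject the CM id.
 */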
1827 static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
1828 const struct rtrs_msg_conn_req *msg,
1831 struct rtrs_srv_ctx *ctx = cm_id->context;
1832 struct rtrs_srv_path *srv_path;
1833 struct rtrs_srv_sess *srv;
1835 u16 version, con_num, cid;
1837 int err = -ECONNRESET;
1839 if (len < sizeof(*msg)) {
1840 pr_err("Invalid RTRS connection request\n");
1843 if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
1844 pr_err("Invalid RTRS magic\n");
1847 version = le16_to_cpu(msg->version);
1848 if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
1849 pr_err("Unsupported major RTRS version: %d, expected %d\n",
1850 version >> 8, RTRS_PROTO_VER_MAJOR);
1853 con_num = le16_to_cpu(msg->cid_num);
1854 if (con_num > 4096) {
1856 pr_err("Too many connections requested: %d\n", con_num);
1859 cid = le16_to_cpu(msg->cid);
1860 if (cid >= con_num) {
1862 pr_err("Incorrect cid: %d >= %d\n", cid, con_num);
1865 recon_cnt = le16_to_cpu(msg->recon_cnt);
1866 srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn);
1869 pr_err("get_or_create_srv(), error %d\n", err);
1872 mutex_lock(&srv->paths_mutex);
1873 srv_path = __find_path(srv, &msg->sess_uuid);
1875 struct rtrs_path *s = &srv_path->s;
1877 /* Session already holds a reference */
1880 if (srv_path->state != RTRS_SRV_CONNECTING) {
1881 rtrs_err(s, "Session in wrong state: %s\n",
1882 rtrs_srv_state_str(srv_path->state));
1883 mutex_unlock(&srv->paths_mutex);
1889 if (con_num != s->con_num || cid >= s->con_num) {
1890 rtrs_err(s, "Incorrect request: %d, %d\n",
1892 mutex_unlock(&srv->paths_mutex);
1896 rtrs_err(s, "Connection already exists: %d\n",
1898 mutex_unlock(&srv->paths_mutex);
1902 srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt,
1904 if (IS_ERR(srv_path)) {
1905 mutex_unlock(&srv->paths_mutex);
1907 err = PTR_ERR(srv_path);
1908 pr_err("RTRS server session allocation failed: %d\n", err);
1912 err = create_con(srv_path, cm_id, cid);
1914 rtrs_err((&srv_path->s), "create_con(), error %d\n", err);
1915 rtrs_rdma_do_reject(cm_id, err);
1917 * Since the session has other connections, we follow the normal
1918 * path through the workqueue, but still return an error to tell
1919 * cma.c to call rdma_destroy_id() for the current connection.
1921 goto close_and_return_err;
1923 err = rtrs_rdma_do_accept(srv_path, cm_id);
1925 rtrs_err((&srv_path->s), "rtrs_rdma_do_accept(), error %d\n", err);
1926 rtrs_rdma_do_reject(cm_id, err);
1928 * Since the current connection was successfully added to the
1929 * session, we follow the normal path through the workqueue to
1930 * close the session, and thus return 0 to tell cma.c that we
1931 * call rdma_destroy_id() ourselves.
1934 goto close_and_return_err;
1936 mutex_unlock(&srv->paths_mutex);
1941 return rtrs_rdma_do_reject(cm_id, err);
1943 close_and_return_err:
1944 mutex_unlock(&srv->paths_mutex);
1945 close_path(srv_path);
1950 static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
1951 struct rdma_cm_event *ev)
1953 struct rtrs_srv_path *srv_path = NULL;
1954 struct rtrs_path *s = NULL;
1955 struct rtrs_con *c = NULL;
1957 if (ev->event == RDMA_CM_EVENT_CONNECT_REQUEST)
1959 * In case of error cma.c will destroy cm_id,
1960 * see cma_process_remove()
1962 return rtrs_rdma_connect(cm_id, ev->param.conn.private_data,
1963 ev->param.conn.private_data_len);
1967 srv_path = to_srv_path(s);
1969 switch (ev->event) {
1970 case RDMA_CM_EVENT_ESTABLISHED:
1973 case RDMA_CM_EVENT_REJECTED:
1974 case RDMA_CM_EVENT_CONNECT_ERROR:
1975 case RDMA_CM_EVENT_UNREACHABLE:
1976 rtrs_err(s, "CM error (CM event: %s, err: %d)\n",
1977 rdma_event_msg(ev->event), ev->status);
1979 case RDMA_CM_EVENT_DISCONNECTED:
1980 case RDMA_CM_EVENT_ADDR_CHANGE:
1981 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1982 case RDMA_CM_EVENT_DEVICE_REMOVAL:
1983 close_path(srv_path);
1986 pr_err("Ignoring unexpected CM event %s, err %d\n",
1987 rdma_event_msg(ev->event), ev->status);
1994 static struct rdma_cm_id *rtrs_srv_cm_init(struct rtrs_srv_ctx *ctx,
1995 struct sockaddr *addr,
1996 enum rdma_ucm_port_space ps)
1998 struct rdma_cm_id *cm_id;
2001 cm_id = rdma_create_id(&init_net, rtrs_srv_rdma_cm_handler,
2002 ctx, ps, IB_QPT_RC);
2003 if (IS_ERR(cm_id)) {
2004 ret = PTR_ERR(cm_id);
2005 pr_err("Creating id for RDMA connection failed, err: %d\n",
2009 ret = rdma_bind_addr(cm_id, addr);
2011 pr_err("Binding RDMA address failed, err: %d\n", ret);
2014 ret = rdma_listen(cm_id, 64);
2016 pr_err("Listening on RDMA connection failed, err: %d\n",
2024 rdma_destroy_id(cm_id);
2027 return ERR_PTR(ret);
2030 static int rtrs_srv_rdma_init(struct rtrs_srv_ctx *ctx, u16 port)
2032 struct sockaddr_in6 sin = {
2033 .sin6_family = AF_INET6,
2034 .sin6_addr = IN6ADDR_ANY_INIT,
2035 .sin6_port = htons(port),
2037 struct sockaddr_ib sib = {
2038 .sib_family = AF_IB,
2039 .sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port),
2040 .sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL),
2041 .sib_pkey = cpu_to_be16(0xffff),
2043 struct rdma_cm_id *cm_ip, *cm_ib;
2047 * We accept both IPoIB and IB connections, so we need to keep
2048 * two cm id's, one for each socket type and port space.
2049 * If the cm initialization of one of the id's fails, we abort
2052 cm_ip = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sin, RDMA_PS_TCP);
2054 return PTR_ERR(cm_ip);
2056 cm_ib = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sib, RDMA_PS_IB);
2057 if (IS_ERR(cm_ib)) {
2058 ret = PTR_ERR(cm_ib);
2062 ctx->cm_id_ip = cm_ip;
2063 ctx->cm_id_ib = cm_ib;
2068 rdma_destroy_id(cm_ip);
2073 static struct rtrs_srv_ctx *alloc_srv_ctx(struct rtrs_srv_ops *ops)
2075 struct rtrs_srv_ctx *ctx;
2077 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2082 mutex_init(&ctx->srv_mutex);
2083 INIT_LIST_HEAD(&ctx->srv_list);
2088 static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
2090 WARN_ON(!list_empty(&ctx->srv_list));
2091 mutex_destroy(&ctx->srv_mutex);
2095 static int rtrs_srv_add_one(struct ib_device *device)
2097 struct rtrs_srv_ctx *ctx;
2100 mutex_lock(&ib_ctx.ib_dev_mutex);
2101 if (ib_ctx.ib_dev_count)
2105 * Since our CM IDs are NOT bound to any ib device we will create them
2108 ctx = ib_ctx.srv_ctx;
2109 ret = rtrs_srv_rdma_init(ctx, ib_ctx.port);
2112 * We errored out here.
2113 * According to the ib code, if we encounter an error here then the
2114 * error code is ignored, and no more calls to our ops are made.
2116 pr_err("Failed to initialize RDMA connection");
2122 * Keep track of the number of ib devices added
2124 ib_ctx.ib_dev_count++;
2127 mutex_unlock(&ib_ctx.ib_dev_mutex);
2131 static void rtrs_srv_remove_one(struct ib_device *device, void *client_data)
2133 struct rtrs_srv_ctx *ctx;
2135 mutex_lock(&ib_ctx.ib_dev_mutex);
2136 ib_ctx.ib_dev_count--;
2138 if (ib_ctx.ib_dev_count)
2142 * Since our CM IDs are NOT bound to any ib device we will remove them
2143 * only once, when the last device is removed
2145 ctx = ib_ctx.srv_ctx;
2146 rdma_destroy_id(ctx->cm_id_ip);
2147 rdma_destroy_id(ctx->cm_id_ib);
2150 mutex_unlock(&ib_ctx.ib_dev_mutex);
2153 static struct ib_client rtrs_srv_client = {
2154 .name = "rtrs_server",
2155 .add = rtrs_srv_add_one,
2156 .remove = rtrs_srv_remove_one
2160 * rtrs_srv_open() - open RTRS server context
2161 * @ops: callback functions
2162 * @port: port to listen on
2164 * Creates server context with specified callbacks.
2166 * Return a valid pointer on success, otherwise an ERR_PTR()-encoded error.
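 *
 * A minimal usage sketch (the caller-side names are hypothetical and not
 * part of this file); the ops fields match the callbacks invoked above:
 *
 *	static struct rtrs_srv_ops my_ops = {
 *		.rdma_ev = my_rdma_ev,
 *		.link_ev = my_link_ev,
 *	};
 *	...
 *	ctx = rtrs_srv_open(&my_ops, port);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);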
2168 struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
2170 struct rtrs_srv_ctx *ctx;
2173 ctx = alloc_srv_ctx(ops);
2175 return ERR_PTR(-ENOMEM);
2177 mutex_init(&ib_ctx.ib_dev_mutex);
2178 ib_ctx.srv_ctx = ctx;
2181 err = ib_register_client(&rtrs_srv_client);
2184 return ERR_PTR(err);
2189 EXPORT_SYMBOL(rtrs_srv_open);
2191 static void close_paths(struct rtrs_srv_sess *srv)
2193 struct rtrs_srv_path *srv_path;
2195 mutex_lock(&srv->paths_mutex);
2196 list_for_each_entry(srv_path, &srv->paths_list, s.entry)
2197 close_path(srv_path);
2198 mutex_unlock(&srv->paths_mutex);
2201 static void close_ctx(struct rtrs_srv_ctx *ctx)
2203 struct rtrs_srv_sess *srv;
2205 mutex_lock(&ctx->srv_mutex);
2206 list_for_each_entry(srv, &ctx->srv_list, ctx_list)
2208 mutex_unlock(&ctx->srv_mutex);
2209 flush_workqueue(rtrs_wq);
2213 * rtrs_srv_close() - close RTRS server context
2214 * @ctx: pointer to server context
2216 * Closes RTRS server context with all client sessions.
2218 void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
2220 ib_unregister_client(&rtrs_srv_client);
2221 mutex_destroy(&ib_ctx.ib_dev_mutex);
2225 EXPORT_SYMBOL(rtrs_srv_close);
2227 static int check_module_params(void)
2229 if (sess_queue_depth < 1 || sess_queue_depth > MAX_SESS_QUEUE_DEPTH) {
2230 pr_err("Invalid sess_queue_depth value %d, has to be >= %d, <= %d.\n",
2231 sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH);
2234 if (max_chunk_size < MIN_CHUNK_SIZE || !is_power_of_2(max_chunk_size)) {
2235 pr_err("Invalid max_chunk_size value %d, has to be >= %d and should be power of two.\n",
2236 max_chunk_size, MIN_CHUNK_SIZE);
2241 * Check if IB immediate data size is enough to hold the mem_id and the
2242 * offset inside the memory chunk
2244 if ((ilog2(sess_queue_depth - 1) + 1) +
2245 (ilog2(max_chunk_size - 1) + 1) > MAX_IMM_PAYL_BITS) {
2246 pr_err("RDMA immediate size (%db) not enough to encode %d buffers of size %dB. Reduce 'sess_queue_depth' or 'max_chunk_size' parameters.\n",
2247 MAX_IMM_PAYL_BITS, sess_queue_depth, max_chunk_size);
2254 static int __init rtrs_server_init(void)
2258 pr_info("Loading module %s, proto %s: (max_chunk_size: %d (pure IO %ld, headers %ld) , sess_queue_depth: %d, always_invalidate: %d)\n",
2259 KBUILD_MODNAME, RTRS_PROTO_VER_STRING,
2260 max_chunk_size, max_chunk_size - MAX_HDR_SIZE, MAX_HDR_SIZE,
2261 sess_queue_depth, always_invalidate);
2263 rtrs_rdma_dev_pd_init(0, &dev_pd);
2265 err = check_module_params();
2267 pr_err("Failed to load module, invalid module parameters, err: %d\n",
2271 err = class_register(&rtrs_dev_class);
2275 rtrs_wq = alloc_workqueue("rtrs_server_wq", 0, 0);
2284 class_unregister(&rtrs_dev_class);
2289 static void __exit rtrs_server_exit(void)
2291 destroy_workqueue(rtrs_wq);
2292 class_unregister(&rtrs_dev_class);
2293 rtrs_rdma_dev_pd_deinit(&dev_pd);
2296 module_init(rtrs_server_init);
2297 module_exit(rtrs_server_exit);