1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
11 #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
13 #include <linux/module.h>
14 #include <linux/rculist.h>
15 #include <linux/random.h>
20 #define RTRS_CONNECT_TIMEOUT_MS 30000
22 * Wait a bit before trying to reconnect after a failure
23 * in order to give the server time to finish its clean up, which
24 * otherwise leads to "false positive" failed reconnect attempts
26 #define RTRS_RECONNECT_BACKOFF 1000
28 * Wait for an additional random time of up to 8 seconds
29 * before starting to reconnect, to avoid clients reconnecting
30 * all at once in case of a major network outage
32 #define RTRS_RECONNECT_SEED 8
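/*
 * For example (hypothetical numbers): with clt->reconnect_delay_sec == 30 a
 * failed path is re-queued for reconnect roughly 30000 ms later plus a small
 * random jitter derived from RTRS_RECONNECT_SEED, and the reconnect worker
 * additionally sleeps RTRS_RECONNECT_BACKOFF ms before tearing down the old
 * connections of the path being re-established.
 */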
34 #define FIRST_CONN 0x01
36 MODULE_DESCRIPTION("RDMA Transport Client");
37 MODULE_LICENSE("GPL");
39 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
40 static struct rtrs_rdma_dev_pd dev_pd = {
44 static struct workqueue_struct *rtrs_wq;
45 static struct class *rtrs_clt_dev_class;
47 static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt)
49 struct rtrs_clt_sess *sess;
50 bool connected = false;
53 list_for_each_entry_rcu(sess, &clt->paths_list, s.entry)
54 connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED;
60 static struct rtrs_permit *
61 __rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
63 size_t max_depth = clt->queue_depth;
64 struct rtrs_permit *permit;
68 * Adapted from null_blk get_tag(). Callers from different cpus may
69 * grab the same bit, since find_first_zero_bit is not atomic.
70 * But then the test_and_set_bit_lock will fail for all the
71 * callers but one, so that they will loop again.
72 * This way an explicit spinlock is not required.
75 bit = find_first_zero_bit(clt->permits_map, max_depth);
76 if (unlikely(bit >= max_depth))
78 } while (unlikely(test_and_set_bit_lock(bit, clt->permits_map)));
80 permit = get_permit(clt, bit);
81 WARN_ON(permit->mem_id != bit);
82 permit->cpu_id = raw_smp_processor_id();
83 permit->con_type = con_type;
88 static inline void __rtrs_put_permit(struct rtrs_clt *clt,
89 struct rtrs_permit *permit)
91 clear_bit_unlock(permit->mem_id, clt->permits_map);
95 * rtrs_clt_get_permit() - allocates permit for future RDMA operation
96 * @clt: Current session
97 * @con_type: Type of connection to use with the permit
98 * @can_wait: Wait type
101 * Allocates permit for the following RDMA operation. Permit is used
102 * to preallocate all resources and to propagate memory pressure up earlier.
106 * Can sleep if @can_wait == RTRS_PERMIT_WAIT
108 struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt,
109 enum rtrs_clt_con_type con_type,
110 enum wait_type can_wait)
112 struct rtrs_permit *permit;
115 permit = __rtrs_get_permit(clt, con_type);
116 if (likely(permit) || !can_wait)
120 prepare_to_wait(&clt->permits_wait, &wait,
121 TASK_UNINTERRUPTIBLE);
122 permit = __rtrs_get_permit(clt, con_type);
129 finish_wait(&clt->permits_wait, &wait);
133 EXPORT_SYMBOL(rtrs_clt_get_permit);
136 * rtrs_clt_put_permit() - puts allocated permit
137 * @clt: Current session
138 * @permit: Permit to be freed
143 void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit)
145 if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
148 __rtrs_put_permit(clt, permit);
151 * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
152 * before calling schedule(). So if rtrs_clt_get_permit() is sleeping
153 * it must have added itself to &clt->permits_wait before
154 * __rtrs_put_permit() finished.
155 * Hence it is safe to guard wake_up() with a waitqueue_active() test.
157 if (waitqueue_active(&clt->permits_wait))
158 wake_up(&clt->permits_wait);
160 EXPORT_SYMBOL(rtrs_clt_put_permit);
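/*
 * Usage sketch (illustrative only, not part of the driver): how an upper
 * layer such as a block device driver might pair the two exported calls
 * above.  The helper name is hypothetical; RTRS_PERMIT_NOWAIT and
 * RTRS_PERMIT_WAIT are assumed to be the wait types declared in rtrs.h.
 */
static int example_issue_io(struct rtrs_clt *clt)
{
	struct rtrs_permit *permit;

	/* Fail fast under memory pressure instead of sleeping */
	permit = rtrs_clt_get_permit(clt, RTRS_IO_CON, RTRS_PERMIT_NOWAIT);
	if (!permit)
		return -EBUSY;

	/* ... build and post the RDMA request using this permit ... */

	/* Return the permit; this wakes a sleeping rtrs_clt_get_permit() */
	rtrs_clt_put_permit(clt, permit);
	return 0;
}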
163 * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
164 * @sess: client session pointer
165 * @permit: permit for the allocation of the RDMA buffer
167 * IO connections start from 1.
168 * Connection 0 is for user messages.
171 struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
172 struct rtrs_permit *permit)
176 if (likely(permit->con_type == RTRS_IO_CON))
177 id = (permit->cpu_id % (sess->s.irq_con_num - 1)) + 1;
179 return to_clt_con(sess->s.con[id]);
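/*
 * Worked example (illustrative): with s.irq_con_num == 5 there are four
 * IO connections (ids 1..4); a permit whose cpu_id is 6 maps to
 * connection (6 % 4) + 1 == 3, while any other con_type uses
 * connection 0, the user-message connection.
 */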
183 * rtrs_clt_change_state() - change the session state through the session state machine
186 * @sess: client session to change the state of.
187 * @new_state: state to change to.
189 * Returns true if sess's state is changed to the new state, otherwise returns false.
192 * state_wq lock must be held.
194 static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
195 enum rtrs_clt_state new_state)
197 enum rtrs_clt_state old_state;
198 bool changed = false;
200 lockdep_assert_held(&sess->state_wq.lock);
202 old_state = sess->state;
204 case RTRS_CLT_CONNECTING:
206 case RTRS_CLT_RECONNECTING:
213 case RTRS_CLT_RECONNECTING:
215 case RTRS_CLT_CONNECTED:
216 case RTRS_CLT_CONNECTING_ERR:
217 case RTRS_CLT_CLOSED:
224 case RTRS_CLT_CONNECTED:
226 case RTRS_CLT_CONNECTING:
233 case RTRS_CLT_CONNECTING_ERR:
235 case RTRS_CLT_CONNECTING:
242 case RTRS_CLT_CLOSING:
244 case RTRS_CLT_CONNECTING:
245 case RTRS_CLT_CONNECTING_ERR:
246 case RTRS_CLT_RECONNECTING:
247 case RTRS_CLT_CONNECTED:
254 case RTRS_CLT_CLOSED:
256 case RTRS_CLT_CLOSING:
265 case RTRS_CLT_CLOSED:
276 sess->state = new_state;
277 wake_up_locked(&sess->state_wq);
283 static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess,
284 enum rtrs_clt_state old_state,
285 enum rtrs_clt_state new_state)
287 bool changed = false;
289 spin_lock_irq(&sess->state_wq.lock);
290 if (sess->state == old_state)
291 changed = rtrs_clt_change_state(sess, new_state);
292 spin_unlock_irq(&sess->state_wq.lock);
297 static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
299 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
301 if (rtrs_clt_change_state_from_to(sess,
303 RTRS_CLT_RECONNECTING)) {
304 struct rtrs_clt *clt = sess->clt;
305 unsigned int delay_ms;
308 * Normal scenario, reconnect if we were successfully connected
310 delay_ms = clt->reconnect_delay_sec * 1000;
311 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
312 msecs_to_jiffies(delay_ms +
313 prandom_u32() % RTRS_RECONNECT_SEED));
316 * An error can happen only while establishing a new connection,
317 * so notify the waiter with the error state; the waiter is responsible
318 * for cleaning up the rest and reconnecting if needed.
320 rtrs_clt_change_state_from_to(sess,
322 RTRS_CLT_CONNECTING_ERR);
326 static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
328 struct rtrs_clt_con *con = cq->cq_context;
330 if (unlikely(wc->status != IB_WC_SUCCESS)) {
331 rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
332 ib_wc_status_msg(wc->status));
333 rtrs_rdma_error_recovery(con);
337 static struct ib_cqe fast_reg_cqe = {
338 .done = rtrs_clt_fast_reg_done
341 static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
342 bool notify, bool can_wait);
344 static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
346 struct rtrs_clt_io_req *req =
347 container_of(wc->wr_cqe, typeof(*req), inv_cqe);
348 struct rtrs_clt_con *con = cq->cq_context;
350 if (unlikely(wc->status != IB_WC_SUCCESS)) {
351 rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
352 ib_wc_status_msg(wc->status));
353 rtrs_rdma_error_recovery(con);
355 req->need_inv = false;
356 if (likely(req->need_inv_comp))
357 complete(&req->inv_comp);
359 /* Complete request from INV callback */
360 complete_rdma_req(req, req->inv_errno, true, false);
363 static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
365 struct rtrs_clt_con *con = req->con;
366 struct ib_send_wr wr = {
367 .opcode = IB_WR_LOCAL_INV,
368 .wr_cqe = &req->inv_cqe,
369 .send_flags = IB_SEND_SIGNALED,
370 .ex.invalidate_rkey = req->mr->rkey,
372 req->inv_cqe.done = rtrs_clt_inv_rkey_done;
374 return ib_post_send(con->c.qp, &wr, NULL);
377 static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
378 bool notify, bool can_wait)
380 struct rtrs_clt_con *con = req->con;
381 struct rtrs_clt_sess *sess;
384 if (WARN_ON(!req->in_use))
386 if (WARN_ON(!req->con))
388 sess = to_clt_sess(con->c.sess);
391 if (unlikely(req->dir == DMA_FROM_DEVICE && req->need_inv)) {
393 * We are here to invalidate read requests
394 * ourselves. In the normal scenario the server should
395 * send INV for all read requests, but since
396 * we are here, one of two things has happened:
398 * 1. this is failover, when errno != 0
401 * 2. something totally bad happened and
402 * server forgot to send INV, so we
403 * should do that ourselves.
406 if (likely(can_wait)) {
407 req->need_inv_comp = true;
409 /* This should be IO path, so always notify */
411 /* Save errno for INV callback */
412 req->inv_errno = errno;
415 err = rtrs_inv_rkey(req);
417 rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
419 } else if (likely(can_wait)) {
420 wait_for_completion(&req->inv_comp);
423 * Something went wrong, so request will be
424 * completed from INV callback.
431 ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
432 req->sg_cnt, req->dir);
434 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
435 atomic_dec(&sess->stats->inflight);
441 req->conf(req->priv, errno);
444 static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
445 struct rtrs_clt_io_req *req,
446 struct rtrs_rbuf *rbuf, u32 off,
447 u32 imm, struct ib_send_wr *wr)
449 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
450 enum ib_send_flags flags;
453 if (unlikely(!req->sg_size)) {
454 rtrs_wrn(con->c.sess,
455 "Doing RDMA Write failed, no data supplied\n");
459 /* user data and user message in the first list element */
460 sge.addr = req->iu->dma_addr;
461 sge.length = req->sg_size;
462 sge.lkey = sess->s.dev->ib_pd->local_dma_lkey;
465 * From time to time we have to post signalled sends,
466 * or the send queue will fill up and only a QP reset can help.
468 flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
469 0 : IB_SEND_SIGNALED;
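/*
 * For example (hypothetical numbers): with sess->queue_depth == 128 every
 * 128th post on this connection carries IB_SEND_SIGNALED, so a completion is
 * generated periodically and the accumulated unsignalled send WRs get reaped.
 */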
471 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
472 req->sg_size, DMA_TO_DEVICE);
474 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
475 rbuf->rkey, rbuf->addr + off,
479 static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id,
480 s16 errno, bool w_inval)
482 struct rtrs_clt_io_req *req;
484 if (WARN_ON(msg_id >= sess->queue_depth))
487 req = &sess->reqs[msg_id];
488 /* Drop need_inv if server responded with send with invalidation */
489 req->need_inv &= !w_inval;
490 complete_rdma_req(req, errno, true, false);
493 static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
497 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
499 WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
500 iu = container_of(wc->wr_cqe, struct rtrs_iu,
502 err = rtrs_iu_post_recv(&con->c, iu);
504 rtrs_err(con->c.sess, "post iu failed %d\n", err);
505 rtrs_rdma_error_recovery(con);
509 static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
511 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
512 struct rtrs_msg_rkey_rsp *msg;
513 u32 imm_type, imm_payload;
514 bool w_inval = false;
519 WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
521 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
523 if (unlikely(wc->byte_len < sizeof(*msg))) {
524 rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
528 ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
529 iu->size, DMA_FROM_DEVICE);
531 if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP)) {
532 rtrs_err(sess->clt, "rkey response is malformed: type %d\n",
533 le16_to_cpu(msg->type));
536 buf_id = le16_to_cpu(msg->buf_id);
537 if (WARN_ON(buf_id >= sess->queue_depth))
540 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
541 if (likely(imm_type == RTRS_IO_RSP_IMM ||
542 imm_type == RTRS_IO_RSP_W_INV_IMM)) {
545 w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
546 rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
548 if (WARN_ON(buf_id != msg_id))
550 sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
551 process_io_rsp(sess, msg_id, err, w_inval);
553 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr,
554 iu->size, DMA_FROM_DEVICE);
555 return rtrs_clt_recv_done(con, wc);
557 rtrs_rdma_error_recovery(con);
560 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
562 static struct ib_cqe io_comp_cqe = {
563 .done = rtrs_clt_rdma_done
567 * Post x2 empty WRs: first is for this RDMA with IMM,
568 * second is for RECV with INV, which happened earlier.
570 static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
572 struct ib_recv_wr wr_arr[2], *wr;
575 memset(wr_arr, 0, sizeof(wr_arr));
576 for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
580 /* Chain backwards */
581 wr->next = &wr_arr[i - 1];
584 return ib_post_recv(con->qp, wr, NULL);
587 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
589 struct rtrs_clt_con *con = cq->cq_context;
590 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
591 u32 imm_type, imm_payload;
592 bool w_inval = false;
595 if (unlikely(wc->status != IB_WC_SUCCESS)) {
596 if (wc->status != IB_WC_WR_FLUSH_ERR) {
597 rtrs_err(sess->clt, "RDMA failed: %s\n",
598 ib_wc_status_msg(wc->status));
599 rtrs_rdma_error_recovery(con);
603 rtrs_clt_update_wc_stats(con);
605 switch (wc->opcode) {
606 case IB_WC_RECV_RDMA_WITH_IMM:
608 * post_recv() RDMA write completions of IO reqs (read/write)
611 if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
613 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
614 &imm_type, &imm_payload);
615 if (likely(imm_type == RTRS_IO_RSP_IMM ||
616 imm_type == RTRS_IO_RSP_W_INV_IMM)) {
619 w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
620 rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
622 process_io_rsp(sess, msg_id, err, w_inval);
623 } else if (imm_type == RTRS_HB_MSG_IMM) {
625 rtrs_send_hb_ack(&sess->s);
626 if (sess->flags & RTRS_MSG_NEW_RKEY_F)
627 return rtrs_clt_recv_done(con, wc);
628 } else if (imm_type == RTRS_HB_ACK_IMM) {
630 sess->s.hb_missed_cnt = 0;
631 if (sess->flags & RTRS_MSG_NEW_RKEY_F)
632 return rtrs_clt_recv_done(con, wc);
634 rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
639 * Post x2 empty WRs: first is for this RDMA with IMM,
640 * second is for RECV with INV, which happened earlier.
642 err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
644 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
646 rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
648 rtrs_rdma_error_recovery(con);
654 * Key invalidations from server side
656 WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
657 wc->wc_flags & IB_WC_WITH_IMM));
658 WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
659 if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
660 if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
661 return rtrs_clt_recv_done(con, wc);
663 return rtrs_clt_rkey_rsp_done(con, wc);
666 case IB_WC_RDMA_WRITE:
668 * post_send() RDMA write completions of IO reqs (read/write)
673 rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode);
678 static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
681 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
683 for (i = 0; i < q_size; i++) {
684 if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
685 struct rtrs_iu *iu = &con->rsp_ius[i];
687 err = rtrs_iu_post_recv(&con->c, iu);
689 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
698 static int post_recv_sess(struct rtrs_clt_sess *sess)
703 for (cid = 0; cid < sess->s.con_num; cid++) {
705 q_size = SERVICE_CON_QUEUE_DEPTH;
707 q_size = sess->queue_depth;
710 * x2 for RDMA read responses + FR key invalidations,
711 * RDMA writes do not require any FR registrations.
715 err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
717 rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err);
727 struct list_head skip_list;
728 struct rtrs_clt *clt;
729 struct rtrs_clt_sess *(*next_path)(struct path_it *it);
733 * list_next_or_null_rr_rcu - get next list element in round-robin fashion.
734 * @head: the head for the list.
735 * @ptr: the list head to take the next element from.
736 * @type: the type of the struct this is embedded in.
737 * @memb: the name of the list_head within the struct.
739 * The next element is returned in round-robin fashion, i.e. the head will be skipped,
740 * but if the list is observed to be empty, NULL will be returned.
742 * This primitive may safely run concurrently with the _rcu list-mutation
743 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
745 #define list_next_or_null_rr_rcu(head, ptr, type, memb) \
746 ({ \
747 list_next_or_null_rcu(head, ptr, type, memb) ?: \
748 list_next_or_null_rcu(head, READ_ONCE((ptr)->next), \
749 type, memb); \
750 })
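/*
 * Usage sketch (illustrative only, not part of the driver): round-robin
 * stepping through clt->paths_list with the macro above.  Must run under
 * rcu_read_lock(); the helper name is hypothetical.
 */
static struct rtrs_clt_sess *example_next_path_rr(struct rtrs_clt *clt,
						  struct rtrs_clt_sess *cur)
{
	/* Skips the list head; returns NULL only if the list looks empty */
	return list_next_or_null_rr_rcu(&clt->paths_list, &cur->s.entry,
					struct rtrs_clt_sess, s.entry);
}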
753 * get_next_path_rr() - Returns path in round-robin fashion.
754 * @it: the path pointer
756 * Related to @MP_POLICY_RR
759 * rcu_read_lock() must be held.
761 static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
763 struct rtrs_clt_sess __rcu **ppcpu_path;
764 struct rtrs_clt_sess *path;
765 struct rtrs_clt *clt;
770 * Here we use two RCU objects: @paths_list and @pcpu_path
771 * pointer. See rtrs_clt_remove_path_from_arr() for details
772 * on how that is handled.
775 ppcpu_path = this_cpu_ptr(clt->pcpu_path);
776 path = rcu_dereference(*ppcpu_path);
778 path = list_first_or_null_rcu(&clt->paths_list,
779 typeof(*path), s.entry);
781 path = list_next_or_null_rr_rcu(&clt->paths_list,
785 rcu_assign_pointer(*ppcpu_path, path);
791 * get_next_path_min_inflight() - Returns path with minimal inflight count.
792 * @it: the path pointer
794 * Related to @MP_POLICY_MIN_INFLIGHT
797 * rcu_read_lock() must be held.
799 static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
801 struct rtrs_clt_sess *min_path = NULL;
802 struct rtrs_clt *clt = it->clt;
803 struct rtrs_clt_sess *sess;
804 int min_inflight = INT_MAX;
807 list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
808 if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
811 inflight = atomic_read(&sess->stats->inflight);
813 if (inflight < min_inflight) {
814 min_inflight = inflight;
820 * add the path to the skip list, so that next time we can get a different one
824 list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
829 static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
831 INIT_LIST_HEAD(&it->skip_list);
835 if (clt->mp_policy == MP_POLICY_RR)
836 it->next_path = get_next_path_rr;
838 it->next_path = get_next_path_min_inflight;
841 static inline void path_it_deinit(struct path_it *it)
843 struct list_head *skip, *tmp;
845 * The skip_list is used only for the MIN_INFLIGHT policy.
846 * We need to remove paths from it, so that the next IO can insert
847 * paths (->mp_skip_entry) into the skip_list again.
849 list_for_each_safe(skip, tmp, &it->skip_list)
854 * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
855 * about an inflight IO.
856 * The user buffer holding user control message (not data) is copied into
857 * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
858 * also hold the control message of rtrs.
859 * @req: an io request holding information about IO.
860 * @sess: client session
861 * @conf: confirmation callback function to notify upper layer.
862 * @permit: permit for allocation of RDMA remote buffer
863 * @priv: private pointer
864 * @vec: kernel vector containing control message
865 * @usr_len: length of the user message
866 * @sg: scatter list for IO data
867 * @sg_cnt: number of scatter list entries
868 * @data_len: length of the IO data
869 * @dir: direction of the IO.
871 static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
872 struct rtrs_clt_sess *sess,
873 void (*conf)(void *priv, int errno),
874 struct rtrs_permit *permit, void *priv,
875 const struct kvec *vec, size_t usr_len,
876 struct scatterlist *sg, size_t sg_cnt,
877 size_t data_len, int dir)
879 struct iov_iter iter;
882 req->permit = permit;
884 req->usr_len = usr_len;
885 req->data_len = data_len;
887 req->sg_cnt = sg_cnt;
890 req->con = rtrs_permit_to_clt_con(sess, permit);
892 req->need_inv = false;
893 req->need_inv_comp = false;
896 iov_iter_kvec(&iter, READ, vec, 1, usr_len);
897 len = _copy_from_iter(req->iu->buf, usr_len, &iter);
898 WARN_ON(len != usr_len);
900 reinit_completion(&req->inv_comp);
903 static struct rtrs_clt_io_req *
904 rtrs_clt_get_req(struct rtrs_clt_sess *sess,
905 void (*conf)(void *priv, int errno),
906 struct rtrs_permit *permit, void *priv,
907 const struct kvec *vec, size_t usr_len,
908 struct scatterlist *sg, size_t sg_cnt,
909 size_t data_len, int dir)
911 struct rtrs_clt_io_req *req;
913 req = &sess->reqs[permit->mem_id];
914 rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len,
915 sg, sg_cnt, data_len, dir);
919 static struct rtrs_clt_io_req *
920 rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
921 struct rtrs_clt_io_req *fail_req)
923 struct rtrs_clt_io_req *req;
925 .iov_base = fail_req->iu->buf,
926 .iov_len = fail_req->usr_len
929 req = &alive_sess->reqs[fail_req->permit->mem_id];
930 rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit,
931 fail_req->priv, &vec, fail_req->usr_len,
932 fail_req->sglist, fail_req->sg_cnt,
933 fail_req->data_len, fail_req->dir);
937 static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
938 struct rtrs_clt_io_req *req,
939 struct rtrs_rbuf *rbuf,
942 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
943 struct ib_sge *sge = req->sge;
944 enum ib_send_flags flags;
945 struct scatterlist *sg;
949 for_each_sg(req->sglist, sg, req->sg_cnt, i) {
950 sge[i].addr = sg_dma_address(sg);
951 sge[i].length = sg_dma_len(sg);
952 sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
954 sge[i].addr = req->iu->dma_addr;
955 sge[i].length = size;
956 sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
958 num_sge = 1 + req->sg_cnt;
961 * From time to time we have to post signalled sends,
962 * or the send queue will fill up and only a QP reset can help.
964 flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
965 0 : IB_SEND_SIGNALED;
967 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
968 size, DMA_TO_DEVICE);
970 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
971 rbuf->rkey, rbuf->addr, imm,
975 static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
977 struct rtrs_clt_con *con = req->con;
978 struct rtrs_sess *s = con->c.sess;
979 struct rtrs_clt_sess *sess = to_clt_sess(s);
980 struct rtrs_msg_rdma_write *msg;
982 struct rtrs_rbuf *rbuf;
986 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
988 if (unlikely(tsize > sess->chunk_size)) {
989 rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
990 tsize, sess->chunk_size);
994 count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist,
995 req->sg_cnt, req->dir);
996 if (unlikely(!count)) {
997 rtrs_wrn(s, "Write request failed, map failed\n");
1001 /* put rtrs msg after sg and user message */
1002 msg = req->iu->buf + req->usr_len;
1003 msg->type = cpu_to_le16(RTRS_MSG_WRITE);
1004 msg->usr_len = cpu_to_le16(req->usr_len);
1006 /* rtrs message on server side will be after user data and message */
1007 imm = req->permit->mem_off + req->data_len + req->usr_len;
1008 imm = rtrs_to_io_req_imm(imm);
1009 buf_id = req->permit->mem_id;
1010 req->sg_size = tsize;
1011 rbuf = &sess->rbufs[buf_id];
1014 * Update stats now; after the request is successfully sent it is
1015 * no longer safe to touch it.
1017 rtrs_clt_update_all_stats(req, WRITE);
1019 ret = rtrs_post_rdma_write_sg(req->con, req, rbuf,
1020 req->usr_len + sizeof(*msg),
1022 if (unlikely(ret)) {
1023 rtrs_err(s, "Write request failed: %d\n", ret);
1024 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
1025 atomic_dec(&sess->stats->inflight);
1027 ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
1028 req->sg_cnt, req->dir);
1034 static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
1038 /* Align the MR to a 4K page size to match the block virt boundary */
1039 nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
1042 if (unlikely(nr < req->sg_cnt))
1044 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
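/*
 * The rkey increment above bumps the key portion of the MR's rkey before the
 * next registration, so a stale rkey handed out for a previous request can no
 * longer be used by the remote side.
 */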
1049 static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
1051 struct rtrs_clt_con *con = req->con;
1052 struct rtrs_sess *s = con->c.sess;
1053 struct rtrs_clt_sess *sess = to_clt_sess(s);
1054 struct rtrs_msg_rdma_read *msg;
1055 struct rtrs_ib_dev *dev;
1057 struct ib_reg_wr rwr;
1058 struct ib_send_wr *wr = NULL;
1063 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
1068 if (unlikely(tsize > sess->chunk_size)) {
1070 "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
1071 tsize, sess->chunk_size);
1076 count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
1078 if (unlikely(!count)) {
1080 "Read request failed, dma map failed\n");
1084 /* put our message into req->buf after the user message */
1085 msg = req->iu->buf + req->usr_len;
1086 msg->type = cpu_to_le16(RTRS_MSG_READ);
1087 msg->usr_len = cpu_to_le16(req->usr_len);
1090 ret = rtrs_map_sg_fr(req, count);
1093 "Read request failed, failed to map fast reg. data, err: %d\n",
1095 ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
1099 rwr = (struct ib_reg_wr) {
1100 .wr.opcode = IB_WR_REG_MR,
1101 .wr.wr_cqe = &fast_reg_cqe,
1103 .key = req->mr->rkey,
1104 .access = (IB_ACCESS_LOCAL_WRITE |
1105 IB_ACCESS_REMOTE_WRITE),
1109 msg->sg_cnt = cpu_to_le16(1);
1110 msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F);
1112 msg->desc[0].addr = cpu_to_le64(req->mr->iova);
1113 msg->desc[0].key = cpu_to_le32(req->mr->rkey);
1114 msg->desc[0].len = cpu_to_le32(req->mr->length);
1116 /* Further invalidation is required */
1117 req->need_inv = !!RTRS_MSG_NEED_INVAL_F;
1124 * rtrs message will be after the space reserved for disk data and the user message
1127 imm = req->permit->mem_off + req->data_len + req->usr_len;
1128 imm = rtrs_to_io_req_imm(imm);
1129 buf_id = req->permit->mem_id;
1131 req->sg_size = sizeof(*msg);
1132 req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc);
1133 req->sg_size += req->usr_len;
1136 * Update stats now; after the request is successfully sent it is
1137 * no longer safe to touch it.
1139 rtrs_clt_update_all_stats(req, READ);
1141 ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
1142 req->data_len, imm, wr);
1143 if (unlikely(ret)) {
1144 rtrs_err(s, "Read request failed: %d\n", ret);
1145 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
1146 atomic_dec(&sess->stats->inflight);
1147 req->need_inv = false;
1149 ib_dma_unmap_sg(dev->ib_dev, req->sglist,
1150 req->sg_cnt, req->dir);
1157 * rtrs_clt_failover_req() - Try to find an active path for a failed request
1159 * @fail_req: a failed io request.
1161 static int rtrs_clt_failover_req(struct rtrs_clt *clt,
1162 struct rtrs_clt_io_req *fail_req)
1164 struct rtrs_clt_sess *alive_sess;
1165 struct rtrs_clt_io_req *req;
1166 int err = -ECONNABORTED;
1170 for (path_it_init(&it, clt);
1171 (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num;
1173 if (unlikely(READ_ONCE(alive_sess->state) !=
1174 RTRS_CLT_CONNECTED))
1176 req = rtrs_clt_get_copy_req(alive_sess, fail_req);
1177 if (req->dir == DMA_TO_DEVICE)
1178 err = rtrs_clt_write_req(req);
1180 err = rtrs_clt_read_req(req);
1181 if (unlikely(err)) {
1182 req->in_use = false;
1186 rtrs_clt_inc_failover_cnt(alive_sess->stats);
1189 path_it_deinit(&it);
1195 static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
1197 struct rtrs_clt *clt = sess->clt;
1198 struct rtrs_clt_io_req *req;
1203 for (i = 0; i < sess->queue_depth; ++i) {
1204 req = &sess->reqs[i];
1209 * Safely (without notification) complete the failed request.
1210 * After completion this request is still usable and can
1211 * be failed over to another path.
1213 complete_rdma_req(req, -ECONNABORTED, false, true);
1215 err = rtrs_clt_failover_req(clt, req);
1217 /* Failover failed, notify anyway */
1218 req->conf(req->priv, err);
1222 static void free_sess_reqs(struct rtrs_clt_sess *sess)
1224 struct rtrs_clt_io_req *req;
1229 for (i = 0; i < sess->queue_depth; ++i) {
1230 req = &sess->reqs[i];
1232 ib_dereg_mr(req->mr);
1234 rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1);
1240 static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
1242 struct rtrs_clt_io_req *req;
1243 struct rtrs_clt *clt = sess->clt;
1244 int i, err = -ENOMEM;
1246 sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs),
1251 for (i = 0; i < sess->queue_depth; ++i) {
1252 req = &sess->reqs[i];
1253 req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL,
1254 sess->s.dev->ib_dev,
1256 rtrs_clt_rdma_done);
1260 req->sge = kmalloc_array(clt->max_segments + 1,
1261 sizeof(*req->sge), GFP_KERNEL);
1265 req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
1266 sess->max_pages_per_mr);
1267 if (IS_ERR(req->mr)) {
1268 err = PTR_ERR(req->mr);
1270 pr_err("Failed to alloc sess->max_pages_per_mr %d\n",
1271 sess->max_pages_per_mr);
1275 init_completion(&req->inv_comp);
1281 free_sess_reqs(sess);
1286 static int alloc_permits(struct rtrs_clt *clt)
1288 unsigned int chunk_bits;
1291 clt->permits_map = kcalloc(BITS_TO_LONGS(clt->queue_depth),
1292 sizeof(long), GFP_KERNEL);
1293 if (!clt->permits_map) {
1297 clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
1298 if (!clt->permits) {
1302 chunk_bits = ilog2(clt->queue_depth - 1) + 1;
1303 for (i = 0; i < clt->queue_depth; i++) {
1304 struct rtrs_permit *permit;
1306 permit = get_permit(clt, i);
1308 permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits);
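/*
 * Worked example (hypothetical numbers): with clt->queue_depth == 512,
 * chunk_bits == ilog2(511) + 1 == 9, so the permit index lands in the top 9
 * bits of the immediate payload and the remaining low bits are left free to
 * encode the byte offset inside that permit's memory chunk.
 */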
1314 kfree(clt->permits_map);
1315 clt->permits_map = NULL;
1320 static void free_permits(struct rtrs_clt *clt)
1322 if (clt->permits_map) {
1323 size_t sz = clt->queue_depth;
1325 wait_event(clt->permits_wait,
1326 find_first_bit(clt->permits_map, sz) >= sz);
1328 kfree(clt->permits_map);
1329 clt->permits_map = NULL;
1330 kfree(clt->permits);
1331 clt->permits = NULL;
1334 static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
1336 struct ib_device *ib_dev;
1337 u64 max_pages_per_mr;
1340 ib_dev = sess->s.dev->ib_dev;
1343 * Use the smallest page size supported by the HCA, down to a
1344 * minimum of 4096 bytes. We're unlikely to build large sglists
1345 * out of smaller entries.
1347 mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
1348 max_pages_per_mr = ib_dev->attrs.max_mr_size;
1349 do_div(max_pages_per_mr, (1ull << mr_page_shift));
1350 sess->max_pages_per_mr =
1351 min3(sess->max_pages_per_mr, (u32)max_pages_per_mr,
1352 ib_dev->attrs.max_fast_reg_page_list_len);
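/*
 * Worked example (hypothetical numbers): an HCA whose smallest supported page
 * size is 4 KiB gives mr_page_shift == 12; with attrs.max_mr_size == 256 MiB
 * that allows 65536 pages per MR, which is then clamped by the session's
 * current max_pages_per_mr and by attrs.max_fast_reg_page_list_len.
 */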
1353 sess->max_send_sge = ib_dev->attrs.max_send_sge;
1356 static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
1357 enum rtrs_clt_state new_state,
1358 enum rtrs_clt_state *old_state)
1362 spin_lock_irq(&sess->state_wq.lock);
1364 *old_state = sess->state;
1365 changed = rtrs_clt_change_state(sess, new_state);
1366 spin_unlock_irq(&sess->state_wq.lock);
1371 static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
1373 struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
1375 rtrs_rdma_error_recovery(con);
1378 static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
1380 rtrs_init_hb(&sess->s, &io_comp_cqe,
1381 RTRS_HB_INTERVAL_MS,
1383 rtrs_clt_hb_err_handler,
1387 static void rtrs_clt_start_hb(struct rtrs_clt_sess *sess)
1389 rtrs_start_hb(&sess->s);
1392 static void rtrs_clt_stop_hb(struct rtrs_clt_sess *sess)
1394 rtrs_stop_hb(&sess->s);
1397 static void rtrs_clt_reconnect_work(struct work_struct *work);
1398 static void rtrs_clt_close_work(struct work_struct *work);
1400 static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
1401 const struct rtrs_addr *path,
1402 size_t con_num, u16 max_segments,
1405 struct rtrs_clt_sess *sess;
1410 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
1416 * +1: Extra connection for user messages
1418 total_con = con_num + nr_poll_queues + 1;
1419 sess->s.con = kcalloc(total_con, sizeof(*sess->s.con), GFP_KERNEL);
1423 sess->s.con_num = total_con;
1424 sess->s.irq_con_num = con_num + 1;
1426 sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
1430 mutex_init(&sess->init_mutex);
1431 uuid_gen(&sess->s.uuid);
1432 memcpy(&sess->s.dst_addr, path->dst,
1433 rdma_addr_size((struct sockaddr *)path->dst));
1436 * rdma_resolve_addr() passes src_addr to cma_bind_addr, which
1437 * checks that the sa_family is non-zero. If the user passed src_addr=NULL,
1438 * the sess->src_addr will contain only zeros, which is fine.
1441 memcpy(&sess->s.src_addr, path->src,
1442 rdma_addr_size((struct sockaddr *)path->src));
1443 strlcpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
1445 sess->max_pages_per_mr = max_segments;
1446 init_waitqueue_head(&sess->state_wq);
1447 sess->state = RTRS_CLT_CONNECTING;
1448 atomic_set(&sess->connected_cnt, 0);
1449 INIT_WORK(&sess->close_work, rtrs_clt_close_work);
1450 INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work);
1451 rtrs_clt_init_hb(sess);
1453 sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry));
1454 if (!sess->mp_skip_entry)
1455 goto err_free_stats;
1457 for_each_possible_cpu(cpu)
1458 INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu));
1460 err = rtrs_clt_init_stats(sess->stats);
1462 goto err_free_percpu;
1467 free_percpu(sess->mp_skip_entry);
1475 return ERR_PTR(err);
1478 void free_sess(struct rtrs_clt_sess *sess)
1480 free_percpu(sess->mp_skip_entry);
1481 mutex_destroy(&sess->init_mutex);
1487 static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
1489 struct rtrs_clt_con *con;
1491 con = kzalloc(sizeof(*con), GFP_KERNEL);
1495 /* Map first two connections to the first CPU */
1496 con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
1498 con->c.sess = &sess->s;
1499 atomic_set(&con->io_cnt, 0);
1500 mutex_init(&con->con_mutex);
1502 sess->s.con[cid] = &con->c;
1507 static void destroy_con(struct rtrs_clt_con *con)
1509 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1511 sess->s.con[con->c.cid] = NULL;
1512 mutex_destroy(&con->con_mutex);
1516 static int create_con_cq_qp(struct rtrs_clt_con *con)
1518 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1519 u32 max_send_wr, max_recv_wr, cq_size;
1521 struct rtrs_msg_rkey_rsp *rsp;
1523 lockdep_assert_held(&con->con_mutex);
1524 if (con->c.cid == 0) {
1526 * One completion for each receive and two for each send
1527 * (send request + registration)
1528 * + 2 for drain and heartbeat
1529 * in case qp gets into error state
1531 max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
1532 max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
1533 /* We must be the first here */
1534 if (WARN_ON(sess->s.dev))
1538 * The whole session uses the device from the user connection.
1539 * Be careful not to close the user connection before the ib dev
1540 * is gracefully put.
1542 sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
1546 "rtrs_ib_dev_find_get_or_add(): no memory\n");
1549 sess->s.dev_ref = 1;
1550 query_fast_reg_mode(sess);
1553 * Here we assume that session members are correctly set.
1554 * This is always true if user connection (cid == 0) is
1555 * established first.
1557 if (WARN_ON(!sess->s.dev))
1559 if (WARN_ON(!sess->queue_depth))
1562 /* Shared between connections */
1565 min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
1566 /* QD * (REQ + RSP + FR REGS or INVS) + drain */
1567 sess->queue_depth * 3 + 1);
1569 min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
1570 sess->queue_depth * 3 + 1);
1572 /* alloc iu to recv new rkey reply when server reports flags set */
1573 if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
1574 con->rsp_ius = rtrs_iu_alloc(max_recv_wr, sizeof(*rsp),
1575 GFP_KERNEL, sess->s.dev->ib_dev,
1577 rtrs_clt_rdma_done);
1580 con->queue_size = max_recv_wr;
1582 cq_size = max_send_wr + max_recv_wr;
1583 cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
1584 if (con->c.cid >= sess->s.irq_con_num)
1585 err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
1586 cq_vector, cq_size, max_send_wr,
1587 max_recv_wr, IB_POLL_DIRECT);
1589 err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
1590 cq_vector, cq_size, max_send_wr,
1591 max_recv_wr, IB_POLL_SOFTIRQ);
1593 * In case of error we do not bother to clean previous allocations,
1594 * since destroy_con_cq_qp() must be called.
1599 static void destroy_con_cq_qp(struct rtrs_clt_con *con)
1601 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1604 * Be careful here: destroy_con_cq_qp() can be called even if
1605 * create_con_cq_qp() failed, see comments there.
1607 lockdep_assert_held(&con->con_mutex);
1608 rtrs_cq_qp_destroy(&con->c);
1610 rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_size);
1611 con->rsp_ius = NULL;
1612 con->queue_size = 0;
1614 if (sess->s.dev_ref && !--sess->s.dev_ref) {
1615 rtrs_ib_dev_put(sess->s.dev);
1620 static void stop_cm(struct rtrs_clt_con *con)
1622 rdma_disconnect(con->c.cm_id);
1624 ib_drain_qp(con->c.qp);
1627 static void destroy_cm(struct rtrs_clt_con *con)
1629 rdma_destroy_id(con->c.cm_id);
1630 con->c.cm_id = NULL;
1633 static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
1635 struct rtrs_sess *s = con->c.sess;
1638 mutex_lock(&con->con_mutex);
1639 err = create_con_cq_qp(con);
1640 mutex_unlock(&con->con_mutex);
1642 rtrs_err(s, "create_con_cq_qp(), err: %d\n", err);
1645 err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
1647 rtrs_err(s, "Resolving route failed, err: %d\n", err);
1652 static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
1654 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1655 struct rtrs_clt *clt = sess->clt;
1656 struct rtrs_msg_conn_req msg;
1657 struct rdma_conn_param param;
1661 param = (struct rdma_conn_param) {
1663 .rnr_retry_count = 7,
1664 .private_data = &msg,
1665 .private_data_len = sizeof(msg),
1668 msg = (struct rtrs_msg_conn_req) {
1669 .magic = cpu_to_le16(RTRS_MAGIC),
1670 .version = cpu_to_le16(RTRS_PROTO_VER),
1671 .cid = cpu_to_le16(con->c.cid),
1672 .cid_num = cpu_to_le16(sess->s.con_num),
1673 .recon_cnt = cpu_to_le16(sess->s.recon_cnt),
1675 msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0;
1676 uuid_copy(&msg.sess_uuid, &sess->s.uuid);
1677 uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
1679 err = rdma_connect_locked(con->c.cm_id, ¶m);
1681 rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
1686 static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
1687 struct rdma_cm_event *ev)
1689 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1690 struct rtrs_clt *clt = sess->clt;
1691 const struct rtrs_msg_conn_rsp *msg;
1692 u16 version, queue_depth;
1696 msg = ev->param.conn.private_data;
1697 len = ev->param.conn.private_data_len;
1698 if (len < sizeof(*msg)) {
1699 rtrs_err(clt, "Invalid RTRS connection response\n");
1702 if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
1703 rtrs_err(clt, "Invalid RTRS magic\n");
1706 version = le16_to_cpu(msg->version);
1707 if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
1708 rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
1709 version >> 8, RTRS_PROTO_VER_MAJOR);
1712 errno = le16_to_cpu(msg->errno);
1714 rtrs_err(clt, "Invalid RTRS message: errno %d\n",
1718 if (con->c.cid == 0) {
1719 queue_depth = le16_to_cpu(msg->queue_depth);
1721 if (queue_depth > MAX_SESS_QUEUE_DEPTH) {
1722 rtrs_err(clt, "Invalid RTRS message: queue=%d\n",
1726 if (!sess->rbufs || sess->queue_depth < queue_depth) {
1728 sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
1733 sess->queue_depth = queue_depth;
1734 sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
1735 sess->max_io_size = le32_to_cpu(msg->max_io_size);
1736 sess->flags = le32_to_cpu(msg->flags);
1737 sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
1740 * Global queue depth and IO size are always a minimum.
1741 * If during a reconnection the server sends us a value a bit
1742 * higher - the client does not care and uses the cached minimum.
1744 * Since we can have several sessions (paths) re-establishing
1745 * connections in parallel, use a lock.
1747 mutex_lock(&clt->paths_mutex);
1748 clt->queue_depth = min_not_zero(sess->queue_depth,
1750 clt->max_io_size = min_not_zero(sess->max_io_size,
1752 mutex_unlock(&clt->paths_mutex);
1755 * Cache the hca_port and hca_name for sysfs
1757 sess->hca_port = con->c.cm_id->port_num;
1758 scnprintf(sess->hca_name, sizeof(sess->hca_name),
1759 sess->s.dev->ib_dev->name);
1760 sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
1761 /* set for_new_clt, to allow future reconnect on any path */
1762 sess->for_new_clt = 1;
1768 static inline void flag_success_on_conn(struct rtrs_clt_con *con)
1770 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1772 atomic_inc(&sess->connected_cnt);
1776 static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
1777 struct rdma_cm_event *ev)
1779 struct rtrs_sess *s = con->c.sess;
1780 const struct rtrs_msg_conn_rsp *msg;
1781 const char *rej_msg;
1785 status = ev->status;
1786 rej_msg = rdma_reject_msg(con->c.cm_id, status);
1787 msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
1789 if (msg && data_len >= sizeof(*msg)) {
1790 errno = (int16_t)le16_to_cpu(msg->errno);
1791 if (errno == -EBUSY)
1793 "Previous session is still exists on the server, please reconnect later\n");
1796 "Connect rejected: status %d (%s), rtrs errno %d\n",
1797 status, rej_msg, errno);
1800 "Connect rejected but with malformed message: status %d (%s)\n",
1807 static void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
1809 if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSING, NULL))
1810 queue_work(rtrs_wq, &sess->close_work);
1812 flush_work(&sess->close_work);
1815 static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
1817 if (con->cm_err == 1) {
1818 struct rtrs_clt_sess *sess;
1820 sess = to_clt_sess(con->c.sess);
1821 if (atomic_dec_and_test(&sess->connected_cnt))
1823 wake_up(&sess->state_wq);
1825 con->cm_err = cm_err;
1828 static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
1829 struct rdma_cm_event *ev)
1831 struct rtrs_clt_con *con = cm_id->context;
1832 struct rtrs_sess *s = con->c.sess;
1833 struct rtrs_clt_sess *sess = to_clt_sess(s);
1836 switch (ev->event) {
1837 case RDMA_CM_EVENT_ADDR_RESOLVED:
1838 cm_err = rtrs_rdma_addr_resolved(con);
1840 case RDMA_CM_EVENT_ROUTE_RESOLVED:
1841 cm_err = rtrs_rdma_route_resolved(con);
1843 case RDMA_CM_EVENT_ESTABLISHED:
1844 cm_err = rtrs_rdma_conn_established(con, ev);
1845 if (likely(!cm_err)) {
1847 * Report success and wake up. Here we abuse state_wq,
1848 * i.e. wake up without state change, but we set cm_err.
1850 flag_success_on_conn(con);
1851 wake_up(&sess->state_wq);
1855 case RDMA_CM_EVENT_REJECTED:
1856 cm_err = rtrs_rdma_conn_rejected(con, ev);
1858 case RDMA_CM_EVENT_DISCONNECTED:
1859 /* No message for disconnecting */
1860 cm_err = -ECONNRESET;
1862 case RDMA_CM_EVENT_CONNECT_ERROR:
1863 case RDMA_CM_EVENT_UNREACHABLE:
1864 case RDMA_CM_EVENT_ADDR_CHANGE:
1865 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1866 rtrs_wrn(s, "CM error event %d\n", ev->event);
1867 cm_err = -ECONNRESET;
1869 case RDMA_CM_EVENT_ADDR_ERROR:
1870 case RDMA_CM_EVENT_ROUTE_ERROR:
1871 rtrs_wrn(s, "CM error event %d\n", ev->event);
1872 cm_err = -EHOSTUNREACH;
1874 case RDMA_CM_EVENT_DEVICE_REMOVAL:
1876 * Device removal is a special case. Queue close and return 0.
1878 rtrs_clt_close_conns(sess, false);
1881 rtrs_err(s, "Unexpected RDMA CM event (%d)\n", ev->event);
1882 cm_err = -ECONNRESET;
1888 * A cm error makes sense only while establishing a connection;
1889 * in other cases we rely on the normal reconnect procedure.
1891 flag_error_on_conn(con, cm_err);
1892 rtrs_rdma_error_recovery(con);
1898 static int create_cm(struct rtrs_clt_con *con)
1900 struct rtrs_sess *s = con->c.sess;
1901 struct rtrs_clt_sess *sess = to_clt_sess(s);
1902 struct rdma_cm_id *cm_id;
1905 cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
1906 sess->s.dst_addr.ss_family == AF_IB ?
1907 RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
1908 if (IS_ERR(cm_id)) {
1909 err = PTR_ERR(cm_id);
1910 rtrs_err(s, "Failed to create CM ID, err: %d\n", err);
1914 con->c.cm_id = cm_id;
1916 /* allow the port to be reused */
1917 err = rdma_set_reuseaddr(cm_id, 1);
1919 rtrs_err(s, "Set address reuse failed, err: %d\n", err);
1922 err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr,
1923 (struct sockaddr *)&sess->s.dst_addr,
1924 RTRS_CONNECT_TIMEOUT_MS);
1926 rtrs_err(s, "Failed to resolve address, err: %d\n", err);
1930 * Combine connection status and session events. This is needed
1931 * to wait for two possible cases: cm_err has something meaningful,
1932 * or the session state was really changed to error by device removal.
1934 err = wait_event_interruptible_timeout(
1936 con->cm_err || sess->state != RTRS_CLT_CONNECTING,
1937 msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
1938 if (err == 0 || err == -ERESTARTSYS) {
1941 /* Timed out or interrupted */
1944 if (con->cm_err < 0) {
1948 if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) {
1949 /* Device removal */
1950 err = -ECONNABORTED;
1958 mutex_lock(&con->con_mutex);
1959 destroy_con_cq_qp(con);
1960 mutex_unlock(&con->con_mutex);
1967 static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
1969 struct rtrs_clt *clt = sess->clt;
1973 * We can fire RECONNECTED event only when all paths were
1974 * connected on rtrs_clt_open(), then each was disconnected
1975 * and the first one connected again. That's why we play this nasty
1976 * game with the counter value.
1979 mutex_lock(&clt->paths_ev_mutex);
1980 up = ++clt->paths_up;
1982 * Here it is safe to access paths num directly since up counter
1983 * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is
1984 * in progress, thus path removals are impossible.
1986 if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
1987 clt->paths_up = clt->paths_num;
1989 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
1990 mutex_unlock(&clt->paths_ev_mutex);
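/*
 * Worked example (illustrative): with two paths, paths_up starts at
 * MAX_PATHS_NUM; the first path coming up makes it MAX_PATHS_NUM + 1,
 * the second makes it MAX_PATHS_NUM + 2 == MAX_PATHS_NUM + paths_num,
 * so it is folded back to paths_num.  Only after every path has gone
 * down (DISCONNECTED fired at zero) does the first path coming back up
 * fire RTRS_CLT_LINK_EV_RECONNECTED.
 */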
1992 /* Mark session as established */
1993 sess->established = true;
1994 sess->reconnect_attempts = 0;
1995 sess->stats->reconnects.successful_cnt++;
1998 static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
2000 struct rtrs_clt *clt = sess->clt;
2002 if (!sess->established)
2005 sess->established = false;
2006 mutex_lock(&clt->paths_ev_mutex);
2007 WARN_ON(!clt->paths_up);
2008 if (--clt->paths_up == 0)
2009 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
2010 mutex_unlock(&clt->paths_ev_mutex);
2013 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
2015 struct rtrs_clt_con *con;
2018 WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED);
2021 * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
2022 * exactly in between. Start destroying after it finishes.
2024 mutex_lock(&sess->init_mutex);
2025 mutex_unlock(&sess->init_mutex);
2028 * All IO paths must observe !CONNECTED state before we free everything.
2033 rtrs_clt_stop_hb(sess);
2036 * The order is utterly crucial: first disconnect and complete all
2037 * rdma requests with an error (thus setting in_use=false for requests),
2038 * then fail outstanding requests, checking in_use for each, and
2039 * eventually notify the upper layer about the session disconnection.
2042 for (cid = 0; cid < sess->s.con_num; cid++) {
2043 if (!sess->s.con[cid])
2045 con = to_clt_con(sess->s.con[cid]);
2048 fail_all_outstanding_reqs(sess);
2049 free_sess_reqs(sess);
2050 rtrs_clt_sess_down(sess);
2053 * Wait for graceful shutdown, namely when peer side invokes
2054 * rdma_disconnect(). 'connected_cnt' is decremented only on
2055 * CM events, thus if the other side has crashed and hb has detected
2056 * something is wrong, here we will be stuck for exactly timeout ms,
2057 * since CM does not fire anything. That is fine, we are not in a hurry.
2060 wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt),
2061 msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
2063 for (cid = 0; cid < sess->s.con_num; cid++) {
2064 if (!sess->s.con[cid])
2066 con = to_clt_con(sess->s.con[cid]);
2067 mutex_lock(&con->con_mutex);
2068 destroy_con_cq_qp(con);
2069 mutex_unlock(&con->con_mutex);
2075 static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path,
2076 struct rtrs_clt_sess *sess,
2077 struct rtrs_clt_sess *next)
2079 struct rtrs_clt_sess **ppcpu_path;
2081 /* Call cmpxchg() without sparse warnings */
2082 ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
2083 return sess == cmpxchg(ppcpu_path, sess, next);
2086 static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
2088 struct rtrs_clt *clt = sess->clt;
2089 struct rtrs_clt_sess *next;
2090 bool wait_for_grace = false;
2093 mutex_lock(&clt->paths_mutex);
2094 list_del_rcu(&sess->s.entry);
2096 /* Make sure everybody observes path removal. */
2100 * At this point nobody sees @sess in the list, but we still have
2101 * the dangling pointer @pcpu_path which _can_ point to @sess. Since
2102 * nobody can observe @sess in the list, we guarantee that IO path
2103 * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
2104 * to @sess, but can never again become @sess.
2108 * Decrement the paths number only after a grace period, because
2109 * the caller of do_each_path() must first observe the list without
2110 * the path and only then the decremented paths number.
2112 * Otherwise there can be the following situation:
2113 * o Two paths exist and IO is coming.
2114 * o One path is removed:
2116 * do_each_path(): rtrs_clt_remove_path_from_arr():
2117 * path = get_next_path()
2118 * ^^^ list_del_rcu(path)
2119 * [!CONNECTED path] clt->paths_num--
2121 * load clt->paths_num from 2 to 1
2125 * path is observed as !CONNECTED, but do_each_path() loop
2126 * ends, because expression i < clt->paths_num is false.
2131 * Get @next connection from current @sess which is going to be
2132 * removed. If @sess is the last element, then @next is NULL.
2135 next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry,
2136 typeof(*next), s.entry);
2140 * @pcpu paths can still point to the path which is going to be
2141 * removed, so change the pointer manually.
2143 for_each_possible_cpu(cpu) {
2144 struct rtrs_clt_sess __rcu **ppcpu_path;
2146 ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
2147 if (rcu_dereference_protected(*ppcpu_path,
2148 lockdep_is_held(&clt->paths_mutex)) != sess)
2150 * synchronize_rcu() was called just after deleting
2151 * entry from the list, thus IO code path cannot
2152 * change the pointer back to the one which is going
2153 * to be removed; we are safe here.
2158 * We race with the IO code path, which also changes the pointer,
2159 * thus we have to be careful not to overwrite it.
2161 if (xchg_sessions(ppcpu_path, sess, next))
2163 * @ppcpu_path was successfully replaced with @next,
2164 * which means that someone could have also picked up
2165 * @sess and be dereferencing it right now, so waiting for
2166 * a grace period is required.
2168 wait_for_grace = true;
2173 mutex_unlock(&clt->paths_mutex);
2176 static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess)
2178 struct rtrs_clt *clt = sess->clt;
2180 mutex_lock(&clt->paths_mutex);
2183 list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
2184 mutex_unlock(&clt->paths_mutex);
2187 static void rtrs_clt_close_work(struct work_struct *work)
2189 struct rtrs_clt_sess *sess;
2191 sess = container_of(work, struct rtrs_clt_sess, close_work);
2193 cancel_delayed_work_sync(&sess->reconnect_dwork);
2194 rtrs_clt_stop_and_destroy_conns(sess);
2195 rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSED, NULL);
2198 static int init_conns(struct rtrs_clt_sess *sess)
2204 * On every new set of session connections increase the reconnect counter
2205 * to avoid clashes with previous, not yet closed
2206 * sessions on the server side.
2208 sess->s.recon_cnt++;
2210 /* Establish all RDMA connections */
2211 for (cid = 0; cid < sess->s.con_num; cid++) {
2212 err = create_con(sess, cid);
2216 err = create_cm(to_clt_con(sess->s.con[cid]));
2218 destroy_con(to_clt_con(sess->s.con[cid]));
2222 err = alloc_sess_reqs(sess);
2226 rtrs_clt_start_hb(sess);
2232 struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]);
2236 mutex_lock(&con->con_mutex);
2237 destroy_con_cq_qp(con);
2238 mutex_unlock(&con->con_mutex);
2243 * If we've never taken the async path and got an error, say,
2244 * doing rdma_resolve_addr(), switch to CONNECTING_ERR state
2245 * manually to keep reconnecting.
2247 rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
2252 static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
2254 struct rtrs_clt_con *con = cq->cq_context;
2255 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
2258 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
2259 rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
2261 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2262 rtrs_err(sess->clt, "Sess info request send failed: %s\n",
2263 ib_wc_status_msg(wc->status));
2264 rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
2268 rtrs_clt_update_wc_stats(con);
2271 static int process_info_rsp(struct rtrs_clt_sess *sess,
2272 const struct rtrs_msg_info_rsp *msg)
2274 unsigned int sg_cnt, total_len;
2277 sg_cnt = le16_to_cpu(msg->sg_cnt);
2278 if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) {
2279 rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n",
2285 * Check if IB immediate data size is enough to hold the mem_id and
2286 * the offset inside the memory chunk.
2288 if (unlikely((ilog2(sg_cnt - 1) + 1) +
2289 (ilog2(sess->chunk_size - 1) + 1) >
2290 MAX_IMM_PAYL_BITS)) {
2292 "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
2293 MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size);
2297 for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) {
2298 const struct rtrs_sg_desc *desc = &msg->desc[sgi];
2302 addr = le64_to_cpu(desc->addr);
2303 rkey = le32_to_cpu(desc->key);
2304 len = le32_to_cpu(desc->len);
2308 if (unlikely(!len || (len % sess->chunk_size))) {
2309 rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi,
2313 for ( ; len && i < sess->queue_depth; i++) {
2314 sess->rbufs[i].addr = addr;
2315 sess->rbufs[i].rkey = rkey;
2317 len -= sess->chunk_size;
2318 addr += sess->chunk_size;
2322 if (unlikely(sgi != sg_cnt || i != sess->queue_depth)) {
2323 rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n");
2326 if (unlikely(total_len != sess->chunk_size * sess->queue_depth)) {
2327 rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len);
2334 static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
2336 struct rtrs_clt_con *con = cq->cq_context;
2337 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
2338 struct rtrs_msg_info_rsp *msg;
2339 enum rtrs_clt_state state;
2344 state = RTRS_CLT_CONNECTING_ERR;
2346 WARN_ON(con->c.cid);
2347 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
2348 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2349 rtrs_err(sess->clt, "Sess info response recv failed: %s\n",
2350 ib_wc_status_msg(wc->status));
2353 WARN_ON(wc->opcode != IB_WC_RECV);
2355 if (unlikely(wc->byte_len < sizeof(*msg))) {
2356 rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
2360 ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
2361 iu->size, DMA_FROM_DEVICE);
2363 if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP)) {
2364 rtrs_err(sess->clt, "Sess info response is malformed: type %d\n",
2365 le16_to_cpu(msg->type));
2368 rx_sz = sizeof(*msg);
2369 rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
2370 if (unlikely(wc->byte_len < rx_sz)) {
2371 rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
2375 err = process_info_rsp(sess, msg);
2379 err = post_recv_sess(sess);
2383 state = RTRS_CLT_CONNECTED;
2386 rtrs_clt_update_wc_stats(con);
2387 rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
2388 rtrs_clt_change_state_get_old(sess, state, NULL);
2391 static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
2393 struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]);
2394 struct rtrs_msg_info_req *msg;
2395 struct rtrs_iu *tx_iu, *rx_iu;
2399 rx_sz = sizeof(struct rtrs_msg_info_rsp);
2400 rx_sz += sizeof(u64) * MAX_SESS_QUEUE_DEPTH;
2402 tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
2403 sess->s.dev->ib_dev, DMA_TO_DEVICE,
2404 rtrs_clt_info_req_done);
2405 rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
2406 DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
2407 if (unlikely(!tx_iu || !rx_iu)) {
2411 /* Prepare for getting info response */
2412 err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
2413 if (unlikely(err)) {
2414 rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err);
2420 msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
2421 memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname));
2423 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
2424 tx_iu->size, DMA_TO_DEVICE);
2426 /* Send info request */
2427 err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
2428 if (unlikely(err)) {
2429 rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err);
2434 /* Wait for state change */
2435 wait_event_interruptible_timeout(sess->state_wq,
2436 sess->state != RTRS_CLT_CONNECTING,
2438 RTRS_CONNECT_TIMEOUT_MS));
2439 if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) {
2440 if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR)
2448 rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
2450 rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
2452 /* If we never took the async path because of allocation failures */
2453 rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
2459 * init_sess() - establishes all session connections and does handshake
2460 * @sess: client session.
2461 * On error the full close or reconnect procedure must be taken,
2462 * because the async reconnect or close works may already have been started.
2464 static int init_sess(struct rtrs_clt_sess *sess)
2468 mutex_lock(&sess->init_mutex);
2469 err = init_conns(sess);
2471 rtrs_err(sess->clt, "init_conns(), err: %d\n", err);
2474 err = rtrs_send_sess_info(sess);
2476 rtrs_err(sess->clt, "rtrs_send_sess_info(), err: %d\n", err);
2479 rtrs_clt_sess_up(sess);
2481 mutex_unlock(&sess->init_mutex);
2486 static void rtrs_clt_reconnect_work(struct work_struct *work)
2488 struct rtrs_clt_sess *sess;
2489 struct rtrs_clt *clt;
2490 unsigned int delay_ms;
2493 sess = container_of(to_delayed_work(work), struct rtrs_clt_sess,
2497 if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING)
2500 if (sess->reconnect_attempts >= clt->max_reconnect_attempts) {
2501 /* Close a session completely if max attempts is reached */
2502 rtrs_clt_close_conns(sess, false);
2505 sess->reconnect_attempts++;
2507 /* Stop everything */
2508 rtrs_clt_stop_and_destroy_conns(sess);
2509 msleep(RTRS_RECONNECT_BACKOFF);
2510 if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING, NULL)) {
2511 err = init_sess(sess);
2513 goto reconnect_again;
2519 if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, NULL)) {
2520 sess->stats->reconnects.fail_cnt++;
2521 delay_ms = clt->reconnect_delay_sec * 1000;
2522 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
2523 msecs_to_jiffies(delay_ms +
2525 RTRS_RECONNECT_SEED));
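
/*
 * Illustrative sketch, not part of the driver: the requeue above delays the
 * next reconnect attempt by the configured reconnect delay plus up to
 * RTRS_RECONNECT_SEED seconds of random jitter, so many clients do not
 * reconnect in lockstep after a major outage.  The helper name below is an
 * assumption local to this example.
 */
static inline unsigned long example_reconnect_delay_jiffies(unsigned int delay_sec)
{
	unsigned int delay_ms  = delay_sec * 1000;
	/* 0..(RTRS_RECONNECT_SEED - 1) whole seconds of jitter */
	unsigned int jitter_ms = (prandom_u32() % RTRS_RECONNECT_SEED) * 1000;

	return msecs_to_jiffies(delay_ms + jitter_ms);
}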
2529 static void rtrs_clt_dev_release(struct device *dev)
2531 struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
2536 static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
2537 u16 port, size_t pdu_sz, void *priv,
2538 void (*link_ev)(void *priv,
2539 enum rtrs_clt_link_ev ev),
2540 unsigned int max_segments,
2541 unsigned int reconnect_delay_sec,
2542 unsigned int max_reconnect_attempts)
2544 struct rtrs_clt *clt;
2547 if (!paths_num || paths_num > MAX_PATHS_NUM)
2548 return ERR_PTR(-EINVAL);
2550 if (strlen(sessname) >= sizeof(clt->sessname))
2551 return ERR_PTR(-EINVAL);
2553 clt = kzalloc(sizeof(*clt), GFP_KERNEL);
2555 return ERR_PTR(-ENOMEM);
2557 clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
2558 if (!clt->pcpu_path) {
2560 return ERR_PTR(-ENOMEM);
2563 uuid_gen(&clt->paths_uuid);
2564 INIT_LIST_HEAD_RCU(&clt->paths_list);
2565 clt->paths_num = paths_num;
2566 clt->paths_up = MAX_PATHS_NUM;
2568 clt->pdu_sz = pdu_sz;
2569 clt->max_segments = max_segments;
2570 clt->reconnect_delay_sec = reconnect_delay_sec;
2571 clt->max_reconnect_attempts = max_reconnect_attempts;
2573 clt->link_ev = link_ev;
2574 clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
2575 strlcpy(clt->sessname, sessname, sizeof(clt->sessname));
2576 init_waitqueue_head(&clt->permits_wait);
2577 mutex_init(&clt->paths_ev_mutex);
2578 mutex_init(&clt->paths_mutex);
2580 clt->dev.class = rtrs_clt_dev_class;
2581 clt->dev.release = rtrs_clt_dev_release;
2582 err = dev_set_name(&clt->dev, "%s", sessname);
2586 * Suppress user space notification until
2587 * sysfs files are created
2589 dev_set_uevent_suppress(&clt->dev, true);
2590 err = device_register(&clt->dev);
2592 put_device(&clt->dev);
2596 clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
2597 if (!clt->kobj_paths) {
2601 err = rtrs_clt_create_sysfs_root_files(clt);
2603 kobject_del(clt->kobj_paths);
2604 kobject_put(clt->kobj_paths);
2607 dev_set_uevent_suppress(&clt->dev, false);
2608 kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
2612 device_unregister(&clt->dev);
2614 free_percpu(clt->pcpu_path);
2616 return ERR_PTR(err);
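
/*
 * Illustrative sketch, not part of the driver: the registration above follows
 * the common "suppress uevents until sysfs is populated" pattern, so user
 * space only sees KOBJ_ADD once the device's files exist.  The wrapper name
 * below is an assumption local to this example.
 */
static int example_register_dev(struct device *dev)
{
	int err;

	dev_set_uevent_suppress(dev, true);	/* hold back KOBJ_ADD for now */
	err = device_register(dev);
	if (err) {
		put_device(dev);		/* a failed register still needs a put */
		return err;
	}
	/* ... create sysfs files for the device here ... */
	dev_set_uevent_suppress(dev, false);
	kobject_uevent(&dev->kobj, KOBJ_ADD);	/* announce the fully set up device */
	return 0;
}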
2619 static void free_clt(struct rtrs_clt *clt)
2622 free_percpu(clt->pcpu_path);
2623 mutex_destroy(&clt->paths_ev_mutex);
2624 mutex_destroy(&clt->paths_mutex);
2625 /* release callback will free clt in last put */
2626 device_unregister(&clt->dev);
2630 * rtrs_clt_open() - Open a session to an RTRS server
2631 * @ops: holds the link event callback and the private pointer.
2632 * @sessname: name of the session
2633 * @paths: Paths to be established defined by their src and dst addresses
2634 * @paths_num: Number of elements in the @paths array
2635 * @port: port to be used by the RTRS session
2636 * @pdu_sz: Size of extra payload which can be accessed after permit allocation.
2637 * @reconnect_delay_sec: time between reconnect tries
2638 * @max_segments: Max. number of segments per IO request
2639 * @max_reconnect_attempts: Number of times to reconnect on error before giving
2640 * up, 0 for disabled, -1 for forever
2641 * @nr_poll_queues: number of polling-mode connections using the IB_POLL_DIRECT flag
2643 * Starts session establishment with the rtrs_server. The function can block
2644 * up to ~2000ms before it returns.
2646 * Return: a valid pointer on success, otherwise an ERR_PTR-encoded error.
2648 struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
2649 const char *sessname,
2650 const struct rtrs_addr *paths,
2651 size_t paths_num, u16 port,
2652 size_t pdu_sz, u8 reconnect_delay_sec,
2654 s16 max_reconnect_attempts, u32 nr_poll_queues)
2656 struct rtrs_clt_sess *sess, *tmp;
2657 struct rtrs_clt *clt;
2660 clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
2662 max_segments, reconnect_delay_sec,
2663 max_reconnect_attempts);
2668 for (i = 0; i < paths_num; i++) {
2669 struct rtrs_clt_sess *sess;
2671 sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
2672 max_segments, nr_poll_queues);
2674 err = PTR_ERR(sess);
2675 goto close_all_sess;
2678 sess->for_new_clt = 1;
2679 list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
2681 err = init_sess(sess);
2683 list_del_rcu(&sess->s.entry);
2684 rtrs_clt_close_conns(sess, true);
2686 goto close_all_sess;
2689 err = rtrs_clt_create_sess_files(sess);
2691 list_del_rcu(&sess->s.entry);
2692 rtrs_clt_close_conns(sess, true);
2694 goto close_all_sess;
2697 err = alloc_permits(clt);
2699 goto close_all_sess;
2704 list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
2705 rtrs_clt_destroy_sess_files(sess, NULL);
2706 rtrs_clt_close_conns(sess, true);
2707 kobject_put(&sess->kobj);
2709 rtrs_clt_destroy_sysfs_root(clt);
2713 return ERR_PTR(err);
2715 EXPORT_SYMBOL(rtrs_clt_open);
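
/*
 * Hedged usage sketch, not part of the driver: a ULP (rnbd-clt is the in-tree
 * user) opens a session roughly as below and later tears it down with
 * rtrs_clt_close().  The callback, session name, port number and sizes are
 * illustrative assumptions; the argument order follows the kernel-doc above.
 */
static void example_link_ev(void *priv, enum rtrs_clt_link_ev ev)
{
	/* react to RTRS_CLT_LINK_EV_RECONNECTED / RTRS_CLT_LINK_EV_DISCONNECTED */
}

static struct rtrs_clt *example_open_session(const struct rtrs_addr *paths,
					     size_t path_cnt)
{
	struct rtrs_clt_ops ops = {
		.priv	 = NULL,
		.link_ev = example_link_ev,
	};

	/* port 1234, no extra PDU payload, 30 s reconnect delay,
	 * 64 segments per IO, retry forever, no polling queues */
	return rtrs_clt_open(&ops, "example_session", paths, path_cnt,
			     1234, 0, 30, 64, -1, 0);
}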
2718 * rtrs_clt_close() - Close a session
2719 * @clt: Session handle. Session is freed upon return.
2721 void rtrs_clt_close(struct rtrs_clt *clt)
2723 struct rtrs_clt_sess *sess, *tmp;
2725 /* First, forbid sysfs access */
2726 rtrs_clt_destroy_sysfs_root(clt);
2728 /* Now it is safe to iterate over all paths without locks */
2729 list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
2730 rtrs_clt_close_conns(sess, true);
2731 rtrs_clt_destroy_sess_files(sess, NULL);
2732 kobject_put(&sess->kobj);
2736 EXPORT_SYMBOL(rtrs_clt_close);
2738 int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
2740 enum rtrs_clt_state old_state;
2744 changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING,
2747 sess->reconnect_attempts = 0;
2748 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0);
2750 if (changed || old_state == RTRS_CLT_RECONNECTING) {
2752 * flush_delayed_work() queues pending work for immediate
2753 * execution, so flush if we have just queued the work ourselves
2754 * or if work was already pending.
2756 flush_delayed_work(&sess->reconnect_dwork);
2757 err = (READ_ONCE(sess->state) ==
2758 RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
2764 int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess)
2766 rtrs_clt_close_conns(sess, true);
2771 int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
2772 const struct attribute *sysfs_self)
2774 enum rtrs_clt_state old_state;
2778 * Keep stopping the path until its state is changed to DEAD or
2779 * is observed as DEAD:
2780 * 1. State was changed to DEAD - we were fast and nobody invoked
2781 * rtrs_clt_reconnect(), which could otherwise start reconnecting again.
2783 * 2. State was observed as DEAD - someone else is removing the
2784 * path in parallel.
2787 rtrs_clt_close_conns(sess, true);
2788 changed = rtrs_clt_change_state_get_old(sess,
2791 } while (!changed && old_state != RTRS_CLT_DEAD);
2793 if (likely(changed)) {
2794 rtrs_clt_destroy_sess_files(sess, sysfs_self);
2795 rtrs_clt_remove_path_from_arr(sess);
2796 kobject_put(&sess->kobj);
2802 void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value)
2804 clt->max_reconnect_attempts = (unsigned int)value;
2807 int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt)
2809 return (int)clt->max_reconnect_attempts;
2813 * rtrs_clt_request() - Request data transfer to/from server via RDMA.
2816 * @ops: confirmation callback and the private pointer passed back to it.
2818 * @permit: Preallocated permit
2819 * @vec: Message that is sent to server together with the request.
2820 * The combined length of all @vec elements must not exceed IO_MSG_SIZE.
2821 * Since the message is copied internally it can be allocated on the stack.
2822 * @nr: Number of elements in @vec.
2823 * @data_len: length of data sent to/from server
2824 * @sg: Pages to be sent/received to/from server.
2825 * @sg_cnt: Number of elements in the @sg
2831 * On dir=READ the RTRS client requests a data transfer from the server to
2832 * the client. The data the server responds with is stored in @sg when the
2833 * user receives the %RTRS_CLT_RDMA_EV_RDMA_REQUEST_WRITE_COMPL event.
2834 * On dir=WRITE the RTRS client RDMA-writes the data in @sg to the server.
2836 int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
2837 struct rtrs_clt *clt, struct rtrs_permit *permit,
2838 const struct kvec *vec, size_t nr, size_t data_len,
2839 struct scatterlist *sg, unsigned int sg_cnt)
2841 struct rtrs_clt_io_req *req;
2842 struct rtrs_clt_sess *sess;
2844 enum dma_data_direction dma_dir;
2845 int err = -ECONNABORTED, i;
2846 size_t usr_len, hdr_len;
2849 /* Get kvec length */
2850 for (i = 0, usr_len = 0; i < nr; i++)
2851 usr_len += vec[i].iov_len;
2854 hdr_len = sizeof(struct rtrs_msg_rdma_read) +
2855 sg_cnt * sizeof(struct rtrs_sg_desc);
2856 dma_dir = DMA_FROM_DEVICE;
2858 hdr_len = sizeof(struct rtrs_msg_rdma_write);
2859 dma_dir = DMA_TO_DEVICE;
2863 for (path_it_init(&it, clt);
2864 (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
2865 if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
2868 if (unlikely(usr_len + hdr_len > sess->max_hdr_size)) {
2869 rtrs_wrn_rl(sess->clt,
2870 "%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
2871 dir == READ ? "Read" : "Write",
2872 usr_len, hdr_len, sess->max_hdr_size);
2876 req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv,
2877 vec, usr_len, sg, sg_cnt, data_len,
2880 err = rtrs_clt_read_req(req);
2882 err = rtrs_clt_write_req(req);
2883 if (unlikely(err)) {
2884 req->in_use = false;
2890 path_it_deinit(&it);
2895 EXPORT_SYMBOL(rtrs_clt_request);
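
/*
 * Hedged usage sketch, not part of the driver: a typical caller first obtains
 * a permit, then issues the transfer and releases the permit from its
 * confirmation callback (or on submission error, as below).  The callback,
 * wrapper name and the empty kvec are illustrative assumptions.
 */
static void example_io_conf(void *priv, int errno)
{
	/* completion status of the transfer; release the permit here */
}

static int example_read(struct rtrs_clt *clt, struct scatterlist *sg,
			unsigned int sg_cnt, size_t data_len)
{
	struct rtrs_clt_req_ops ops = {
		.priv	 = NULL,
		.conf_fn = example_io_conf,
	};
	struct rtrs_permit *permit;
	struct kvec vec = { };	/* no inline user message in this sketch */
	int err;

	permit = rtrs_clt_get_permit(clt, RTRS_IO_CON, RTRS_PERMIT_WAIT);
	if (!permit)
		return -EBUSY;

	err = rtrs_clt_request(READ, &ops, clt, permit, &vec, 0, data_len,
			       sg, sg_cnt);
	if (err)
		rtrs_clt_put_permit(clt, permit);
	return err;
}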
2897 int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index)
2900 struct rtrs_con *con;
2901 struct rtrs_clt_sess *sess;
2905 for (path_it_init(&it, clt);
2906 (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
2907 if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
2910 con = sess->s.con[index + 1];
2911 cnt = ib_process_cq_direct(con->cq, -1);
2915 path_it_deinit(&it);
2920 EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct);
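
/*
 * Hedged usage sketch, not part of the driver: a ULP that opened the session
 * with nr_poll_queues > 0 can drive completions itself, e.g. from a blk-mq
 * ->poll() hook, instead of waiting for interrupts.  The wrapper name and
 * queue index mapping are illustrative assumptions.
 */
static int example_poll_hw_queue(struct rtrs_clt *clt, unsigned int hw_queue)
{
	/* process completions on the IO connection backing this queue and
	 * return how many were handled */
	return rtrs_clt_rdma_cq_direct(clt, hw_queue);
}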
2923 * rtrs_clt_query() - queries RTRS session attributes
2924 * @clt: session pointer
2925 * @attr: query results for session attributes.
2928 * -ECOMM: no connection to the server
2930 int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
2932 if (!rtrs_clt_is_connected(clt))
2935 attr->queue_depth = clt->queue_depth;
2936 attr->max_io_size = clt->max_io_size;
2937 attr->sess_kobj = &clt->dev.kobj;
2938 strlcpy(attr->sessname, clt->sessname, sizeof(attr->sessname));
2942 EXPORT_SYMBOL(rtrs_clt_query);
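
/*
 * Hedged usage sketch, not part of the driver: after rtrs_clt_open() succeeds
 * a ULP typically queries the negotiated limits before sizing its own queues.
 * The reporting wrapper below is an illustrative assumption.
 */
static int example_report_limits(struct rtrs_clt *clt)
{
	struct rtrs_attrs attr;
	int err;

	err = rtrs_clt_query(clt, &attr);
	if (err)
		return err;	/* -ECOMM: no connected path */

	pr_info("%s: queue_depth %u, max_io_size %u\n",
		attr.sessname, attr.queue_depth, attr.max_io_size);
	return 0;
}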
2944 int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
2945 struct rtrs_addr *addr)
2947 struct rtrs_clt_sess *sess;
2950 sess = alloc_sess(clt, addr, nr_cpu_ids, clt->max_segments, 0);
2952 return PTR_ERR(sess);
2955 * It is safe to add a path in the CONNECTING state: incoming
2956 * IO will never grab it. It is also important to add the path
2957 * before init, since init fires the LINK_CONNECTED event.
2959 rtrs_clt_add_path_to_arr(sess);
2961 err = init_sess(sess);
2965 err = rtrs_clt_create_sess_files(sess);
2972 rtrs_clt_remove_path_from_arr(sess);
2973 rtrs_clt_close_conns(sess, true);
2979 static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
2981 if (!(dev->ib_dev->attrs.device_cap_flags &
2982 IB_DEVICE_MEM_MGT_EXTENSIONS)) {
2983 pr_err("Memory registrations not supported.\n");
2990 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
2991 .init = rtrs_clt_ib_dev_init
2994 static int __init rtrs_client_init(void)
2996 rtrs_rdma_dev_pd_init(0, &dev_pd);
2998 rtrs_clt_dev_class = class_create(THIS_MODULE, "rtrs-client");
2999 if (IS_ERR(rtrs_clt_dev_class)) {
3000 pr_err("Failed to create rtrs-client dev class\n");
3001 return PTR_ERR(rtrs_clt_dev_class);
3003 rtrs_wq = alloc_workqueue("rtrs_client_wq", 0, 0);
3005 class_destroy(rtrs_clt_dev_class);
3012 static void __exit rtrs_client_exit(void)
3014 destroy_workqueue(rtrs_wq);
3015 class_destroy(rtrs_clt_dev_class);
3016 rtrs_rdma_dev_pd_deinit(&dev_pd);
3019 module_init(rtrs_client_init);
3020 module_exit(rtrs_client_exit);