1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
11 #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
13 #include <linux/module.h>
14 #include <linux/rculist.h>
15 #include <linux/random.h>
20 #define RTRS_CONNECT_TIMEOUT_MS 30000
22 * Wait a bit before trying to reconnect after a failure
23 * in order to give the server time to finish its cleanup,
24 * which otherwise leads to "false positive" failed reconnect attempts
26 #define RTRS_RECONNECT_BACKOFF 1000
28 * Wait for an additional random time between 0 and 8 seconds
29 * before starting to reconnect, to avoid clients reconnecting
30 * all at once in case of a major network outage
32 #define RTRS_RECONNECT_SEED 8
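/*
 * The randomised component is added on top of the configured reconnect
 * delay in rtrs_rdma_error_recovery() below.
 */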
34 #define FIRST_CONN 0x01
35 /* limit to 128 * 4k = 512k max IO */
36 #define RTRS_MAX_SEGMENTS 128
38 MODULE_DESCRIPTION("RDMA Transport Client");
39 MODULE_LICENSE("GPL");
41 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
42 static struct rtrs_rdma_dev_pd dev_pd = {
46 static struct workqueue_struct *rtrs_wq;
47 static struct class *rtrs_clt_dev_class;
49 static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt)
51 struct rtrs_clt_sess *sess;
52 bool connected = false;
55 list_for_each_entry_rcu(sess, &clt->paths_list, s.entry)
56 connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED;
62 static struct rtrs_permit *
63 __rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
65 size_t max_depth = clt->queue_depth;
66 struct rtrs_permit *permit;
70 * Adapted from null_blk get_tag(). Callers from different cpus may
71 * grab the same bit, since find_first_zero_bit is not atomic.
72 * But then the test_and_set_bit_lock will fail for all the
73 * callers but one, so that they will loop again.
74 * This way an explicit spinlock is not required.
77 bit = find_first_zero_bit(clt->permits_map, max_depth);
80 } while (test_and_set_bit_lock(bit, clt->permits_map));
82 permit = get_permit(clt, bit);
83 WARN_ON(permit->mem_id != bit);
84 permit->cpu_id = raw_smp_processor_id();
85 permit->con_type = con_type;
90 static inline void __rtrs_put_permit(struct rtrs_clt *clt,
91 struct rtrs_permit *permit)
93 clear_bit_unlock(permit->mem_id, clt->permits_map);
97 * rtrs_clt_get_permit() - allocates permit for future RDMA operation
98 * @clt: Current session
99 * @con_type: Type of connection to use with the permit
100 * @can_wait: Wait type
103 * Allocates a permit for a future RDMA operation. The permit is used
104 * to preallocate all resources and to propagate memory pressure
108 * Can sleep if @can_wait == RTRS_PERMIT_WAIT
110 struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt,
111 enum rtrs_clt_con_type con_type,
112 enum wait_type can_wait)
114 struct rtrs_permit *permit;
117 permit = __rtrs_get_permit(clt, con_type);
118 if (permit || !can_wait)
122 prepare_to_wait(&clt->permits_wait, &wait,
123 TASK_UNINTERRUPTIBLE);
124 permit = __rtrs_get_permit(clt, con_type);
131 finish_wait(&clt->permits_wait, &wait);
135 EXPORT_SYMBOL(rtrs_clt_get_permit);
138 * rtrs_clt_put_permit() - puts allocated permit
139 * @clt: Current session
140 * @permit: Permit to be freed
145 void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit)
147 if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
150 __rtrs_put_permit(clt, permit);
153 * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
154 * before calling schedule(). So if rtrs_clt_get_permit() is sleeping
155 * it must have added itself to &clt->permits_wait before
156 * __rtrs_put_permit() finished.
157 * Hence it is safe to guard wake_up() with a waitqueue_active() test.
159 if (waitqueue_active(&clt->permits_wait))
160 wake_up(&clt->permits_wait);
162 EXPORT_SYMBOL(rtrs_clt_put_permit);
165 * rtrs_permit_to_clt_con() - return the RDMA connection that matches a permit
166 * @sess: client session pointer
167 * @permit: permit for the allocation of the RDMA buffer
169 * IO connections start from index 1;
170 * connection 0 is reserved for user messages.
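 *
 * For example, with irq_con_num == 5 (one user connection plus four IO
 * connections), a permit with cpu_id == 6 maps to connection index
 * (6 % 4) + 1 == 3.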
173 struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
174 struct rtrs_permit *permit)
178 if (permit->con_type == RTRS_IO_CON)
179 id = (permit->cpu_id % (sess->s.irq_con_num - 1)) + 1;
181 return to_clt_con(sess->s.con[id]);
185 * rtrs_clt_change_state() - change the session state through the state machine
188 * @sess: client session to change the state of.
189 * @new_state: state to change to.
191 * Returns true if the session state was changed to @new_state, false otherwise.
194 * The state_wq lock must be held.
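 *
 * As can be seen from the cases below, for example RTRS_CLT_CONNECTED may
 * only be entered from RTRS_CLT_CONNECTING, and RTRS_CLT_CLOSED only from
 * RTRS_CLT_CLOSING.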
196 static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
197 enum rtrs_clt_state new_state)
199 enum rtrs_clt_state old_state;
200 bool changed = false;
202 lockdep_assert_held(&sess->state_wq.lock);
204 old_state = sess->state;
206 case RTRS_CLT_CONNECTING:
208 case RTRS_CLT_RECONNECTING:
215 case RTRS_CLT_RECONNECTING:
217 case RTRS_CLT_CONNECTED:
218 case RTRS_CLT_CONNECTING_ERR:
219 case RTRS_CLT_CLOSED:
226 case RTRS_CLT_CONNECTED:
228 case RTRS_CLT_CONNECTING:
235 case RTRS_CLT_CONNECTING_ERR:
237 case RTRS_CLT_CONNECTING:
244 case RTRS_CLT_CLOSING:
246 case RTRS_CLT_CONNECTING:
247 case RTRS_CLT_CONNECTING_ERR:
248 case RTRS_CLT_RECONNECTING:
249 case RTRS_CLT_CONNECTED:
256 case RTRS_CLT_CLOSED:
258 case RTRS_CLT_CLOSING:
267 case RTRS_CLT_CLOSED:
278 sess->state = new_state;
279 wake_up_locked(&sess->state_wq);
285 static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess,
286 enum rtrs_clt_state old_state,
287 enum rtrs_clt_state new_state)
289 bool changed = false;
291 spin_lock_irq(&sess->state_wq.lock);
292 if (sess->state == old_state)
293 changed = rtrs_clt_change_state(sess, new_state);
294 spin_unlock_irq(&sess->state_wq.lock);
299 static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
301 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
303 if (rtrs_clt_change_state_from_to(sess,
305 RTRS_CLT_RECONNECTING)) {
306 struct rtrs_clt *clt = sess->clt;
307 unsigned int delay_ms;
310 * Normal scenario, reconnect if we were successfully connected
312 delay_ms = clt->reconnect_delay_sec * 1000;
313 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
314 msecs_to_jiffies(delay_ms +
315 prandom_u32() % RTRS_RECONNECT_SEED));
318 * An error can also occur while a new connection is being established,
319 * so notify the waiter with the error state; the waiter is responsible
320 * for cleaning up the rest and reconnecting if needed.
322 rtrs_clt_change_state_from_to(sess,
324 RTRS_CLT_CONNECTING_ERR);
328 static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
330 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
332 if (wc->status != IB_WC_SUCCESS) {
333 rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
334 ib_wc_status_msg(wc->status));
335 rtrs_rdma_error_recovery(con);
339 static struct ib_cqe fast_reg_cqe = {
340 .done = rtrs_clt_fast_reg_done
343 static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
344 bool notify, bool can_wait);
346 static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
348 struct rtrs_clt_io_req *req =
349 container_of(wc->wr_cqe, typeof(*req), inv_cqe);
350 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
352 if (wc->status != IB_WC_SUCCESS) {
353 rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
354 ib_wc_status_msg(wc->status));
355 rtrs_rdma_error_recovery(con);
357 req->need_inv = false;
358 if (req->need_inv_comp)
359 complete(&req->inv_comp);
361 /* Complete request from INV callback */
362 complete_rdma_req(req, req->inv_errno, true, false);
365 static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
367 struct rtrs_clt_con *con = req->con;
368 struct ib_send_wr wr = {
369 .opcode = IB_WR_LOCAL_INV,
370 .wr_cqe = &req->inv_cqe,
371 .send_flags = IB_SEND_SIGNALED,
372 .ex.invalidate_rkey = req->mr->rkey,
374 req->inv_cqe.done = rtrs_clt_inv_rkey_done;
376 return ib_post_send(con->c.qp, &wr, NULL);
379 static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
380 bool notify, bool can_wait)
382 struct rtrs_clt_con *con = req->con;
383 struct rtrs_clt_sess *sess;
386 if (WARN_ON(!req->in_use))
388 if (WARN_ON(!req->con))
390 sess = to_clt_sess(con->c.sess);
393 if (req->dir == DMA_FROM_DEVICE && req->need_inv) {
395 * We are here to invalidate read requests
396 * ourselves. In the normal scenario the server sends an INV
397 * for every read request, but since we got here,
398 * one of two things has happened:
400 * 1. this is a failover, when errno != 0
403 * 2. something went totally wrong and the
404 *    server forgot to send the INV, so we
405 *    should do that ourselves.
409 req->need_inv_comp = true;
411 /* This should be IO path, so always notify */
413 /* Save errno for INV callback */
414 req->inv_errno = errno;
417 refcount_inc(&req->ref);
418 err = rtrs_inv_rkey(req);
420 rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
422 } else if (can_wait) {
423 wait_for_completion(&req->inv_comp);
426 * Something went wrong, so request will be
427 * completed from INV callback.
433 if (!refcount_dec_and_test(&req->ref))
436 ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
437 req->sg_cnt, req->dir);
439 if (!refcount_dec_and_test(&req->ref))
441 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
442 atomic_dec(&sess->stats->inflight);
448 rtrs_err_rl(con->c.sess, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
449 errno, kobject_name(&sess->kobj), sess->hca_name,
450 sess->hca_port, notify);
454 req->conf(req->priv, errno);
457 static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
458 struct rtrs_clt_io_req *req,
459 struct rtrs_rbuf *rbuf, u32 off,
460 u32 imm, struct ib_send_wr *wr)
462 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
463 enum ib_send_flags flags;
467 rtrs_wrn(con->c.sess,
468 "Doing RDMA Write failed, no data supplied\n");
472 /* user data and user message in the first list element */
473 sge.addr = req->iu->dma_addr;
474 sge.length = req->sg_size;
475 sge.lkey = sess->s.dev->ib_pd->local_dma_lkey;
478 * From time to time we have to post signalled sends,
479 * or send queue will fill up and only QP reset can help.
481 flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
482 0 : IB_SEND_SIGNALED;
484 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
485 req->sg_size, DMA_TO_DEVICE);
487 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
488 rbuf->rkey, rbuf->addr + off,
489 imm, flags, wr, NULL);
492 static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id,
493 s16 errno, bool w_inval)
495 struct rtrs_clt_io_req *req;
497 if (WARN_ON(msg_id >= sess->queue_depth))
500 req = &sess->reqs[msg_id];
501 /* Drop need_inv if the server responded with a send-with-invalidation */
502 req->need_inv &= !w_inval;
503 complete_rdma_req(req, errno, true, false);
506 static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
510 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
512 WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
513 iu = container_of(wc->wr_cqe, struct rtrs_iu,
515 err = rtrs_iu_post_recv(&con->c, iu);
517 rtrs_err(con->c.sess, "post iu failed %d\n", err);
518 rtrs_rdma_error_recovery(con);
522 static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
524 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
525 struct rtrs_msg_rkey_rsp *msg;
526 u32 imm_type, imm_payload;
527 bool w_inval = false;
532 WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
534 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
536 if (wc->byte_len < sizeof(*msg)) {
537 rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
541 ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
542 iu->size, DMA_FROM_DEVICE);
544 if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) {
545 rtrs_err(sess->clt, "rkey response is malformed: type %d\n",
546 le16_to_cpu(msg->type));
549 buf_id = le16_to_cpu(msg->buf_id);
550 if (WARN_ON(buf_id >= sess->queue_depth))
553 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
554 if (imm_type == RTRS_IO_RSP_IMM ||
555 imm_type == RTRS_IO_RSP_W_INV_IMM) {
558 w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
559 rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
561 if (WARN_ON(buf_id != msg_id))
563 sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
564 process_io_rsp(sess, msg_id, err, w_inval);
566 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr,
567 iu->size, DMA_FROM_DEVICE);
568 return rtrs_clt_recv_done(con, wc);
570 rtrs_rdma_error_recovery(con);
573 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
575 static struct ib_cqe io_comp_cqe = {
576 .done = rtrs_clt_rdma_done
580 * Post two empty WRs: the first is for this RDMA with IMM,
581 * the second is for the RECV with INV, which happened earlier.
583 static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
585 struct ib_recv_wr wr_arr[2], *wr;
588 memset(wr_arr, 0, sizeof(wr_arr));
589 for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
593 /* Chain backwards */
594 wr->next = &wr_arr[i - 1];
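/* i.e. wr_arr[1].next = &wr_arr[0], so posting the last WR submits both */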
597 return ib_post_recv(con->qp, wr, NULL);
600 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
602 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
603 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
604 u32 imm_type, imm_payload;
605 bool w_inval = false;
608 if (wc->status != IB_WC_SUCCESS) {
609 if (wc->status != IB_WC_WR_FLUSH_ERR) {
610 rtrs_err(sess->clt, "RDMA failed: %s\n",
611 ib_wc_status_msg(wc->status));
612 rtrs_rdma_error_recovery(con);
616 rtrs_clt_update_wc_stats(con);
618 switch (wc->opcode) {
619 case IB_WC_RECV_RDMA_WITH_IMM:
621 * post_recv() RDMA write completions of IO reqs (read/write)
624 if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
626 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
627 &imm_type, &imm_payload);
628 if (imm_type == RTRS_IO_RSP_IMM ||
629 imm_type == RTRS_IO_RSP_W_INV_IMM) {
632 w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
633 rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
635 process_io_rsp(sess, msg_id, err, w_inval);
636 } else if (imm_type == RTRS_HB_MSG_IMM) {
638 rtrs_send_hb_ack(&sess->s);
639 if (sess->flags & RTRS_MSG_NEW_RKEY_F)
640 return rtrs_clt_recv_done(con, wc);
641 } else if (imm_type == RTRS_HB_ACK_IMM) {
643 sess->s.hb_missed_cnt = 0;
644 sess->s.hb_cur_latency =
645 ktime_sub(ktime_get(), sess->s.hb_last_sent);
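/*
 * hb_cur_latency feeds the MP_POLICY_MIN_LATENCY path selection in
 * get_next_path_min_latency().
 */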
646 if (sess->flags & RTRS_MSG_NEW_RKEY_F)
647 return rtrs_clt_recv_done(con, wc);
649 rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
654 * Post two empty WRs: the first is for this RDMA with IMM,
655 * the second is for the RECV with INV, which happened earlier.
657 err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
659 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
661 rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
663 rtrs_rdma_error_recovery(con);
668 * Key invalidations from server side
670 WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
671 wc->wc_flags & IB_WC_WITH_IMM));
672 WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
673 if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
674 if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
675 return rtrs_clt_recv_done(con, wc);
677 return rtrs_clt_rkey_rsp_done(con, wc);
680 case IB_WC_RDMA_WRITE:
682 * post_send() RDMA write completions of IO reqs (read/write)
688 rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode);
693 static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
696 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
698 for (i = 0; i < q_size; i++) {
699 if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
700 struct rtrs_iu *iu = &con->rsp_ius[i];
702 err = rtrs_iu_post_recv(&con->c, iu);
704 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
713 static int post_recv_sess(struct rtrs_clt_sess *sess)
718 for (cid = 0; cid < sess->s.con_num; cid++) {
720 q_size = SERVICE_CON_QUEUE_DEPTH;
722 q_size = sess->queue_depth;
725 * x2 for RDMA read responses + FR key invalidations,
726 * RDMA writes do not require any FR registrations.
730 err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
732 rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err);
742 struct list_head skip_list;
743 struct rtrs_clt *clt;
744 struct rtrs_clt_sess *(*next_path)(struct path_it *it);
748 * list_next_or_null_rr_rcu - get next list element in round-robin fashion.
749 * @head: the head for the list.
750 * @ptr: the list head to take the next element from.
751 * @type: the type of the struct this is embedded in.
752 * @memb: the name of the list_head within the struct.
754 * Next element returned in round-robin fashion, i.e. head will be skipped,
755 * but if list is observed as empty, NULL will be returned.
757 * This primitive may safely run concurrently with the _rcu list-mutation
758 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
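 *
 * Example usage (see rtrs_clt_remove_path_from_arr() below):
 *	next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry,
 *					typeof(*next), s.entry);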
760 #define list_next_or_null_rr_rcu(head, ptr, type, memb) \
762 list_next_or_null_rcu(head, ptr, type, memb) ?: \
763 list_next_or_null_rcu(head, READ_ONCE((ptr)->next), \
768 * get_next_path_rr() - Returns path in round-robin fashion.
769 * @it: the path pointer
771 * Related to @MP_POLICY_RR
774 * rcu_read_lock() must be held.
776 static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
778 struct rtrs_clt_sess __rcu **ppcpu_path;
779 struct rtrs_clt_sess *path;
780 struct rtrs_clt *clt;
785 * Here we use two RCU objects: @paths_list and @pcpu_path
786 * pointer. See rtrs_clt_remove_path_from_arr() for details
787 * how that is handled.
790 ppcpu_path = this_cpu_ptr(clt->pcpu_path);
791 path = rcu_dereference(*ppcpu_path);
793 path = list_first_or_null_rcu(&clt->paths_list,
794 typeof(*path), s.entry);
796 path = list_next_or_null_rr_rcu(&clt->paths_list,
800 rcu_assign_pointer(*ppcpu_path, path);
806 * get_next_path_min_inflight() - Returns path with minimal inflight count.
807 * @it: the path pointer
809 * Related to @MP_POLICY_MIN_INFLIGHT
812 * rcu_read_lock() must be held.
814 static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
816 struct rtrs_clt_sess *min_path = NULL;
817 struct rtrs_clt *clt = it->clt;
818 struct rtrs_clt_sess *sess;
819 int min_inflight = INT_MAX;
822 list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
823 if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
826 if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry)))
829 inflight = atomic_read(&sess->stats->inflight);
831 if (inflight < min_inflight) {
832 min_inflight = inflight;
838 * add the path to the skip list, so that next time we can get a different one
842 list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
848 * get_next_path_min_latency() - Returns path with minimal latency.
849 * @it: the path pointer
851 * Return: a path with the lowest latency or NULL if all paths are tried
854 * rcu_read_lock() must be held.
856 * Related to @MP_POLICY_MIN_LATENCY
858 * This DOES skip already-tried paths.
859 * There is a skip-list to skip a path if the path has been tried but failed.
860 * It will try the minimum latency path and then the second minimum latency
861 * path and so on. Finally it will return NULL if all paths have been tried.
862 * Therefore the caller MUST check whether the returned
863 * path is NULL and trigger the IO error if so.
865 static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it)
867 struct rtrs_clt_sess *min_path = NULL;
868 struct rtrs_clt *clt = it->clt;
869 struct rtrs_clt_sess *sess;
870 ktime_t min_latency = INT_MAX;
873 list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
874 if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
877 if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry)))
880 latency = sess->s.hb_cur_latency;
882 if (latency < min_latency) {
883 min_latency = latency;
889 * add the path to the skip list, so that next time we can get a different one
893 list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
898 static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
900 INIT_LIST_HEAD(&it->skip_list);
904 if (clt->mp_policy == MP_POLICY_RR)
905 it->next_path = get_next_path_rr;
906 else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
907 it->next_path = get_next_path_min_inflight;
909 it->next_path = get_next_path_min_latency;
912 static inline void path_it_deinit(struct path_it *it)
914 struct list_head *skip, *tmp;
916 * The skip_list is used by the MIN_INFLIGHT and MIN_LATENCY policies.
917 * We need to remove paths from it, so that the next IO can insert
918 * paths (->mp_skip_entry) into a skip_list again.
920 list_for_each_safe(skip, tmp, &it->skip_list)
925 * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
926 * about an inflight IO.
927 * The user buffer holding user control message (not data) is copied into
928 * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
929 * also hold the control message of rtrs.
930 * @req: an io request holding information about IO.
931 * @sess: client session
932 * @conf: confirmation callback function to notify the upper layer.
933 * @permit: permit for allocation of RDMA remote buffer
934 * @priv: private pointer
935 * @vec: kernel vector containing control message
936 * @usr_len: length of the user message
937 * @sg: scatter list for IO data
938 * @sg_cnt: number of scatter list entries
939 * @data_len: length of the IO data
940 * @dir: direction of the IO.
942 static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
943 struct rtrs_clt_sess *sess,
944 void (*conf)(void *priv, int errno),
945 struct rtrs_permit *permit, void *priv,
946 const struct kvec *vec, size_t usr_len,
947 struct scatterlist *sg, size_t sg_cnt,
948 size_t data_len, int dir)
950 struct iov_iter iter;
953 req->permit = permit;
955 req->usr_len = usr_len;
956 req->data_len = data_len;
958 req->sg_cnt = sg_cnt;
961 req->con = rtrs_permit_to_clt_con(sess, permit);
963 req->need_inv = false;
964 req->need_inv_comp = false;
966 refcount_set(&req->ref, 1);
968 iov_iter_kvec(&iter, READ, vec, 1, usr_len);
969 len = _copy_from_iter(req->iu->buf, usr_len, &iter);
970 WARN_ON(len != usr_len);
972 reinit_completion(&req->inv_comp);
975 static struct rtrs_clt_io_req *
976 rtrs_clt_get_req(struct rtrs_clt_sess *sess,
977 void (*conf)(void *priv, int errno),
978 struct rtrs_permit *permit, void *priv,
979 const struct kvec *vec, size_t usr_len,
980 struct scatterlist *sg, size_t sg_cnt,
981 size_t data_len, int dir)
983 struct rtrs_clt_io_req *req;
985 req = &sess->reqs[permit->mem_id];
986 rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len,
987 sg, sg_cnt, data_len, dir);
991 static struct rtrs_clt_io_req *
992 rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
993 struct rtrs_clt_io_req *fail_req)
995 struct rtrs_clt_io_req *req;
997 .iov_base = fail_req->iu->buf,
998 .iov_len = fail_req->usr_len
1001 req = &alive_sess->reqs[fail_req->permit->mem_id];
1002 rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit,
1003 fail_req->priv, &vec, fail_req->usr_len,
1004 fail_req->sglist, fail_req->sg_cnt,
1005 fail_req->data_len, fail_req->dir);
1009 static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
1010 struct rtrs_clt_io_req *req,
1011 struct rtrs_rbuf *rbuf, bool fr_en,
1012 u32 size, u32 imm, struct ib_send_wr *wr,
1013 struct ib_send_wr *tail)
1015 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1016 struct ib_sge *sge = req->sge;
1017 enum ib_send_flags flags;
1018 struct scatterlist *sg;
1021 struct ib_send_wr *ptail = NULL;
1025 sge[i].addr = req->mr->iova;
1026 sge[i].length = req->mr->length;
1027 sge[i].lkey = req->mr->lkey;
1032 for_each_sg(req->sglist, sg, req->sg_cnt, i) {
1033 sge[i].addr = sg_dma_address(sg);
1034 sge[i].length = sg_dma_len(sg);
1035 sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
1037 num_sge = 1 + req->sg_cnt;
1039 sge[i].addr = req->iu->dma_addr;
1040 sge[i].length = size;
1041 sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
1044 * From time to time we have to post signalled sends,
1045 * or send queue will fill up and only QP reset can help.
1047 flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
1048 0 : IB_SEND_SIGNALED;
1050 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
1051 size, DMA_TO_DEVICE);
1053 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
1054 rbuf->rkey, rbuf->addr, imm,
1058 static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
1062 /* Align the MR to a 4K page size to match the block virt boundary */
1063 nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
1066 if (nr < req->sg_cnt)
1068 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1073 static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
1075 struct rtrs_clt_con *con = req->con;
1076 struct rtrs_sess *s = con->c.sess;
1077 struct rtrs_clt_sess *sess = to_clt_sess(s);
1078 struct rtrs_msg_rdma_write *msg;
1080 struct rtrs_rbuf *rbuf;
1083 struct ib_reg_wr rwr;
1084 struct ib_send_wr inv_wr;
1085 struct ib_send_wr *wr = NULL;
1088 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
1090 if (tsize > sess->chunk_size) {
1091 rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
1092 tsize, sess->chunk_size);
1096 count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist,
1097 req->sg_cnt, req->dir);
1099 rtrs_wrn(s, "Write request failed, map failed\n");
1103 /* put rtrs msg after sg and user message */
1104 msg = req->iu->buf + req->usr_len;
1105 msg->type = cpu_to_le16(RTRS_MSG_WRITE);
1106 msg->usr_len = cpu_to_le16(req->usr_len);
1108 /* rtrs message on server side will be after user data and message */
1109 imm = req->permit->mem_off + req->data_len + req->usr_len;
1110 imm = rtrs_to_io_req_imm(imm);
1111 buf_id = req->permit->mem_id;
1112 req->sg_size = tsize;
1113 rbuf = &sess->rbufs[buf_id];
1116 ret = rtrs_map_sg_fr(req, count);
1119 "Write request failed, failed to map fast reg. data, err: %d\n",
1121 ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
1122 req->sg_cnt, req->dir);
1125 inv_wr = (struct ib_send_wr) {
1126 .opcode = IB_WR_LOCAL_INV,
1127 .wr_cqe = &req->inv_cqe,
1128 .send_flags = IB_SEND_SIGNALED,
1129 .ex.invalidate_rkey = req->mr->rkey,
1131 req->inv_cqe.done = rtrs_clt_inv_rkey_done;
1132 rwr = (struct ib_reg_wr) {
1133 .wr.opcode = IB_WR_REG_MR,
1134 .wr.wr_cqe = &fast_reg_cqe,
1136 .key = req->mr->rkey,
1137 .access = (IB_ACCESS_LOCAL_WRITE),
1141 refcount_inc(&req->ref);
1144 * Update stats now; after the request has been successfully sent
1145 * it is no longer safe to touch it.
1147 rtrs_clt_update_all_stats(req, WRITE);
1149 ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en,
1150 req->usr_len + sizeof(*msg),
1154 "Write request failed: error=%d path=%s [%s:%u]\n",
1155 ret, kobject_name(&sess->kobj), sess->hca_name,
1157 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
1158 atomic_dec(&sess->stats->inflight);
1160 ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
1161 req->sg_cnt, req->dir);
1167 static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
1169 struct rtrs_clt_con *con = req->con;
1170 struct rtrs_sess *s = con->c.sess;
1171 struct rtrs_clt_sess *sess = to_clt_sess(s);
1172 struct rtrs_msg_rdma_read *msg;
1173 struct rtrs_ib_dev *dev = sess->s.dev;
1175 struct ib_reg_wr rwr;
1176 struct ib_send_wr *wr = NULL;
1181 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
1183 if (tsize > sess->chunk_size) {
1185 "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
1186 tsize, sess->chunk_size);
1191 count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
1195 "Read request failed, dma map failed\n");
1199 /* put our message into req->buf after the user message */
1200 msg = req->iu->buf + req->usr_len;
1201 msg->type = cpu_to_le16(RTRS_MSG_READ);
1202 msg->usr_len = cpu_to_le16(req->usr_len);
1205 ret = rtrs_map_sg_fr(req, count);
1208 "Read request failed, failed to map fast reg. data, err: %d\n",
1210 ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
1214 rwr = (struct ib_reg_wr) {
1215 .wr.opcode = IB_WR_REG_MR,
1216 .wr.wr_cqe = &fast_reg_cqe,
1218 .key = req->mr->rkey,
1219 .access = (IB_ACCESS_LOCAL_WRITE |
1220 IB_ACCESS_REMOTE_WRITE),
1224 msg->sg_cnt = cpu_to_le16(1);
1225 msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F);
1227 msg->desc[0].addr = cpu_to_le64(req->mr->iova);
1228 msg->desc[0].key = cpu_to_le32(req->mr->rkey);
1229 msg->desc[0].len = cpu_to_le32(req->mr->length);
1231 /* Further invalidation is required */
1232 req->need_inv = !!RTRS_MSG_NEED_INVAL_F;
1239 * rtrs message will be after the space reserved for disk data and the user message
1242 imm = req->permit->mem_off + req->data_len + req->usr_len;
1243 imm = rtrs_to_io_req_imm(imm);
1244 buf_id = req->permit->mem_id;
1246 req->sg_size = sizeof(*msg);
1247 req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc);
1248 req->sg_size += req->usr_len;
1251 * Update stats now; after the request has been successfully sent
1252 * it is no longer safe to touch it.
1254 rtrs_clt_update_all_stats(req, READ);
1256 ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
1257 req->data_len, imm, wr);
1260 "Read request failed: error=%d path=%s [%s:%u]\n",
1261 ret, kobject_name(&sess->kobj), sess->hca_name,
1263 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
1264 atomic_dec(&sess->stats->inflight);
1265 req->need_inv = false;
1267 ib_dma_unmap_sg(dev->ib_dev, req->sglist,
1268 req->sg_cnt, req->dir);
1275 * rtrs_clt_failover_req() - Try to find an active path for a failed request
1277 * @fail_req: a failed io request.
1279 static int rtrs_clt_failover_req(struct rtrs_clt *clt,
1280 struct rtrs_clt_io_req *fail_req)
1282 struct rtrs_clt_sess *alive_sess;
1283 struct rtrs_clt_io_req *req;
1284 int err = -ECONNABORTED;
1288 for (path_it_init(&it, clt);
1289 (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num;
1291 if (READ_ONCE(alive_sess->state) != RTRS_CLT_CONNECTED)
1293 req = rtrs_clt_get_copy_req(alive_sess, fail_req);
1294 if (req->dir == DMA_TO_DEVICE)
1295 err = rtrs_clt_write_req(req);
1297 err = rtrs_clt_read_req(req);
1299 req->in_use = false;
1303 rtrs_clt_inc_failover_cnt(alive_sess->stats);
1306 path_it_deinit(&it);
1312 static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
1314 struct rtrs_clt *clt = sess->clt;
1315 struct rtrs_clt_io_req *req;
1320 for (i = 0; i < sess->queue_depth; ++i) {
1321 req = &sess->reqs[i];
1326 * Safely (without notification) complete the failed request.
1327 * After completion this request is still usable and can
1328 * be failed over to another path.
1330 complete_rdma_req(req, -ECONNABORTED, false, true);
1332 err = rtrs_clt_failover_req(clt, req);
1334 /* Failover failed, notify anyway */
1335 req->conf(req->priv, err);
1339 static void free_sess_reqs(struct rtrs_clt_sess *sess)
1341 struct rtrs_clt_io_req *req;
1346 for (i = 0; i < sess->queue_depth; ++i) {
1347 req = &sess->reqs[i];
1349 ib_dereg_mr(req->mr);
1351 rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1);
1357 static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
1359 struct rtrs_clt_io_req *req;
1360 int i, err = -ENOMEM;
1362 sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs),
1367 for (i = 0; i < sess->queue_depth; ++i) {
1368 req = &sess->reqs[i];
1369 req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL,
1370 sess->s.dev->ib_dev,
1372 rtrs_clt_rdma_done);
1376 req->sge = kcalloc(2, sizeof(*req->sge), GFP_KERNEL);
1380 req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
1381 sess->max_pages_per_mr);
1382 if (IS_ERR(req->mr)) {
1383 err = PTR_ERR(req->mr);
1385 pr_err("Failed to alloc sess->max_pages_per_mr %d\n",
1386 sess->max_pages_per_mr);
1390 init_completion(&req->inv_comp);
1396 free_sess_reqs(sess);
1401 static int alloc_permits(struct rtrs_clt *clt)
1403 unsigned int chunk_bits;
1406 clt->permits_map = kcalloc(BITS_TO_LONGS(clt->queue_depth),
1407 sizeof(long), GFP_KERNEL);
1408 if (!clt->permits_map) {
1412 clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
1413 if (!clt->permits) {
1417 chunk_bits = ilog2(clt->queue_depth - 1) + 1;
1418 for (i = 0; i < clt->queue_depth; i++) {
1419 struct rtrs_permit *permit;
1421 permit = get_permit(clt, i);
1423 permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits);
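/*
 * mem_off shifts the permit index into the top bits of the immediate
 * payload; the remaining low bits carry the offset inside the chunk,
 * cf. imm = permit->mem_off + data_len + usr_len in rtrs_clt_write_req().
 */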
1429 kfree(clt->permits_map);
1430 clt->permits_map = NULL;
1435 static void free_permits(struct rtrs_clt *clt)
1437 if (clt->permits_map) {
1438 size_t sz = clt->queue_depth;
1440 wait_event(clt->permits_wait,
1441 find_first_bit(clt->permits_map, sz) >= sz);
1443 kfree(clt->permits_map);
1444 clt->permits_map = NULL;
1445 kfree(clt->permits);
1446 clt->permits = NULL;
1449 static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
1451 struct ib_device *ib_dev;
1452 u64 max_pages_per_mr;
1455 ib_dev = sess->s.dev->ib_dev;
1458 * Use the smallest page size supported by the HCA, down to a
1459 * minimum of 4096 bytes. We're unlikely to build large sglists
1460 * out of smaller entries.
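 *
 * For example, with a 4 KiB MR page size and attrs.max_mr_size of 256 MiB
 * the device would allow 65536 pages per MR, so the smaller
 * RTRS_MAX_SEGMENTS limit is kept (assuming max_fast_reg_page_list_len is
 * at least that large).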
1462 mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
1463 max_pages_per_mr = ib_dev->attrs.max_mr_size;
1464 do_div(max_pages_per_mr, (1ull << mr_page_shift));
1465 sess->max_pages_per_mr =
1466 min3(sess->max_pages_per_mr, (u32)max_pages_per_mr,
1467 ib_dev->attrs.max_fast_reg_page_list_len);
1468 sess->clt->max_segments =
1469 min(sess->max_pages_per_mr, sess->clt->max_segments);
1472 static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
1473 enum rtrs_clt_state new_state,
1474 enum rtrs_clt_state *old_state)
1478 spin_lock_irq(&sess->state_wq.lock);
1480 *old_state = sess->state;
1481 changed = rtrs_clt_change_state(sess, new_state);
1482 spin_unlock_irq(&sess->state_wq.lock);
1487 static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
1489 struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
1491 rtrs_rdma_error_recovery(con);
1494 static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
1496 rtrs_init_hb(&sess->s, &io_comp_cqe,
1497 RTRS_HB_INTERVAL_MS,
1499 rtrs_clt_hb_err_handler,
1503 static void rtrs_clt_reconnect_work(struct work_struct *work);
1504 static void rtrs_clt_close_work(struct work_struct *work);
1506 static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
1507 const struct rtrs_addr *path,
1508 size_t con_num, u32 nr_poll_queues)
1510 struct rtrs_clt_sess *sess;
1515 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
1521 * +1: Extra connection for user messages
1523 total_con = con_num + nr_poll_queues + 1;
1524 sess->s.con = kcalloc(total_con, sizeof(*sess->s.con), GFP_KERNEL);
1528 sess->s.con_num = total_con;
1529 sess->s.irq_con_num = con_num + 1;
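/*
 * For example, con_num == 4 and nr_poll_queues == 2 gives total_con == 7:
 * index 0 is the user connection, 1..4 are IRQ IO connections and 5..6 are
 * polled IO connections (cid >= irq_con_num, see create_con_cq_qp()).
 */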
1531 sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
1535 mutex_init(&sess->init_mutex);
1536 uuid_gen(&sess->s.uuid);
1537 memcpy(&sess->s.dst_addr, path->dst,
1538 rdma_addr_size((struct sockaddr *)path->dst));
1541 * rdma_resolve_addr() passes src_addr to cma_bind_addr, which
1542 * checks that the sa_family is non-zero. If the user passed src_addr=NULL,
1543 * sess->src_addr will contain only zeros, which is fine.
1546 memcpy(&sess->s.src_addr, path->src,
1547 rdma_addr_size((struct sockaddr *)path->src));
1548 strscpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
1550 sess->max_pages_per_mr = RTRS_MAX_SEGMENTS;
1551 init_waitqueue_head(&sess->state_wq);
1552 sess->state = RTRS_CLT_CONNECTING;
1553 atomic_set(&sess->connected_cnt, 0);
1554 INIT_WORK(&sess->close_work, rtrs_clt_close_work);
1555 INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work);
1556 rtrs_clt_init_hb(sess);
1558 sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry));
1559 if (!sess->mp_skip_entry)
1560 goto err_free_stats;
1562 for_each_possible_cpu(cpu)
1563 INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu));
1565 err = rtrs_clt_init_stats(sess->stats);
1567 goto err_free_percpu;
1572 free_percpu(sess->mp_skip_entry);
1580 return ERR_PTR(err);
1583 void free_sess(struct rtrs_clt_sess *sess)
1585 free_percpu(sess->mp_skip_entry);
1586 mutex_destroy(&sess->init_mutex);
1592 static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
1594 struct rtrs_clt_con *con;
1596 con = kzalloc(sizeof(*con), GFP_KERNEL);
1600 /* Map first two connections to the first CPU */
1601 con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
1603 con->c.sess = &sess->s;
1604 /* Align with srv, init as 1 */
1605 atomic_set(&con->c.wr_cnt, 1);
1606 mutex_init(&con->con_mutex);
1608 sess->s.con[cid] = &con->c;
1613 static void destroy_con(struct rtrs_clt_con *con)
1615 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1617 sess->s.con[con->c.cid] = NULL;
1618 mutex_destroy(&con->con_mutex);
1622 static int create_con_cq_qp(struct rtrs_clt_con *con)
1624 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1625 u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit;
1627 struct rtrs_msg_rkey_rsp *rsp;
1629 lockdep_assert_held(&con->con_mutex);
1630 if (con->c.cid == 0) {
1632 /* We must be the first here */
1633 if (WARN_ON(sess->s.dev))
1637 * The whole session uses the device from the user connection.
1638 * Be careful not to close the user connection before the ib dev
1639 * is gracefully put.
1641 sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
1645 "rtrs_ib_dev_find_get_or_add(): no memory\n");
1648 sess->s.dev_ref = 1;
1649 query_fast_reg_mode(sess);
1650 wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
1652 * Two (request + registration) completions for send,
1653 * two for recv if always_invalidate is set on the server,
1655 * + 2 for drain and heartbeat,
1656 * in case the qp gets into an error state.
1659 min_t(int, wr_limit, SERVICE_CON_QUEUE_DEPTH * 2 + 2);
1660 max_recv_wr = max_send_wr;
1663 * Here we assume that session members are correctly set.
1664 * This is always true if user connection (cid == 0) is
1665 * established first.
1667 if (WARN_ON(!sess->s.dev))
1669 if (WARN_ON(!sess->queue_depth))
1672 wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
1673 /* Shared between connections */
1675 max_send_wr = min_t(int, wr_limit,
1676 /* QD * (REQ + RSP + FR REGS or INVS) + drain */
1677 sess->queue_depth * 3 + 1);
1678 max_recv_wr = min_t(int, wr_limit,
1679 sess->queue_depth * 3 + 1);
1682 atomic_set(&con->c.sq_wr_avail, max_send_wr);
1683 cq_num = max_send_wr + max_recv_wr;
1684 /* alloc iu to recv new rkey reply when server reports flags set */
1685 if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
1686 con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
1687 GFP_KERNEL, sess->s.dev->ib_dev,
1689 rtrs_clt_rdma_done);
1692 con->queue_num = cq_num;
1694 cq_num = max_send_wr + max_recv_wr;
1695 cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
1696 if (con->c.cid >= sess->s.irq_con_num)
1697 err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge,
1698 cq_vector, cq_num, max_send_wr,
1699 max_recv_wr, IB_POLL_DIRECT);
1701 err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge,
1702 cq_vector, cq_num, max_send_wr,
1703 max_recv_wr, IB_POLL_SOFTIRQ);
1705 * In case of error we do not bother to clean previous allocations,
1706 * since destroy_con_cq_qp() must be called.
1711 static void destroy_con_cq_qp(struct rtrs_clt_con *con)
1713 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1716 * Be careful here: destroy_con_cq_qp() can be called even if
1717 * create_con_cq_qp() failed, see the comments there.
1719 lockdep_assert_held(&con->con_mutex);
1720 rtrs_cq_qp_destroy(&con->c);
1722 rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_num);
1723 con->rsp_ius = NULL;
1726 if (sess->s.dev_ref && !--sess->s.dev_ref) {
1727 rtrs_ib_dev_put(sess->s.dev);
1732 static void stop_cm(struct rtrs_clt_con *con)
1734 rdma_disconnect(con->c.cm_id);
1736 ib_drain_qp(con->c.qp);
1739 static void destroy_cm(struct rtrs_clt_con *con)
1741 rdma_destroy_id(con->c.cm_id);
1742 con->c.cm_id = NULL;
1745 static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
1747 struct rtrs_sess *s = con->c.sess;
1750 mutex_lock(&con->con_mutex);
1751 err = create_con_cq_qp(con);
1752 mutex_unlock(&con->con_mutex);
1754 rtrs_err(s, "create_con_cq_qp(), err: %d\n", err);
1757 err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
1759 rtrs_err(s, "Resolving route failed, err: %d\n", err);
1764 static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
1766 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1767 struct rtrs_clt *clt = sess->clt;
1768 struct rtrs_msg_conn_req msg;
1769 struct rdma_conn_param param;
1773 param = (struct rdma_conn_param) {
1775 .rnr_retry_count = 7,
1776 .private_data = &msg,
1777 .private_data_len = sizeof(msg),
1780 msg = (struct rtrs_msg_conn_req) {
1781 .magic = cpu_to_le16(RTRS_MAGIC),
1782 .version = cpu_to_le16(RTRS_PROTO_VER),
1783 .cid = cpu_to_le16(con->c.cid),
1784 .cid_num = cpu_to_le16(sess->s.con_num),
1785 .recon_cnt = cpu_to_le16(sess->s.recon_cnt),
1787 msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0;
1788 uuid_copy(&msg.sess_uuid, &sess->s.uuid);
1789 uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
1791 err = rdma_connect_locked(con->c.cm_id, ¶m);
1793 rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
1798 static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
1799 struct rdma_cm_event *ev)
1801 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1802 struct rtrs_clt *clt = sess->clt;
1803 const struct rtrs_msg_conn_rsp *msg;
1804 u16 version, queue_depth;
1808 msg = ev->param.conn.private_data;
1809 len = ev->param.conn.private_data_len;
1810 if (len < sizeof(*msg)) {
1811 rtrs_err(clt, "Invalid RTRS connection response\n");
1814 if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
1815 rtrs_err(clt, "Invalid RTRS magic\n");
1818 version = le16_to_cpu(msg->version);
1819 if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
1820 rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
1821 version >> 8, RTRS_PROTO_VER_MAJOR);
1824 errno = le16_to_cpu(msg->errno);
1826 rtrs_err(clt, "Invalid RTRS message: errno %d\n",
1830 if (con->c.cid == 0) {
1831 queue_depth = le16_to_cpu(msg->queue_depth);
1833 if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) {
1834 rtrs_err(clt, "Error: queue depth changed\n");
1837 * Stop any more reconnection attempts
1839 sess->reconnect_attempts = -1;
1841 "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
1846 sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
1851 sess->queue_depth = queue_depth;
1852 sess->s.signal_interval = min_not_zero(queue_depth,
1853 (unsigned short) SERVICE_CON_QUEUE_DEPTH);
1854 sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
1855 sess->max_io_size = le32_to_cpu(msg->max_io_size);
1856 sess->flags = le32_to_cpu(msg->flags);
1857 sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
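/* e.g. a 128 KiB max_io_size plus a 4 KiB max_hdr_size gives a 132 KiB chunk_size */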
1860 * The global IO size is always a minimum.
1861 * If during a reconnection the server sends us a slightly
1862 * higher value, the client does not care and uses the cached minimum.
1864 * Since we can have several sessions (paths) re-establishing
1865 * connections in parallel, use a lock.
1867 mutex_lock(&clt->paths_mutex);
1868 clt->queue_depth = sess->queue_depth;
1869 clt->max_io_size = min_not_zero(sess->max_io_size,
1871 mutex_unlock(&clt->paths_mutex);
1874 * Cache the hca_port and hca_name for sysfs
1876 sess->hca_port = con->c.cm_id->port_num;
1877 scnprintf(sess->hca_name, sizeof(sess->hca_name),
1878 sess->s.dev->ib_dev->name);
1879 sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
1880 /* set for_new_clt, to allow future reconnect on any path */
1881 sess->for_new_clt = 1;
1887 static inline void flag_success_on_conn(struct rtrs_clt_con *con)
1889 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1891 atomic_inc(&sess->connected_cnt);
1895 static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
1896 struct rdma_cm_event *ev)
1898 struct rtrs_sess *s = con->c.sess;
1899 const struct rtrs_msg_conn_rsp *msg;
1900 const char *rej_msg;
1904 status = ev->status;
1905 rej_msg = rdma_reject_msg(con->c.cm_id, status);
1906 msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
1908 if (msg && data_len >= sizeof(*msg)) {
1909 errno = (int16_t)le16_to_cpu(msg->errno);
1910 if (errno == -EBUSY)
1912 "Previous session is still exists on the server, please reconnect later\n");
1915 "Connect rejected: status %d (%s), rtrs errno %d\n",
1916 status, rej_msg, errno);
1919 "Connect rejected but with malformed message: status %d (%s)\n",
1926 void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
1928 if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSING, NULL))
1929 queue_work(rtrs_wq, &sess->close_work);
1931 flush_work(&sess->close_work);
1934 static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
1936 if (con->cm_err == 1) {
1937 struct rtrs_clt_sess *sess;
1939 sess = to_clt_sess(con->c.sess);
1940 if (atomic_dec_and_test(&sess->connected_cnt))
1942 wake_up(&sess->state_wq);
1944 con->cm_err = cm_err;
1947 static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
1948 struct rdma_cm_event *ev)
1950 struct rtrs_clt_con *con = cm_id->context;
1951 struct rtrs_sess *s = con->c.sess;
1952 struct rtrs_clt_sess *sess = to_clt_sess(s);
1955 switch (ev->event) {
1956 case RDMA_CM_EVENT_ADDR_RESOLVED:
1957 cm_err = rtrs_rdma_addr_resolved(con);
1959 case RDMA_CM_EVENT_ROUTE_RESOLVED:
1960 cm_err = rtrs_rdma_route_resolved(con);
1962 case RDMA_CM_EVENT_ESTABLISHED:
1963 cm_err = rtrs_rdma_conn_established(con, ev);
1966 * Report success and wake up. Here we abuse state_wq,
1967 * i.e. wake up without state change, but we set cm_err.
1969 flag_success_on_conn(con);
1970 wake_up(&sess->state_wq);
1974 case RDMA_CM_EVENT_REJECTED:
1975 cm_err = rtrs_rdma_conn_rejected(con, ev);
1977 case RDMA_CM_EVENT_DISCONNECTED:
1978 /* No message for disconnecting */
1979 cm_err = -ECONNRESET;
1981 case RDMA_CM_EVENT_CONNECT_ERROR:
1982 case RDMA_CM_EVENT_UNREACHABLE:
1983 case RDMA_CM_EVENT_ADDR_CHANGE:
1984 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1985 rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
1986 rdma_event_msg(ev->event), ev->status);
1987 cm_err = -ECONNRESET;
1989 case RDMA_CM_EVENT_ADDR_ERROR:
1990 case RDMA_CM_EVENT_ROUTE_ERROR:
1991 rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
1992 rdma_event_msg(ev->event), ev->status);
1993 cm_err = -EHOSTUNREACH;
1995 case RDMA_CM_EVENT_DEVICE_REMOVAL:
1997 * Device removal is a special case. Queue close and return 0.
1999 rtrs_clt_close_conns(sess, false);
2002 rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n",
2003 rdma_event_msg(ev->event), ev->status);
2004 cm_err = -ECONNRESET;
2010 * A cm error makes sense only while a connection is being established;
2011 * in other cases we rely on the normal reconnect procedure.
2013 flag_error_on_conn(con, cm_err);
2014 rtrs_rdma_error_recovery(con);
2020 static int create_cm(struct rtrs_clt_con *con)
2022 struct rtrs_sess *s = con->c.sess;
2023 struct rtrs_clt_sess *sess = to_clt_sess(s);
2024 struct rdma_cm_id *cm_id;
2027 cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
2028 sess->s.dst_addr.ss_family == AF_IB ?
2029 RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
2030 if (IS_ERR(cm_id)) {
2031 err = PTR_ERR(cm_id);
2032 rtrs_err(s, "Failed to create CM ID, err: %d\n", err);
2036 con->c.cm_id = cm_id;
2038 /* allow the port to be reused */
2039 err = rdma_set_reuseaddr(cm_id, 1);
2041 rtrs_err(s, "Set address reuse failed, err: %d\n", err);
2044 err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr,
2045 (struct sockaddr *)&sess->s.dst_addr,
2046 RTRS_CONNECT_TIMEOUT_MS);
2048 rtrs_err(s, "Failed to resolve address, err: %d\n", err);
2052 * Combine connection status and session events. This is needed
2053 * to wait for one of two possible cases: cm_err has something meaningful
2054 * or the session state was really changed to an error by device removal.
2056 err = wait_event_interruptible_timeout(
2058 con->cm_err || sess->state != RTRS_CLT_CONNECTING,
2059 msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
2060 if (err == 0 || err == -ERESTARTSYS) {
2063 /* Timed out or interrupted */
2066 if (con->cm_err < 0) {
2070 if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) {
2071 /* Device removal */
2072 err = -ECONNABORTED;
2080 mutex_lock(&con->con_mutex);
2081 destroy_con_cq_qp(con);
2082 mutex_unlock(&con->con_mutex);
2089 static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
2091 struct rtrs_clt *clt = sess->clt;
2095 * We can fire the RECONNECTED event only when all paths were
2096 * connected on rtrs_clt_open(), then each was disconnected
2097 * and the first one connected again. That's why we play this
2098 * nasty game with the counter value.
2101 mutex_lock(&clt->paths_ev_mutex);
2102 up = ++clt->paths_up;
2104 * Here it is safe to access paths num directly since up counter
2105 * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is
2106 * in progress, thus paths removals are impossible.
2108 if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
2109 clt->paths_up = clt->paths_num;
2111 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
2112 mutex_unlock(&clt->paths_ev_mutex);
2114 /* Mark session as established */
2115 sess->established = true;
2116 sess->reconnect_attempts = 0;
2117 sess->stats->reconnects.successful_cnt++;
2120 static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
2122 struct rtrs_clt *clt = sess->clt;
2124 if (!sess->established)
2127 sess->established = false;
2128 mutex_lock(&clt->paths_ev_mutex);
2129 WARN_ON(!clt->paths_up);
2130 if (--clt->paths_up == 0)
2131 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
2132 mutex_unlock(&clt->paths_ev_mutex);
2135 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
2137 struct rtrs_clt_con *con;
2140 WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED);
2143 * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
2144 * exactly in between. Start destroying after it finishes.
2146 mutex_lock(&sess->init_mutex);
2147 mutex_unlock(&sess->init_mutex);
2150 * All IO paths must observe the !CONNECTED state before we free everything.
2155 rtrs_stop_hb(&sess->s);
2158 * The order is utterly crucial: first disconnect and complete all
2159 * rdma requests with an error (thus setting in_use=false for requests),
2160 * then fail outstanding requests checking in_use for each, and
2161 * eventually notify the upper layer about the session disconnection.
2164 for (cid = 0; cid < sess->s.con_num; cid++) {
2165 if (!sess->s.con[cid])
2167 con = to_clt_con(sess->s.con[cid]);
2170 fail_all_outstanding_reqs(sess);
2171 free_sess_reqs(sess);
2172 rtrs_clt_sess_down(sess);
2175 * Wait for graceful shutdown, namely when the peer side invokes
2176 * rdma_disconnect(). 'connected_cnt' is decremented only on
2177 * CM events, thus if the other side has crashed and hb has detected
2178 * something is wrong, here we will be stuck for exactly timeout ms,
2179 * since CM does not fire anything. That is fine, we are not in a hurry.
2182 wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt),
2183 msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
2185 for (cid = 0; cid < sess->s.con_num; cid++) {
2186 if (!sess->s.con[cid])
2188 con = to_clt_con(sess->s.con[cid]);
2189 mutex_lock(&con->con_mutex);
2190 destroy_con_cq_qp(con);
2191 mutex_unlock(&con->con_mutex);
2197 static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path,
2198 struct rtrs_clt_sess *sess,
2199 struct rtrs_clt_sess *next)
2201 struct rtrs_clt_sess **ppcpu_path;
2203 /* Call cmpxchg() without sparse warnings */
2204 ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
2205 return sess == cmpxchg(ppcpu_path, sess, next);
2208 static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
2210 struct rtrs_clt *clt = sess->clt;
2211 struct rtrs_clt_sess *next;
2212 bool wait_for_grace = false;
2215 mutex_lock(&clt->paths_mutex);
2216 list_del_rcu(&sess->s.entry);
2218 /* Make sure everybody observes path removal. */
2222 * At this point nobody sees @sess in the list, but still we have
2223 * dangling pointer @pcpu_path which _can_ point to @sess. Since
2224 * nobody can observe @sess in the list, we guarantee that IO path
2225 * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
2226 * to @sess, but can never again become @sess.
2230 * Decrement the paths number only after a grace period, because
2231 * a caller of do_each_path() must first observe the list without
2232 * the path and only then the decremented paths number.
2234 * Otherwise there can be the following situation:
2235 * o Two paths exist and IO is coming.
2236 * o One path is removed:
2238 * do_each_path(): rtrs_clt_remove_path_from_arr():
2239 * path = get_next_path()
2240 * ^^^ list_del_rcu(path)
2241 * [!CONNECTED path] clt->paths_num--
2243 * load clt->paths_num from 2 to 1
2247 * path is observed as !CONNECTED, but do_each_path() loop
2248 * ends, because expression i < clt->paths_num is false.
2253 * Get the @next path after the current @sess which is going to be
2254 * removed. If @sess is the last element, then @next is NULL.
2257 next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry,
2258 typeof(*next), s.entry);
2262 * @pcpu paths can still point to the path which is going to be
2263 * removed, so change the pointer manually.
2265 for_each_possible_cpu(cpu) {
2266 struct rtrs_clt_sess __rcu **ppcpu_path;
2268 ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
2269 if (rcu_dereference_protected(*ppcpu_path,
2270 lockdep_is_held(&clt->paths_mutex)) != sess)
2272 * synchronize_rcu() was called just after deleting
2273 * entry from the list, thus IO code path cannot
2274 * change pointer back to the pointer which is going
2275 * to be removed, we are safe here.
2280 * We race with IO code path, which also changes pointer,
2281 * thus we have to be careful not to overwrite it.
2283 if (xchg_sessions(ppcpu_path, sess, next))
2285 * @ppcpu_path was successfully replaced with @next,
2286 * which means that someone could also have picked up
2287 * @sess and be dereferencing it right now, so waiting for
2288 * a grace period is required.
2290 wait_for_grace = true;
2295 mutex_unlock(&clt->paths_mutex);
2298 static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess)
2300 struct rtrs_clt *clt = sess->clt;
2302 mutex_lock(&clt->paths_mutex);
2305 list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
2306 mutex_unlock(&clt->paths_mutex);
2309 static void rtrs_clt_close_work(struct work_struct *work)
2311 struct rtrs_clt_sess *sess;
2313 sess = container_of(work, struct rtrs_clt_sess, close_work);
2315 cancel_delayed_work_sync(&sess->reconnect_dwork);
2316 rtrs_clt_stop_and_destroy_conns(sess);
2317 rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSED, NULL);
2320 static int init_conns(struct rtrs_clt_sess *sess)
2326 * On every new session connection increase the reconnect counter
2327 * to avoid clashes with previous sessions not yet closed
2328 * on the server side.
2330 sess->s.recon_cnt++;
2332 /* Establish all RDMA connections */
2333 for (cid = 0; cid < sess->s.con_num; cid++) {
2334 err = create_con(sess, cid);
2338 err = create_cm(to_clt_con(sess->s.con[cid]));
2340 destroy_con(to_clt_con(sess->s.con[cid]));
2344 err = alloc_sess_reqs(sess);
2348 rtrs_start_hb(&sess->s);
2354 struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]);
2358 mutex_lock(&con->con_mutex);
2359 destroy_con_cq_qp(con);
2360 mutex_unlock(&con->con_mutex);
2365 * If we've never taken the async path and got an error, say,
2366 * while doing rdma_resolve_addr(), switch to the CONNECTING_ERR state
2367 * manually to keep reconnecting.
2369 rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
2374 static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
2376 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
2377 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
2380 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
2381 rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
2383 if (wc->status != IB_WC_SUCCESS) {
2384 rtrs_err(sess->clt, "Sess info request send failed: %s\n",
2385 ib_wc_status_msg(wc->status));
2386 rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
2390 rtrs_clt_update_wc_stats(con);
2393 static int process_info_rsp(struct rtrs_clt_sess *sess,
2394 const struct rtrs_msg_info_rsp *msg)
2396 unsigned int sg_cnt, total_len;
2399 sg_cnt = le16_to_cpu(msg->sg_cnt);
2400 if (!sg_cnt || (sess->queue_depth % sg_cnt)) {
2401 rtrs_err(sess->clt, "Incorrect sg_cnt %d, queue depth is not a multiple of it\n",
2407 * Check if IB immediate data size is enough to hold the mem_id and
2408 * the offset inside the memory chunk.
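 *
 * For example, an sg_cnt of 512 needs 9 bits and a chunk_size of 128 KiB
 * needs 17 bits, 26 bits in total, which must fit into MAX_IMM_PAYL_BITS.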
2410 if ((ilog2(sg_cnt - 1) + 1) + (ilog2(sess->chunk_size - 1) + 1) >
2411 MAX_IMM_PAYL_BITS) {
2413 "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
2414 MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size);
2418 for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) {
2419 const struct rtrs_sg_desc *desc = &msg->desc[sgi];
2423 addr = le64_to_cpu(desc->addr);
2424 rkey = le32_to_cpu(desc->key);
2425 len = le32_to_cpu(desc->len);
2429 if (!len || (len % sess->chunk_size)) {
2430 rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi,
2434 for ( ; len && i < sess->queue_depth; i++) {
2435 sess->rbufs[i].addr = addr;
2436 sess->rbufs[i].rkey = rkey;
2438 len -= sess->chunk_size;
2439 addr += sess->chunk_size;
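/*
 * Example of the split above (illustrative numbers): a single descriptor
 * advertising a 4 MiB region with chunk_size = 512 KiB is expanded into
 * eight rbuf entries with addresses addr, addr + 512 KiB, ...,
 * addr + 7 * 512 KiB, all sharing the same rkey.
 */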
2443 if (sgi != sg_cnt || i != sess->queue_depth) {
2444 rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n");
2447 if (total_len != sess->chunk_size * sess->queue_depth) {
2448 rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len);
2455 static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
2457 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
2458 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
2459 struct rtrs_msg_info_rsp *msg;
2460 enum rtrs_clt_state state;
2465 state = RTRS_CLT_CONNECTING_ERR;
2467 WARN_ON(con->c.cid);
2468 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
2469 if (wc->status != IB_WC_SUCCESS) {
2470 rtrs_err(sess->clt, "Sess info response recv failed: %s\n",
2471 ib_wc_status_msg(wc->status));
2474 WARN_ON(wc->opcode != IB_WC_RECV);
2476 if (wc->byte_len < sizeof(*msg)) {
2477 rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
2481 ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
2482 iu->size, DMA_FROM_DEVICE);
2484 if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) {
2485 rtrs_err(sess->clt, "Sess info response is malformed: type %d\n",
2486 le16_to_cpu(msg->type));
2489 rx_sz = sizeof(*msg);
2490 rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
2491 if (wc->byte_len < rx_sz) {
2492 rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
2496 err = process_info_rsp(sess, msg);
2500 err = post_recv_sess(sess);
2504 state = RTRS_CLT_CONNECTED;
2507 rtrs_clt_update_wc_stats(con);
2508 rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
2509 rtrs_clt_change_state_get_old(sess, state, NULL);
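/*
 * Session info handshake, as driven by rtrs_send_sess_info() below: the
 * client posts a receive for the server's RTRS_MSG_INFO_RSP, sends an
 * RTRS_MSG_INFO_REQ carrying the session name, and then waits for the
 * session state to leave RTRS_CLT_CONNECTING.  The response is parsed by
 * process_info_rsp() above, which expands the advertised sg descriptors
 * into per-chunk rbuf entries.
 */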
2512 static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
2514 struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]);
2515 struct rtrs_msg_info_req *msg;
2516 struct rtrs_iu *tx_iu, *rx_iu;
2520 rx_sz = sizeof(struct rtrs_msg_info_rsp);
2521 rx_sz += sizeof(struct rtrs_sg_desc) * sess->queue_depth;
2523 tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
2524 sess->s.dev->ib_dev, DMA_TO_DEVICE,
2525 rtrs_clt_info_req_done);
2526 rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
2527 DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
2528 if (!tx_iu || !rx_iu) {
2532 /* Prepare for getting info response */
2533 err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
2535 rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err);
2541 msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
2542 memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname));
2544 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
2545 tx_iu->size, DMA_TO_DEVICE);
2547 /* Send info request */
2548 err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
2550 rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err);
2555 /* Wait for state change */
2556 wait_event_interruptible_timeout(sess->state_wq,
2557 sess->state != RTRS_CLT_CONNECTING,
2559 RTRS_CONNECT_TIMEOUT_MS));
2560 if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) {
2561 if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR)
2569 rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
2571 rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
2573 /* If we've never taken the async path, e.g. because of allocation problems */
2574 rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
2580 * init_sess() - establishes all session connections and does handshake
2581 * @sess: client session.
2582 * In case of error a full close or reconnect procedure should be taken,
2583 * because the reconnect or close async works may already have been started.
2585 static int init_sess(struct rtrs_clt_sess *sess)
2589 struct rtrs_addr path = {
2590 .src = &sess->s.src_addr,
2591 .dst = &sess->s.dst_addr,
2594 rtrs_addr_to_str(&path, str, sizeof(str));
2596 mutex_lock(&sess->init_mutex);
2597 err = init_conns(sess);
2600 "init_conns() failed: err=%d path=%s [%s:%u]\n", err,
2601 str, sess->hca_name, sess->hca_port);
2604 err = rtrs_send_sess_info(sess);
2608 "rtrs_send_sess_info() failed: err=%d path=%s [%s:%u]\n",
2609 err, str, sess->hca_name, sess->hca_port);
2612 rtrs_clt_sess_up(sess);
2614 mutex_unlock(&sess->init_mutex);
2619 static void rtrs_clt_reconnect_work(struct work_struct *work)
2621 struct rtrs_clt_sess *sess;
2622 struct rtrs_clt *clt;
2623 unsigned int delay_ms;
2626 sess = container_of(to_delayed_work(work), struct rtrs_clt_sess,
2630 if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING)
2633 if (sess->reconnect_attempts >= clt->max_reconnect_attempts) {
2634 /* Close a session completely if max attempts is reached */
2635 rtrs_clt_close_conns(sess, false);
2638 sess->reconnect_attempts++;
2640 /* Stop everything */
2641 rtrs_clt_stop_and_destroy_conns(sess);
2642 msleep(RTRS_RECONNECT_BACKOFF);
2643 if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING, NULL)) {
2644 err = init_sess(sess);
2646 goto reconnect_again;
2652 if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, NULL)) {
2653 sess->stats->reconnects.fail_cnt++;
2654 delay_ms = clt->reconnect_delay_sec * 1000;
2655 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
2656 msecs_to_jiffies(delay_ms +
2658 RTRS_RECONNECT_SEED));
2662 static void rtrs_clt_dev_release(struct device *dev)
2664 struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
2669 static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
2670 u16 port, size_t pdu_sz, void *priv,
2671 void (*link_ev)(void *priv,
2672 enum rtrs_clt_link_ev ev),
2673 unsigned int reconnect_delay_sec,
2674 unsigned int max_reconnect_attempts)
2676 struct rtrs_clt *clt;
2679 if (!paths_num || paths_num > MAX_PATHS_NUM)
2680 return ERR_PTR(-EINVAL);
2682 if (strlen(sessname) >= sizeof(clt->sessname))
2683 return ERR_PTR(-EINVAL);
2685 clt = kzalloc(sizeof(*clt), GFP_KERNEL);
2687 return ERR_PTR(-ENOMEM);
2689 clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
2690 if (!clt->pcpu_path) {
2692 return ERR_PTR(-ENOMEM);
2695 uuid_gen(&clt->paths_uuid);
2696 INIT_LIST_HEAD_RCU(&clt->paths_list);
2697 clt->paths_num = paths_num;
2698 clt->paths_up = MAX_PATHS_NUM;
2700 clt->pdu_sz = pdu_sz;
2701 clt->max_segments = RTRS_MAX_SEGMENTS;
2702 clt->reconnect_delay_sec = reconnect_delay_sec;
2703 clt->max_reconnect_attempts = max_reconnect_attempts;
2705 clt->link_ev = link_ev;
2706 clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
2707 strscpy(clt->sessname, sessname, sizeof(clt->sessname));
2708 init_waitqueue_head(&clt->permits_wait);
2709 mutex_init(&clt->paths_ev_mutex);
2710 mutex_init(&clt->paths_mutex);
2712 clt->dev.class = rtrs_clt_dev_class;
2713 clt->dev.release = rtrs_clt_dev_release;
2714 err = dev_set_name(&clt->dev, "%s", sessname);
2718 * Suppress user space notification until
2719 * sysfs files are created
2721 dev_set_uevent_suppress(&clt->dev, true);
2722 err = device_register(&clt->dev);
2724 put_device(&clt->dev);
2728 clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
2729 if (!clt->kobj_paths) {
2733 err = rtrs_clt_create_sysfs_root_files(clt);
2735 kobject_del(clt->kobj_paths);
2736 kobject_put(clt->kobj_paths);
2739 dev_set_uevent_suppress(&clt->dev, false);
2740 kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
2744 device_unregister(&clt->dev);
2746 free_percpu(clt->pcpu_path);
2748 return ERR_PTR(err);
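/*
 * Note on ownership: the clt structure is embedded in a struct device whose
 * release callback (rtrs_clt_dev_release() above) frees it on the last put,
 * so free_clt() below only tears down what it allocated itself and then
 * drops the final device reference via device_unregister().
 */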
2751 static void free_clt(struct rtrs_clt *clt)
2754 free_percpu(clt->pcpu_path);
2755 mutex_destroy(&clt->paths_ev_mutex);
2756 mutex_destroy(&clt->paths_mutex);
2757 /* release callback will free clt in last put */
2758 device_unregister(&clt->dev);
2762 * rtrs_clt_open() - Open a session to an RTRS server
2763 * @ops: holds the link event callback and the private pointer.
2764 * @sessname: name of the session
2765 * @paths: Paths to be established defined by their src and dst addresses
2766 * @paths_num: Number of elements in the @paths array
2767 * @port: port to be used by the RTRS session
2768 * @pdu_sz: Size of extra payload which can be accessed after permit allocation.
2769 * @reconnect_delay_sec: time between reconnect tries
2770 * @max_reconnect_attempts: Number of times to reconnect on error before giving
2771 * up, 0 for disabled, -1 for forever
2772 * @nr_poll_queues: number of polling mode connections using the IB_POLL_DIRECT flag
2774 * Starts session establishment with the rtrs_server. The function can block
2775 * up to ~2000ms before it returns.
2777 * Return a valid pointer on success, otherwise PTR_ERR.
2779 struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
2780 const char *sessname,
2781 const struct rtrs_addr *paths,
2782 size_t paths_num, u16 port,
2783 size_t pdu_sz, u8 reconnect_delay_sec,
2784 s16 max_reconnect_attempts, u32 nr_poll_queues)
2786 struct rtrs_clt_sess *sess, *tmp;
2787 struct rtrs_clt *clt;
2790 clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
2792 reconnect_delay_sec,
2793 max_reconnect_attempts);
2798 for (i = 0; i < paths_num; i++) {
2799 struct rtrs_clt_sess *sess;
2801 sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
2804 err = PTR_ERR(sess);
2805 goto close_all_sess;
2808 sess->for_new_clt = 1;
2809 list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
2811 err = init_sess(sess);
2813 list_del_rcu(&sess->s.entry);
2814 rtrs_clt_close_conns(sess, true);
2815 free_percpu(sess->stats->pcpu_stats);
2818 goto close_all_sess;
2821 err = rtrs_clt_create_sess_files(sess);
2823 list_del_rcu(&sess->s.entry);
2824 rtrs_clt_close_conns(sess, true);
2825 free_percpu(sess->stats->pcpu_stats);
2828 goto close_all_sess;
2831 err = alloc_permits(clt);
2833 goto close_all_sess;
2838 list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
2839 rtrs_clt_destroy_sess_files(sess, NULL);
2840 rtrs_clt_close_conns(sess, true);
2841 kobject_put(&sess->kobj);
2843 rtrs_clt_destroy_sysfs_root(clt);
2847 return ERR_PTR(err);
2849 EXPORT_SYMBOL(rtrs_clt_open);
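/*
 * Illustrative usage sketch (not part of this module): how an ULP might open
 * a session over the paths it has resolved.  The helper names, the payload
 * size and the struct rtrs_clt_ops field layout are assumptions made for
 * illustration; only rtrs_clt_open()/rtrs_clt_close() themselves are taken
 * from this file.
 */
static void example_link_ev(void *priv, enum rtrs_clt_link_ev ev)
{
	/* React to link up/down notifications from the transport here. */
}

static struct rtrs_clt *example_open(const struct rtrs_addr *paths,
				     size_t paths_num, u16 port)
{
	struct rtrs_clt_ops ops = {
		.priv	 = NULL,
		.link_ev = example_link_ev,
	};

	/*
	 * 4096 bytes of per-permit payload, retry every 30 seconds and
	 * never give up (-1), no polling queues.
	 */
	return rtrs_clt_open(&ops, "example_session", paths, paths_num,
			     port, 4096, 30, -1, 0);
}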
2852 * rtrs_clt_close() - Close a session
2853 * @clt: Session handle. Session is freed upon return.
2855 void rtrs_clt_close(struct rtrs_clt *clt)
2857 struct rtrs_clt_sess *sess, *tmp;
2859 /* Firstly forbid sysfs access */
2860 rtrs_clt_destroy_sysfs_root(clt);
2862 /* Now it is safe to iterate over all paths without locks */
2863 list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
2864 rtrs_clt_close_conns(sess, true);
2865 rtrs_clt_destroy_sess_files(sess, NULL);
2866 kobject_put(&sess->kobj);
2870 EXPORT_SYMBOL(rtrs_clt_close);
2872 int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
2874 enum rtrs_clt_state old_state;
2878 changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING,
2881 sess->reconnect_attempts = 0;
2882 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0);
2884 if (changed || old_state == RTRS_CLT_RECONNECTING) {
2886 * flush_delayed_work() queues pending work for immediate
2887 * execution, so do the flush if we have queued something
2888 * right now or work is pending.
2890 flush_delayed_work(&sess->reconnect_dwork);
2891 err = (READ_ONCE(sess->state) ==
2892 RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
2898 int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
2899 const struct attribute *sysfs_self)
2901 enum rtrs_clt_state old_state;
2905 * Continue stopping the path until its state has been changed to DEAD
2906 * or has been observed as DEAD:
2907 * 1. State was changed to DEAD - we were fast and nobody
2908 * invoked rtrs_clt_reconnect(), which could again start reconnecting.
2910 * 2. State was observed as DEAD - someone else is removing
2911 * the path in parallel.
2914 rtrs_clt_close_conns(sess, true);
2915 changed = rtrs_clt_change_state_get_old(sess,
2918 } while (!changed && old_state != RTRS_CLT_DEAD);
2921 rtrs_clt_remove_path_from_arr(sess);
2922 rtrs_clt_destroy_sess_files(sess, sysfs_self);
2923 kobject_put(&sess->kobj);
2929 void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value)
2931 clt->max_reconnect_attempts = (unsigned int)value;
2934 int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt)
2936 return (int)clt->max_reconnect_attempts;
2940 * rtrs_clt_request() - Request data transfer to/from server via RDMA.
2943 * @ops: confirmation callback to be invoked on completion, and the private pointer passed to it.
2945 * @permit: Preallocated permit
2946 * @vec: Message that is sent to the server together with the request.
2947 * The sum of the lengths of all @vec elements is limited to IO_MSG_SIZE.
2948 * Since the msg is copied internally it can be allocated on the stack.
2949 * @nr: Number of elements in @vec.
2950 * @data_len: length of data sent to/from server
2951 * @sg: Pages to be sent/received to/from server.
2952 * @sg_cnt: Number of elements in the @sg
2958 * On dir=READ rtrs client will request a data transfer from Server to client.
2959 * The data that the server will respond with will be stored in @sg when
2960 * the user receives an %RTRS_CLT_RDMA_EV_RDMA_REQUEST_WRITE_COMPL event.
2961 * On dir=WRITE the rtrs client will RDMA-write the data in @sg to the server side.
2963 int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
2964 struct rtrs_clt *clt, struct rtrs_permit *permit,
2965 const struct kvec *vec, size_t nr, size_t data_len,
2966 struct scatterlist *sg, unsigned int sg_cnt)
2968 struct rtrs_clt_io_req *req;
2969 struct rtrs_clt_sess *sess;
2971 enum dma_data_direction dma_dir;
2972 int err = -ECONNABORTED, i;
2973 size_t usr_len, hdr_len;
2976 /* Get kvec length */
2977 for (i = 0, usr_len = 0; i < nr; i++)
2978 usr_len += vec[i].iov_len;
2981 hdr_len = sizeof(struct rtrs_msg_rdma_read) +
2982 sg_cnt * sizeof(struct rtrs_sg_desc);
2983 dma_dir = DMA_FROM_DEVICE;
2985 hdr_len = sizeof(struct rtrs_msg_rdma_write);
2986 dma_dir = DMA_TO_DEVICE;
2990 for (path_it_init(&it, clt);
2991 (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
2992 if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
2995 if (usr_len + hdr_len > sess->max_hdr_size) {
2996 rtrs_wrn_rl(sess->clt,
2997 "%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
2998 dir == READ ? "Read" : "Write",
2999 usr_len, hdr_len, sess->max_hdr_size);
3003 req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv,
3004 vec, usr_len, sg, sg_cnt, data_len,
3007 err = rtrs_clt_read_req(req);
3009 err = rtrs_clt_write_req(req);
3011 req->in_use = false;
3017 path_it_deinit(&it);
3022 EXPORT_SYMBOL(rtrs_clt_request);
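/*
 * Illustrative usage sketch (not part of this module): issuing a single READ
 * request.  The permit con-type/wait-type constants, the conf_fn signature
 * and the helper names are assumptions taken from the public rtrs API; only
 * rtrs_clt_request() and the permit helpers themselves are real entry points.
 */
static void example_io_conf(void *priv, int errno)
{
	/* Called once the transfer has been confirmed by the server. */
}

static int example_read(struct rtrs_clt *clt, struct scatterlist *sg,
			unsigned int sg_cnt, size_t data_len)
{
	struct rtrs_clt_req_ops ops = {
		.priv	 = NULL,
		.conf_fn = example_io_conf,
	};
	u32 user_hdr = 0;	/* small user message copied along with the request */
	struct kvec vec = {
		.iov_base = &user_hdr,
		.iov_len  = sizeof(user_hdr),
	};
	struct rtrs_permit *permit;
	int err;

	permit = rtrs_clt_get_permit(clt, RTRS_IO_CON, RTRS_PERMIT_WAIT);
	if (!permit)
		return -ENOMEM;

	err = rtrs_clt_request(READ, &ops, clt, permit, &vec, 1, data_len,
			       sg, sg_cnt);
	if (err)
		rtrs_clt_put_permit(clt, permit);
	return err;
}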
3024 int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index)
3026 /* If there is no connected path, return -1 so the block layer does not try again */
3028 struct rtrs_con *con;
3029 struct rtrs_clt_sess *sess;
3033 for (path_it_init(&it, clt);
3034 (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
3035 if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
3038 con = sess->s.con[index + 1];
3039 cnt = ib_process_cq_direct(con->cq, -1);
3043 path_it_deinit(&it);
3048 EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct);
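/*
 * Usage note (an assumption based on the comment above): rtrs_clt_rdma_cq_direct()
 * is intended to be called from a block driver's polling path, with @index
 * naming the polling queue whose completion queue should be drained directly.
 */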
3051 * rtrs_clt_query() - queries RTRS session attributes
3052 * @clt: session pointer
3053 * @attr: query results for session attributes.
3056 * -ECOMM no connection to the server
3058 int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
3060 if (!rtrs_clt_is_connected(clt))
3063 attr->queue_depth = clt->queue_depth;
3064 attr->max_segments = clt->max_segments;
3065 /* Cap max_io_size to min of remote buffer size and the fr pages */
3066 attr->max_io_size = min_t(int, clt->max_io_size,
3067 clt->max_segments * SZ_4K);
3071 EXPORT_SYMBOL(rtrs_clt_query);
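/*
 * Illustrative usage sketch (not part of this module): a ULP would typically
 * query the session attributes once the link is up and size its own queues
 * from them.  The helper name and the exact field types printed below are
 * assumptions for illustration.
 */
static int example_query_limits(struct rtrs_clt *clt)
{
	struct rtrs_attrs attr;
	int err;

	err = rtrs_clt_query(clt, &attr);
	if (err)
		return err;	/* -ECOMM: no path is currently connected */

	pr_debug("queue_depth=%u max_io_size=%u max_segments=%u\n",
		 attr.queue_depth, attr.max_io_size, attr.max_segments);
	return 0;
}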
3073 int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
3074 struct rtrs_addr *addr)
3076 struct rtrs_clt_sess *sess;
3079 sess = alloc_sess(clt, addr, nr_cpu_ids, 0);
3081 return PTR_ERR(sess);
3083 mutex_lock(&clt->paths_mutex);
3084 if (clt->paths_num == 0) {
3086 * When all the paths are removed for a session,
3087 * the addition of the first path is like a new session for
3088 * the storage server
3090 sess->for_new_clt = 1;
3093 mutex_unlock(&clt->paths_mutex);
3096 * It is totally safe to add a path in CONNECTING state: incoming
3097 * IO will never grab it. It is also very important to add the
3098 * path before init, since init fires the LINK_CONNECTED event.
3100 rtrs_clt_add_path_to_arr(sess);
3102 err = init_sess(sess);
3106 err = rtrs_clt_create_sess_files(sess);
3113 rtrs_clt_remove_path_from_arr(sess);
3114 rtrs_clt_close_conns(sess, true);
3115 free_percpu(sess->stats->pcpu_stats);
3122 static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
3124 if (!(dev->ib_dev->attrs.device_cap_flags &
3125 IB_DEVICE_MEM_MGT_EXTENSIONS)) {
3126 pr_err("Memory registrations not supported.\n");
3133 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
3134 .init = rtrs_clt_ib_dev_init
3137 static int __init rtrs_client_init(void)
3139 rtrs_rdma_dev_pd_init(0, &dev_pd);
3141 rtrs_clt_dev_class = class_create(THIS_MODULE, "rtrs-client");
3142 if (IS_ERR(rtrs_clt_dev_class)) {
3143 pr_err("Failed to create rtrs-client dev class\n");
3144 return PTR_ERR(rtrs_clt_dev_class);
3146 rtrs_wq = alloc_workqueue("rtrs_client_wq", 0, 0);
3148 class_destroy(rtrs_clt_dev_class);
3155 static void __exit rtrs_client_exit(void)
3157 destroy_workqueue(rtrs_wq);
3158 class_destroy(rtrs_clt_dev_class);
3159 rtrs_rdma_dev_pd_deinit(&dev_pd);
3162 module_init(rtrs_client_init);
3163 module_exit(rtrs_client_exit);