1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
11 #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
13 #include <linux/module.h>
14 #include <linux/rculist.h>
15 #include <linux/random.h>
20 #define RTRS_CONNECT_TIMEOUT_MS 30000
22 * Wait a bit before trying to reconnect after a failure
23 * in order to give the server time to finish its cleanup, which
24 * would otherwise lead to "false positive" failed reconnect attempts
26 #define RTRS_RECONNECT_BACKOFF 1000
28 * Wait for additional random time between 0 and 8 seconds
29 * before starting to reconnect to avoid clients reconnecting
30 * all at once in case of a major network outage
32 #define RTRS_RECONNECT_SEED 8
34 #define FIRST_CONN 0x01
35 /* limit to 128 * 4k = 512k max IO */
36 #define RTRS_MAX_SEGMENTS 128
38 MODULE_DESCRIPTION("RDMA Transport Client");
39 MODULE_LICENSE("GPL");
41 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
42 static struct rtrs_rdma_dev_pd dev_pd = {
46 static struct workqueue_struct *rtrs_wq;
47 static struct class *rtrs_clt_dev_class;
49 static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
51 struct rtrs_clt_path *clt_path;
52 bool connected = false;
55 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
56 connected |= READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED;
62 static struct rtrs_permit *
63 __rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type)
65 size_t max_depth = clt->queue_depth;
66 struct rtrs_permit *permit;
70 * Adapted from null_blk get_tag(). Callers from different cpus may
71 * grab the same bit, since find_first_zero_bit is not atomic.
72 * But then the test_and_set_bit_lock will fail for all the
73 * callers but one, so that they will loop again.
74 * This way an explicit spinlock is not required.
77 bit = find_first_zero_bit(clt->permits_map, max_depth);
80 } while (test_and_set_bit_lock(bit, clt->permits_map));
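/*
 * Leaving the loop means test_and_set_bit_lock() succeeded, i.e. the bit
 * was clear and we claimed it atomically (with acquire semantics), so the
 * permit at @bit is exclusively ours and can be filled in without locking.
 */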
82 permit = get_permit(clt, bit);
83 WARN_ON(permit->mem_id != bit);
84 permit->cpu_id = raw_smp_processor_id();
85 permit->con_type = con_type;
90 static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt,
91 struct rtrs_permit *permit)
93 clear_bit_unlock(permit->mem_id, clt->permits_map);
97 * rtrs_clt_get_permit() - allocates permit for future RDMA operation
98 * @clt: Current session
99 * @con_type: Type of connection to use with the permit
100 * @can_wait: Wait type
103 * Allocates permit for the following RDMA operation. Permit is used
104 * to preallocate all resources and to propagate memory pressure
108 * Can sleep if @can_wait == RTRS_PERMIT_WAIT
110 struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt,
111 enum rtrs_clt_con_type con_type,
112 enum wait_type can_wait)
114 struct rtrs_permit *permit;
117 permit = __rtrs_get_permit(clt, con_type);
118 if (permit || !can_wait)
122 prepare_to_wait(&clt->permits_wait, &wait,
123 TASK_UNINTERRUPTIBLE);
124 permit = __rtrs_get_permit(clt, con_type);
131 finish_wait(&clt->permits_wait, &wait);
135 EXPORT_SYMBOL(rtrs_clt_get_permit);
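/*
 * Rough usage sketch for an upper layer (e.g. a block driver). This is
 * only an illustration, not copied from an in-tree caller:
 *
 *	permit = rtrs_clt_get_permit(clt, RTRS_IO_CON, RTRS_PERMIT_WAIT);
 *	if (!permit)
 *		return -EBUSY;	// only possible for a no-wait request
 *	...submit the IO that references this permit...
 *	rtrs_clt_put_permit(clt, permit);
 */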
138 * rtrs_clt_put_permit() - puts allocated permit
139 * @clt: Current session
140 * @permit: Permit to be freed
145 void rtrs_clt_put_permit(struct rtrs_clt_sess *clt,
146 struct rtrs_permit *permit)
148 if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
151 __rtrs_put_permit(clt, permit);
154 * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
155 * before calling schedule(). So if rtrs_clt_get_permit() is sleeping
156 * it must have added itself to &clt->permits_wait before
157 * __rtrs_put_permit() finished.
158 * Hence it is safe to guard wake_up() with a waitqueue_active() test.
160 if (waitqueue_active(&clt->permits_wait))
161 wake_up(&clt->permits_wait);
163 EXPORT_SYMBOL(rtrs_clt_put_permit);
166 * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
167 * @clt_path: client path pointer
168 * @permit: permit for the allocation of the RDMA buffer
170 * IO connections start from 1.
171 * Connection 0 is reserved for user messages.
174 struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path,
175 struct rtrs_permit *permit)
179 if (permit->con_type == RTRS_IO_CON)
180 id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1;
182 return to_clt_con(clt_path->s.con[id]);
186 * rtrs_clt_change_state() - change the session state through session state
189 * @clt_path: client path to change the state of.
190 * @new_state: state to change to.
192 * returns true if sess's state is changed to the new state, otherwise returns false.
195 * state_wq lock must be held.
197 static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path,
198 enum rtrs_clt_state new_state)
200 enum rtrs_clt_state old_state;
201 bool changed = false;
203 lockdep_assert_held(&clt_path->state_wq.lock);
205 old_state = clt_path->state;
207 case RTRS_CLT_CONNECTING:
209 case RTRS_CLT_RECONNECTING:
216 case RTRS_CLT_RECONNECTING:
218 case RTRS_CLT_CONNECTED:
219 case RTRS_CLT_CONNECTING_ERR:
220 case RTRS_CLT_CLOSED:
227 case RTRS_CLT_CONNECTED:
229 case RTRS_CLT_CONNECTING:
236 case RTRS_CLT_CONNECTING_ERR:
238 case RTRS_CLT_CONNECTING:
245 case RTRS_CLT_CLOSING:
247 case RTRS_CLT_CONNECTING:
248 case RTRS_CLT_CONNECTING_ERR:
249 case RTRS_CLT_RECONNECTING:
250 case RTRS_CLT_CONNECTED:
257 case RTRS_CLT_CLOSED:
259 case RTRS_CLT_CLOSING:
268 case RTRS_CLT_CLOSED:
279 clt_path->state = new_state;
280 wake_up_locked(&clt_path->state_wq);
286 static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path,
287 enum rtrs_clt_state old_state,
288 enum rtrs_clt_state new_state)
290 bool changed = false;
292 spin_lock_irq(&clt_path->state_wq.lock);
293 if (clt_path->state == old_state)
294 changed = rtrs_clt_change_state(clt_path, new_state);
295 spin_unlock_irq(&clt_path->state_wq.lock);
300 static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
302 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
304 if (rtrs_clt_change_state_from_to(clt_path,
306 RTRS_CLT_RECONNECTING)) {
307 struct rtrs_clt_sess *clt = clt_path->clt;
308 unsigned int delay_ms;
311 * Normal scenario, reconnect if we were successfully connected
313 delay_ms = clt->reconnect_delay_sec * 1000;
314 queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
315 msecs_to_jiffies(delay_ms +
316 prandom_u32() % RTRS_RECONNECT_SEED));
319 * An error can happen only while establishing a new connection,
320 * so notify the waiter with the error state; the waiter is responsible
321 * for cleaning up the rest and reconnecting if needed.
323 rtrs_clt_change_state_from_to(clt_path,
325 RTRS_CLT_CONNECTING_ERR);
329 static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
331 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
333 if (wc->status != IB_WC_SUCCESS) {
334 rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n",
335 ib_wc_status_msg(wc->status));
336 rtrs_rdma_error_recovery(con);
340 static struct ib_cqe fast_reg_cqe = {
341 .done = rtrs_clt_fast_reg_done
344 static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
345 bool notify, bool can_wait);
347 static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
349 struct rtrs_clt_io_req *req =
350 container_of(wc->wr_cqe, typeof(*req), inv_cqe);
351 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
353 if (wc->status != IB_WC_SUCCESS) {
354 rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
355 ib_wc_status_msg(wc->status));
356 rtrs_rdma_error_recovery(con);
358 req->need_inv = false;
359 if (req->need_inv_comp)
360 complete(&req->inv_comp);
362 /* Complete request from INV callback */
363 complete_rdma_req(req, req->inv_errno, true, false);
366 static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
368 struct rtrs_clt_con *con = req->con;
369 struct ib_send_wr wr = {
370 .opcode = IB_WR_LOCAL_INV,
371 .wr_cqe = &req->inv_cqe,
372 .send_flags = IB_SEND_SIGNALED,
373 .ex.invalidate_rkey = req->mr->rkey,
375 req->inv_cqe.done = rtrs_clt_inv_rkey_done;
377 return ib_post_send(con->c.qp, &wr, NULL);
380 static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
381 bool notify, bool can_wait)
383 struct rtrs_clt_con *con = req->con;
384 struct rtrs_clt_path *clt_path;
387 if (WARN_ON(!req->in_use))
389 if (WARN_ON(!req->con))
391 clt_path = to_clt_path(con->c.path);
394 if (req->dir == DMA_FROM_DEVICE && req->need_inv) {
396 * We are here to invalidate read requests
397 * ourselves. In the normal scenario the server should
398 * send INV for all read requests, but
399 * since we are here, two things could have happened:
401 * 1. this is failover, when errno != 0
404 * 2. something totally bad happened and
405 * server forgot to send INV, so we
406 * should do that ourselves.
410 req->need_inv_comp = true;
412 /* This should be IO path, so always notify */
414 /* Save errno for INV callback */
415 req->inv_errno = errno;
418 refcount_inc(&req->ref);
419 err = rtrs_inv_rkey(req);
421 rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n",
423 } else if (can_wait) {
424 wait_for_completion(&req->inv_comp);
427 * Something went wrong, so request will be
428 * completed from INV callback.
434 if (!refcount_dec_and_test(&req->ref))
437 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
438 req->sg_cnt, req->dir);
440 if (!refcount_dec_and_test(&req->ref))
442 if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
443 atomic_dec(&clt_path->stats->inflight);
449 rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
450 errno, kobject_name(&clt_path->kobj), clt_path->hca_name,
451 clt_path->hca_port, notify);
455 req->conf(req->priv, errno);
458 static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
459 struct rtrs_clt_io_req *req,
460 struct rtrs_rbuf *rbuf, u32 off,
461 u32 imm, struct ib_send_wr *wr)
463 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
464 enum ib_send_flags flags;
468 rtrs_wrn(con->c.path,
469 "Doing RDMA Write failed, no data supplied\n");
473 /* user data and user message in the first list element */
474 sge.addr = req->iu->dma_addr;
475 sge.length = req->sg_size;
476 sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
479 * From time to time we have to post signalled sends,
480 * or the send queue will fill up and only a QP reset can help.
482 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
483 0 : IB_SEND_SIGNALED;
485 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
487 req->sg_size, DMA_TO_DEVICE);
489 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
490 rbuf->rkey, rbuf->addr + off,
491 imm, flags, wr, NULL);
494 static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id,
495 s16 errno, bool w_inval)
497 struct rtrs_clt_io_req *req;
499 if (WARN_ON(msg_id >= clt_path->queue_depth))
502 req = &clt_path->reqs[msg_id];
503 /* Drop need_inv if server responded with send with invalidation */
504 req->need_inv &= !w_inval;
505 complete_rdma_req(req, errno, true, false);
508 static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
512 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
514 WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
515 iu = container_of(wc->wr_cqe, struct rtrs_iu,
517 err = rtrs_iu_post_recv(&con->c, iu);
519 rtrs_err(con->c.path, "post iu failed %d\n", err);
520 rtrs_rdma_error_recovery(con);
524 static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
526 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
527 struct rtrs_msg_rkey_rsp *msg;
528 u32 imm_type, imm_payload;
529 bool w_inval = false;
534 WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
536 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
538 if (wc->byte_len < sizeof(*msg)) {
539 rtrs_err(con->c.path, "rkey response is malformed: size %d\n",
543 ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
544 iu->size, DMA_FROM_DEVICE);
546 if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) {
547 rtrs_err(clt_path->clt,
548 "rkey response is malformed: type %d\n",
549 le16_to_cpu(msg->type));
552 buf_id = le16_to_cpu(msg->buf_id);
553 if (WARN_ON(buf_id >= clt_path->queue_depth))
556 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
557 if (imm_type == RTRS_IO_RSP_IMM ||
558 imm_type == RTRS_IO_RSP_W_INV_IMM) {
561 w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
562 rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
564 if (WARN_ON(buf_id != msg_id))
566 clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
567 process_io_rsp(clt_path, msg_id, err, w_inval);
569 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr,
570 iu->size, DMA_FROM_DEVICE);
571 return rtrs_clt_recv_done(con, wc);
573 rtrs_rdma_error_recovery(con);
576 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
578 static struct ib_cqe io_comp_cqe = {
579 .done = rtrs_clt_rdma_done
583 * Post x2 empty WRs: first is for this RDMA with IMM,
584 * second is for RECV with INV, which happened earlier.
586 static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
588 struct ib_recv_wr wr_arr[2], *wr;
591 memset(wr_arr, 0, sizeof(wr_arr));
592 for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
596 /* Chain backwards */
597 wr->next = &wr_arr[i - 1];
600 return ib_post_recv(con->qp, wr, NULL);
603 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
605 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
606 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
607 u32 imm_type, imm_payload;
608 bool w_inval = false;
611 if (wc->status != IB_WC_SUCCESS) {
612 if (wc->status != IB_WC_WR_FLUSH_ERR) {
613 rtrs_err(clt_path->clt, "RDMA failed: %s\n",
614 ib_wc_status_msg(wc->status));
615 rtrs_rdma_error_recovery(con);
619 rtrs_clt_update_wc_stats(con);
621 switch (wc->opcode) {
622 case IB_WC_RECV_RDMA_WITH_IMM:
624 * post_recv() RDMA write completions of IO reqs (read/write)
627 if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
629 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
630 &imm_type, &imm_payload);
631 if (imm_type == RTRS_IO_RSP_IMM ||
632 imm_type == RTRS_IO_RSP_W_INV_IMM) {
635 w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
636 rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
638 process_io_rsp(clt_path, msg_id, err, w_inval);
639 } else if (imm_type == RTRS_HB_MSG_IMM) {
641 rtrs_send_hb_ack(&clt_path->s);
642 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
643 return rtrs_clt_recv_done(con, wc);
644 } else if (imm_type == RTRS_HB_ACK_IMM) {
646 clt_path->s.hb_missed_cnt = 0;
647 clt_path->s.hb_cur_latency =
648 ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
649 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
650 return rtrs_clt_recv_done(con, wc);
652 rtrs_wrn(con->c.path, "Unknown IMM type %u\n",
657 * Post x2 empty WRs: first is for this RDMA with IMM,
658 * second is for RECV with INV, which happened earlier.
660 err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
662 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
664 rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n",
666 rtrs_rdma_error_recovery(con);
671 * Key invalidations from server side
673 WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
674 wc->wc_flags & IB_WC_WITH_IMM));
675 WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
676 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
677 if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
678 return rtrs_clt_recv_done(con, wc);
680 return rtrs_clt_rkey_rsp_done(con, wc);
683 case IB_WC_RDMA_WRITE:
685 * post_send() RDMA write completions of IO reqs (read/write)
691 rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode);
696 static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
699 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
701 for (i = 0; i < q_size; i++) {
702 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
703 struct rtrs_iu *iu = &con->rsp_ius[i];
705 err = rtrs_iu_post_recv(&con->c, iu);
707 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
716 static int post_recv_path(struct rtrs_clt_path *clt_path)
721 for (cid = 0; cid < clt_path->s.con_num; cid++) {
723 q_size = SERVICE_CON_QUEUE_DEPTH;
725 q_size = clt_path->queue_depth;
728 * x2 for RDMA read responses + FR key invalidations,
729 * RDMA writes do not require any FR registrations.
733 err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size);
735 rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n",
746 struct list_head skip_list;
747 struct rtrs_clt_sess *clt;
748 struct rtrs_clt_path *(*next_path)(struct path_it *it);
752 * list_next_or_null_rr_rcu - get next list element in round-robin fashion.
753 * @head: the head for the list.
754 * @ptr: the list head to take the next element from.
755 * @type: the type of the struct this is embedded in.
756 * @memb: the name of the list_head within the struct.
758 * Next element returned in round-robin fashion, i.e. head will be skipped,
759 * but if list is observed as empty, NULL will be returned.
761 * This primitive may safely run concurrently with the _rcu list-mutation
762 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
764 #define list_next_or_null_rr_rcu(head, ptr, type, memb) \
766 list_next_or_null_rcu(head, ptr, type, memb) ?: \
767 list_next_or_null_rcu(head, READ_ONCE((ptr)->next), \
772 * get_next_path_rr() - Returns path in round-robin fashion.
773 * @it: the path pointer
775 * Related to @MP_POLICY_RR
778 * rcu_read_lock() must be held.
780 static struct rtrs_clt_path *get_next_path_rr(struct path_it *it)
782 struct rtrs_clt_path __rcu **ppcpu_path;
783 struct rtrs_clt_path *path;
784 struct rtrs_clt_sess *clt;
789 * Here we use two RCU objects: @paths_list and @pcpu_path
790 * pointer. See rtrs_clt_remove_path_from_arr() for details
791 * how that is handled.
794 ppcpu_path = this_cpu_ptr(clt->pcpu_path);
795 path = rcu_dereference(*ppcpu_path);
797 path = list_first_or_null_rcu(&clt->paths_list,
798 typeof(*path), s.entry);
800 path = list_next_or_null_rr_rcu(&clt->paths_list,
804 rcu_assign_pointer(*ppcpu_path, path);
810 * get_next_path_min_inflight() - Returns path with minimal inflight count.
811 * @it: the path pointer
813 * Related to @MP_POLICY_MIN_INFLIGHT
816 * rcu_read_lock() must be held.
818 static struct rtrs_clt_path *get_next_path_min_inflight(struct path_it *it)
820 struct rtrs_clt_path *min_path = NULL;
821 struct rtrs_clt_sess *clt = it->clt;
822 struct rtrs_clt_path *clt_path;
823 int min_inflight = INT_MAX;
826 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
827 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
830 if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
833 inflight = atomic_read(&clt_path->stats->inflight);
835 if (inflight < min_inflight) {
836 min_inflight = inflight;
842 * add the path to the skip list, so that next time we can get
846 list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
852 * get_next_path_min_latency() - Returns path with minimal latency.
853 * @it: the path pointer
855 * Return: a path with the lowest latency or NULL if all paths are tried
858 * rcu_read_lock() must be held.
860 * Related to @MP_POLICY_MIN_LATENCY
862 * This DOES skip an already-tried path.
863 * There is a skip-list to skip a path if the path has already been tried and failed.
864 * It will try the minimum latency path and then the second minimum latency
865 * path and so on. Finally it will return NULL if all paths have been tried.
866 * Therefore the caller MUST check whether the returned
867 * path is NULL and trigger the IO error.
869 static struct rtrs_clt_path *get_next_path_min_latency(struct path_it *it)
871 struct rtrs_clt_path *min_path = NULL;
872 struct rtrs_clt_sess *clt = it->clt;
873 struct rtrs_clt_path *clt_path;
874 ktime_t min_latency = KTIME_MAX;
877 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
878 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
881 if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
884 latency = clt_path->s.hb_cur_latency;
886 if (latency < min_latency) {
887 min_latency = latency;
893 * add the path to the skip list, so that next time we can get
897 list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
902 static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt)
904 INIT_LIST_HEAD(&it->skip_list);
908 if (clt->mp_policy == MP_POLICY_RR)
909 it->next_path = get_next_path_rr;
910 else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
911 it->next_path = get_next_path_min_inflight;
913 it->next_path = get_next_path_min_latency;
916 static inline void path_it_deinit(struct path_it *it)
918 struct list_head *skip, *tmp;
920 * The skip_list is used only for the MIN_INFLIGHT and MIN_LATENCY policies.
921 * We need to remove paths from it, so that the next IO can insert
922 * paths (->mp_skip_entry) into a skip_list again.
924 list_for_each_safe(skip, tmp, &it->skip_list)
929 * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
930 * about an inflight IO.
931 * The user buffer holding user control message (not data) is copied into
932 * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
933 * also hold the control message of rtrs.
934 * @req: an io request holding information about IO.
935 * @clt_path: client path
936 * @conf: confirmation callback function to notify the upper layer.
937 * @permit: permit for allocation of RDMA remote buffer
938 * @priv: private pointer
939 * @vec: kernel vector containing control message
940 * @usr_len: length of the user message
941 * @sg: scatter list for IO data
942 * @sg_cnt: number of scatter list entries
943 * @data_len: length of the IO data
944 * @dir: direction of the IO.
946 static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
947 struct rtrs_clt_path *clt_path,
948 void (*conf)(void *priv, int errno),
949 struct rtrs_permit *permit, void *priv,
950 const struct kvec *vec, size_t usr_len,
951 struct scatterlist *sg, size_t sg_cnt,
952 size_t data_len, int dir)
954 struct iov_iter iter;
957 req->permit = permit;
959 req->usr_len = usr_len;
960 req->data_len = data_len;
962 req->sg_cnt = sg_cnt;
965 req->con = rtrs_permit_to_clt_con(clt_path, permit);
967 req->need_inv = false;
968 req->need_inv_comp = false;
970 refcount_set(&req->ref, 1);
971 req->mp_policy = clt_path->clt->mp_policy;
973 iov_iter_kvec(&iter, READ, vec, 1, usr_len);
974 len = _copy_from_iter(req->iu->buf, usr_len, &iter);
975 WARN_ON(len != usr_len);
977 reinit_completion(&req->inv_comp);
980 static struct rtrs_clt_io_req *
981 rtrs_clt_get_req(struct rtrs_clt_path *clt_path,
982 void (*conf)(void *priv, int errno),
983 struct rtrs_permit *permit, void *priv,
984 const struct kvec *vec, size_t usr_len,
985 struct scatterlist *sg, size_t sg_cnt,
986 size_t data_len, int dir)
988 struct rtrs_clt_io_req *req;
990 req = &clt_path->reqs[permit->mem_id];
991 rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len,
992 sg, sg_cnt, data_len, dir);
996 static struct rtrs_clt_io_req *
997 rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
998 struct rtrs_clt_io_req *fail_req)
1000 struct rtrs_clt_io_req *req;
1002 .iov_base = fail_req->iu->buf,
1003 .iov_len = fail_req->usr_len
1006 req = &alive_path->reqs[fail_req->permit->mem_id];
1007 rtrs_clt_init_req(req, alive_path, fail_req->conf, fail_req->permit,
1008 fail_req->priv, &vec, fail_req->usr_len,
1009 fail_req->sglist, fail_req->sg_cnt,
1010 fail_req->data_len, fail_req->dir);
1014 static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
1015 struct rtrs_clt_io_req *req,
1016 struct rtrs_rbuf *rbuf, bool fr_en,
1017 u32 size, u32 imm, struct ib_send_wr *wr,
1018 struct ib_send_wr *tail)
1020 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1021 struct ib_sge *sge = req->sge;
1022 enum ib_send_flags flags;
1023 struct scatterlist *sg;
1026 struct ib_send_wr *ptail = NULL;
1030 sge[i].addr = req->mr->iova;
1031 sge[i].length = req->mr->length;
1032 sge[i].lkey = req->mr->lkey;
1037 for_each_sg(req->sglist, sg, req->sg_cnt, i) {
1038 sge[i].addr = sg_dma_address(sg);
1039 sge[i].length = sg_dma_len(sg);
1040 sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
1042 num_sge = 1 + req->sg_cnt;
1044 sge[i].addr = req->iu->dma_addr;
1045 sge[i].length = size;
1046 sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
1049 * From time to time we have to post signalled sends,
1050 * or the send queue will fill up and only a QP reset can help.
1052 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
1053 0 : IB_SEND_SIGNALED;
1055 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
1057 size, DMA_TO_DEVICE);
1059 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
1060 rbuf->rkey, rbuf->addr, imm,
1064 static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
1068 /* Align the MR to a 4K page size to match the block virt boundary */
1069 nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
1072 if (nr < req->sg_cnt)
1074 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1079 static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
1081 struct rtrs_clt_con *con = req->con;
1082 struct rtrs_path *s = con->c.path;
1083 struct rtrs_clt_path *clt_path = to_clt_path(s);
1084 struct rtrs_msg_rdma_write *msg;
1086 struct rtrs_rbuf *rbuf;
1089 struct ib_reg_wr rwr;
1090 struct ib_send_wr inv_wr;
1091 struct ib_send_wr *wr = NULL;
1094 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
1096 if (tsize > clt_path->chunk_size) {
1097 rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
1098 tsize, clt_path->chunk_size);
1102 count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist,
1103 req->sg_cnt, req->dir);
1105 rtrs_wrn(s, "Write request failed, map failed\n");
1109 /* put rtrs msg after sg and user message */
1110 msg = req->iu->buf + req->usr_len;
1111 msg->type = cpu_to_le16(RTRS_MSG_WRITE);
1112 msg->usr_len = cpu_to_le16(req->usr_len);
1114 /* rtrs message on server side will be after user data and message */
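/*
 * mem_off already carries the permit (chunk) index in the high bits of the
 * immediate payload (see alloc_permits()), so adding data_len + usr_len
 * encodes the byte offset of the rtrs message within that chunk.
 */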
1115 imm = req->permit->mem_off + req->data_len + req->usr_len;
1116 imm = rtrs_to_io_req_imm(imm);
1117 buf_id = req->permit->mem_id;
1118 req->sg_size = tsize;
1119 rbuf = &clt_path->rbufs[buf_id];
1122 ret = rtrs_map_sg_fr(req, count);
1125 "Write request failed, failed to map fast reg. data, err: %d\n",
1127 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
1128 req->sg_cnt, req->dir);
1131 inv_wr = (struct ib_send_wr) {
1132 .opcode = IB_WR_LOCAL_INV,
1133 .wr_cqe = &req->inv_cqe,
1134 .send_flags = IB_SEND_SIGNALED,
1135 .ex.invalidate_rkey = req->mr->rkey,
1137 req->inv_cqe.done = rtrs_clt_inv_rkey_done;
1138 rwr = (struct ib_reg_wr) {
1139 .wr.opcode = IB_WR_REG_MR,
1140 .wr.wr_cqe = &fast_reg_cqe,
1142 .key = req->mr->rkey,
1143 .access = (IB_ACCESS_LOCAL_WRITE),
1147 refcount_inc(&req->ref);
1150 * Update stats now; after the request is successfully sent it is
1151 * no longer safe to touch it.
1153 rtrs_clt_update_all_stats(req, WRITE);
1155 ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en,
1156 req->usr_len + sizeof(*msg),
1160 "Write request failed: error=%d path=%s [%s:%u]\n",
1161 ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
1162 clt_path->hca_port);
1163 if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
1164 atomic_dec(&clt_path->stats->inflight);
1166 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
1167 req->sg_cnt, req->dir);
1173 static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
1175 struct rtrs_clt_con *con = req->con;
1176 struct rtrs_path *s = con->c.path;
1177 struct rtrs_clt_path *clt_path = to_clt_path(s);
1178 struct rtrs_msg_rdma_read *msg;
1179 struct rtrs_ib_dev *dev = clt_path->s.dev;
1181 struct ib_reg_wr rwr;
1182 struct ib_send_wr *wr = NULL;
1187 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
1189 if (tsize > clt_path->chunk_size) {
1191 "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
1192 tsize, clt_path->chunk_size);
1197 count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
1201 "Read request failed, dma map failed\n");
1205 /* put our message into req->buf after user message */
1206 msg = req->iu->buf + req->usr_len;
1207 msg->type = cpu_to_le16(RTRS_MSG_READ);
1208 msg->usr_len = cpu_to_le16(req->usr_len);
1211 ret = rtrs_map_sg_fr(req, count);
1214 "Read request failed, failed to map fast reg. data, err: %d\n",
1216 ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
1220 rwr = (struct ib_reg_wr) {
1221 .wr.opcode = IB_WR_REG_MR,
1222 .wr.wr_cqe = &fast_reg_cqe,
1224 .key = req->mr->rkey,
1225 .access = (IB_ACCESS_LOCAL_WRITE |
1226 IB_ACCESS_REMOTE_WRITE),
1230 msg->sg_cnt = cpu_to_le16(1);
1231 msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F);
1233 msg->desc[0].addr = cpu_to_le64(req->mr->iova);
1234 msg->desc[0].key = cpu_to_le32(req->mr->rkey);
1235 msg->desc[0].len = cpu_to_le32(req->mr->length);
1237 /* Further invalidation is required */
1238 req->need_inv = !!RTRS_MSG_NEED_INVAL_F;
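/*
 * need_inv stays set until either the server responds with
 * send-with-invalidate (process_io_rsp() then clears it) or, on failover
 * or a missing INV, we post a local IB_WR_LOCAL_INV in complete_rdma_req().
 */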
1245 * rtrs message will be after the space reserved for disk data and
1248 imm = req->permit->mem_off + req->data_len + req->usr_len;
1249 imm = rtrs_to_io_req_imm(imm);
1250 buf_id = req->permit->mem_id;
1252 req->sg_size = sizeof(*msg);
1253 req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc);
1254 req->sg_size += req->usr_len;
1257 * Update stats now; after the request is successfully sent it is
1258 * no longer safe to touch it.
1260 rtrs_clt_update_all_stats(req, READ);
1262 ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id],
1263 req->data_len, imm, wr);
1266 "Read request failed: error=%d path=%s [%s:%u]\n",
1267 ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
1268 clt_path->hca_port);
1269 if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
1270 atomic_dec(&clt_path->stats->inflight);
1271 req->need_inv = false;
1273 ib_dma_unmap_sg(dev->ib_dev, req->sglist,
1274 req->sg_cnt, req->dir);
1281 * rtrs_clt_failover_req() - Try to find an active path for a failed request
1283 * @fail_req: a failed io request.
1285 static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt,
1286 struct rtrs_clt_io_req *fail_req)
1288 struct rtrs_clt_path *alive_path;
1289 struct rtrs_clt_io_req *req;
1290 int err = -ECONNABORTED;
1294 for (path_it_init(&it, clt);
1295 (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num;
1297 if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED)
1299 req = rtrs_clt_get_copy_req(alive_path, fail_req);
1300 if (req->dir == DMA_TO_DEVICE)
1301 err = rtrs_clt_write_req(req);
1303 err = rtrs_clt_read_req(req);
1305 req->in_use = false;
1309 rtrs_clt_inc_failover_cnt(alive_path->stats);
1312 path_it_deinit(&it);
1318 static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path)
1320 struct rtrs_clt_sess *clt = clt_path->clt;
1321 struct rtrs_clt_io_req *req;
1324 if (!clt_path->reqs)
1326 for (i = 0; i < clt_path->queue_depth; ++i) {
1327 req = &clt_path->reqs[i];
1332 * Safely (without notification) complete the failed request.
1333 * After completion this request is still usable and can
1334 * be failed over to another path.
1336 complete_rdma_req(req, -ECONNABORTED, false, true);
1338 err = rtrs_clt_failover_req(clt, req);
1340 /* Failover failed, notify anyway */
1341 req->conf(req->priv, err);
1345 static void free_path_reqs(struct rtrs_clt_path *clt_path)
1347 struct rtrs_clt_io_req *req;
1350 if (!clt_path->reqs)
1352 for (i = 0; i < clt_path->queue_depth; ++i) {
1353 req = &clt_path->reqs[i];
1355 ib_dereg_mr(req->mr);
1357 rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1);
1359 kfree(clt_path->reqs);
1360 clt_path->reqs = NULL;
1363 static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
1365 struct rtrs_clt_io_req *req;
1366 int i, err = -ENOMEM;
1368 clt_path->reqs = kcalloc(clt_path->queue_depth,
1369 sizeof(*clt_path->reqs),
1371 if (!clt_path->reqs)
1374 for (i = 0; i < clt_path->queue_depth; ++i) {
1375 req = &clt_path->reqs[i];
1376 req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL,
1377 clt_path->s.dev->ib_dev,
1379 rtrs_clt_rdma_done);
1383 req->sge = kcalloc(2, sizeof(*req->sge), GFP_KERNEL);
1387 req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd,
1389 clt_path->max_pages_per_mr);
1390 if (IS_ERR(req->mr)) {
1391 err = PTR_ERR(req->mr);
1393 pr_err("Failed to alloc clt_path->max_pages_per_mr %d\n",
1394 clt_path->max_pages_per_mr);
1398 init_completion(&req->inv_comp);
1404 free_path_reqs(clt_path);
1409 static int alloc_permits(struct rtrs_clt_sess *clt)
1411 unsigned int chunk_bits;
1414 clt->permits_map = kcalloc(BITS_TO_LONGS(clt->queue_depth),
1415 sizeof(long), GFP_KERNEL);
1416 if (!clt->permits_map) {
1420 clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
1421 if (!clt->permits) {
1425 chunk_bits = ilog2(clt->queue_depth - 1) + 1;
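/*
 * chunk_bits is the number of bits needed to address any of the
 * queue_depth chunks. mem_off below places the chunk index in the top
 * chunk_bits bits of the immediate payload, leaving the low bits free for
 * the byte offset within the chunk (see the imm encoding in the IO paths).
 */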
1426 for (i = 0; i < clt->queue_depth; i++) {
1427 struct rtrs_permit *permit;
1429 permit = get_permit(clt, i);
1431 permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits);
1437 kfree(clt->permits_map);
1438 clt->permits_map = NULL;
1443 static void free_permits(struct rtrs_clt_sess *clt)
1445 if (clt->permits_map) {
1446 size_t sz = clt->queue_depth;
1448 wait_event(clt->permits_wait,
1449 find_first_bit(clt->permits_map, sz) >= sz);
1451 kfree(clt->permits_map);
1452 clt->permits_map = NULL;
1453 kfree(clt->permits);
1454 clt->permits = NULL;
1457 static void query_fast_reg_mode(struct rtrs_clt_path *clt_path)
1459 struct ib_device *ib_dev;
1460 u64 max_pages_per_mr;
1463 ib_dev = clt_path->s.dev->ib_dev;
1466 * Use the smallest page size supported by the HCA, down to a
1467 * minimum of 4096 bytes. We're unlikely to build large sglists
1468 * out of smaller entries.
1470 mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
1471 max_pages_per_mr = ib_dev->attrs.max_mr_size;
1472 do_div(max_pages_per_mr, (1ull << mr_page_shift));
1473 clt_path->max_pages_per_mr =
1474 min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr,
1475 ib_dev->attrs.max_fast_reg_page_list_len);
1476 clt_path->clt->max_segments =
1477 min(clt_path->max_pages_per_mr, clt_path->clt->max_segments);
1480 static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path,
1481 enum rtrs_clt_state new_state,
1482 enum rtrs_clt_state *old_state)
1486 spin_lock_irq(&clt_path->state_wq.lock);
1488 *old_state = clt_path->state;
1489 changed = rtrs_clt_change_state(clt_path, new_state);
1490 spin_unlock_irq(&clt_path->state_wq.lock);
1495 static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
1497 struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
1499 rtrs_rdma_error_recovery(con);
1502 static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path)
1504 rtrs_init_hb(&clt_path->s, &io_comp_cqe,
1505 RTRS_HB_INTERVAL_MS,
1507 rtrs_clt_hb_err_handler,
1511 static void rtrs_clt_reconnect_work(struct work_struct *work);
1512 static void rtrs_clt_close_work(struct work_struct *work);
1514 static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
1515 const struct rtrs_addr *path,
1516 size_t con_num, u32 nr_poll_queues)
1518 struct rtrs_clt_path *clt_path;
1523 clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL);
1529 * +1: Extra connection for user messages
1531 total_con = con_num + nr_poll_queues + 1;
1532 clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con),
1534 if (!clt_path->s.con)
1537 clt_path->s.con_num = total_con;
1538 clt_path->s.irq_con_num = con_num + 1;
1540 clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL);
1541 if (!clt_path->stats)
1544 mutex_init(&clt_path->init_mutex);
1545 uuid_gen(&clt_path->s.uuid);
1546 memcpy(&clt_path->s.dst_addr, path->dst,
1547 rdma_addr_size((struct sockaddr *)path->dst));
1550 * rdma_resolve_addr() passes src_addr to cma_bind_addr, which
1551 * checks that the sa_family is non-zero. If the user passed src_addr=NULL,
1552 * the sess->src_addr will contain only zeros, which is then fine.
1555 memcpy(&clt_path->s.src_addr, path->src,
1556 rdma_addr_size((struct sockaddr *)path->src));
1557 strscpy(clt_path->s.sessname, clt->sessname,
1558 sizeof(clt_path->s.sessname));
1559 clt_path->clt = clt;
1560 clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS;
1561 init_waitqueue_head(&clt_path->state_wq);
1562 clt_path->state = RTRS_CLT_CONNECTING;
1563 atomic_set(&clt_path->connected_cnt, 0);
1564 INIT_WORK(&clt_path->close_work, rtrs_clt_close_work);
1565 INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work);
1566 rtrs_clt_init_hb(clt_path);
1568 clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry));
1569 if (!clt_path->mp_skip_entry)
1570 goto err_free_stats;
1572 for_each_possible_cpu(cpu)
1573 INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu));
1575 err = rtrs_clt_init_stats(clt_path->stats);
1577 goto err_free_percpu;
1582 free_percpu(clt_path->mp_skip_entry);
1584 kfree(clt_path->stats);
1586 kfree(clt_path->s.con);
1590 return ERR_PTR(err);
1593 void free_path(struct rtrs_clt_path *clt_path)
1595 free_percpu(clt_path->mp_skip_entry);
1596 mutex_destroy(&clt_path->init_mutex);
1597 kfree(clt_path->s.con);
1598 kfree(clt_path->rbufs);
1602 static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid)
1604 struct rtrs_clt_con *con;
1606 con = kzalloc(sizeof(*con), GFP_KERNEL);
1610 /* Map first two connections to the first CPU */
1611 con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
1613 con->c.path = &clt_path->s;
1614 /* Align with srv, init as 1 */
1615 atomic_set(&con->c.wr_cnt, 1);
1616 mutex_init(&con->con_mutex);
1618 clt_path->s.con[cid] = &con->c;
1623 static void destroy_con(struct rtrs_clt_con *con)
1625 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1627 clt_path->s.con[con->c.cid] = NULL;
1628 mutex_destroy(&con->con_mutex);
1632 static int create_con_cq_qp(struct rtrs_clt_con *con)
1634 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1635 u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit;
1637 struct rtrs_msg_rkey_rsp *rsp;
1639 lockdep_assert_held(&con->con_mutex);
1640 if (con->c.cid == 0) {
1642 /* We must be the first here */
1643 if (WARN_ON(clt_path->s.dev))
1647 * The whole session uses the device from the user connection.
1648 * Be careful not to close the user connection before the ib dev
1649 * is gracefully put.
1651 clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
1653 if (!clt_path->s.dev) {
1654 rtrs_wrn(clt_path->clt,
1655 "rtrs_ib_dev_find_get_or_add(): no memory\n");
1658 clt_path->s.dev_ref = 1;
1659 query_fast_reg_mode(clt_path);
1660 wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
1662 * Two (request + registration) completions for send
1663 * Two for recv if always_invalidate is set on the server
1665 * + 2 for drain and heartbeat
1666 * in case the qp gets into an error state.
1669 min_t(int, wr_limit, SERVICE_CON_QUEUE_DEPTH * 2 + 2);
1670 max_recv_wr = max_send_wr;
1673 * Here we assume that session members are correctly set.
1674 * This is always true if user connection (cid == 0) is
1675 * established first.
1677 if (WARN_ON(!clt_path->s.dev))
1679 if (WARN_ON(!clt_path->queue_depth))
1682 wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
1683 /* Shared between connections */
1684 clt_path->s.dev_ref++;
1685 max_send_wr = min_t(int, wr_limit,
1686 /* QD * (REQ + RSP + FR REGS or INVS) + drain */
1687 clt_path->queue_depth * 3 + 1);
1688 max_recv_wr = min_t(int, wr_limit,
1689 clt_path->queue_depth * 3 + 1);
1692 atomic_set(&con->c.sq_wr_avail, max_send_wr);
1693 cq_num = max_send_wr + max_recv_wr;
1694 /* alloc iu to recv new rkey reply when server reports flags set */
1695 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
1696 con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
1698 clt_path->s.dev->ib_dev,
1700 rtrs_clt_rdma_done);
1703 con->queue_num = cq_num;
1705 cq_num = max_send_wr + max_recv_wr;
1706 cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
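/*
 * Connections with cid >= irq_con_num are the client polling queues
 * (see alloc_path()): their CQs use IB_POLL_DIRECT so the upper layer
 * polls completions itself, all other connections complete in softirq.
 */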
1707 if (con->c.cid >= clt_path->s.irq_con_num)
1708 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
1709 cq_vector, cq_num, max_send_wr,
1710 max_recv_wr, IB_POLL_DIRECT);
1712 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
1713 cq_vector, cq_num, max_send_wr,
1714 max_recv_wr, IB_POLL_SOFTIRQ);
1716 * In case of error we do not bother to clean previous allocations,
1717 * since destroy_con_cq_qp() must be called.
1722 static void destroy_con_cq_qp(struct rtrs_clt_con *con)
1724 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1727 * Be careful here: destroy_con_cq_qp() can be called even if
1728 * create_con_cq_qp() failed, see comments there.
1730 lockdep_assert_held(&con->con_mutex);
1731 rtrs_cq_qp_destroy(&con->c);
1733 rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev,
1735 con->rsp_ius = NULL;
1738 if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) {
1739 rtrs_ib_dev_put(clt_path->s.dev);
1740 clt_path->s.dev = NULL;
1744 static void stop_cm(struct rtrs_clt_con *con)
1746 rdma_disconnect(con->c.cm_id);
1748 ib_drain_qp(con->c.qp);
1751 static void destroy_cm(struct rtrs_clt_con *con)
1753 rdma_destroy_id(con->c.cm_id);
1754 con->c.cm_id = NULL;
1757 static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
1759 struct rtrs_path *s = con->c.path;
1762 mutex_lock(&con->con_mutex);
1763 err = create_con_cq_qp(con);
1764 mutex_unlock(&con->con_mutex);
1766 rtrs_err(s, "create_con_cq_qp(), err: %d\n", err);
1769 err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
1771 rtrs_err(s, "Resolving route failed, err: %d\n", err);
1776 static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
1778 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1779 struct rtrs_clt_sess *clt = clt_path->clt;
1780 struct rtrs_msg_conn_req msg;
1781 struct rdma_conn_param param;
1785 param = (struct rdma_conn_param) {
1787 .rnr_retry_count = 7,
1788 .private_data = &msg,
1789 .private_data_len = sizeof(msg),
1792 msg = (struct rtrs_msg_conn_req) {
1793 .magic = cpu_to_le16(RTRS_MAGIC),
1794 .version = cpu_to_le16(RTRS_PROTO_VER),
1795 .cid = cpu_to_le16(con->c.cid),
1796 .cid_num = cpu_to_le16(clt_path->s.con_num),
1797 .recon_cnt = cpu_to_le16(clt_path->s.recon_cnt),
1799 msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0;
1800 uuid_copy(&msg.sess_uuid, &clt_path->s.uuid);
1801 uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
1803 err = rdma_connect_locked(con->c.cm_id, &param);
1805 rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
1810 static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
1811 struct rdma_cm_event *ev)
1813 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1814 struct rtrs_clt_sess *clt = clt_path->clt;
1815 const struct rtrs_msg_conn_rsp *msg;
1816 u16 version, queue_depth;
1820 msg = ev->param.conn.private_data;
1821 len = ev->param.conn.private_data_len;
1822 if (len < sizeof(*msg)) {
1823 rtrs_err(clt, "Invalid RTRS connection response\n");
1826 if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
1827 rtrs_err(clt, "Invalid RTRS magic\n");
1830 version = le16_to_cpu(msg->version);
1831 if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
1832 rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
1833 version >> 8, RTRS_PROTO_VER_MAJOR);
1836 errno = le16_to_cpu(msg->errno);
1838 rtrs_err(clt, "Invalid RTRS message: errno %d\n",
1842 if (con->c.cid == 0) {
1843 queue_depth = le16_to_cpu(msg->queue_depth);
1845 if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) {
1846 rtrs_err(clt, "Error: queue depth changed\n");
1849 * Stop any more reconnection attempts
1851 clt_path->reconnect_attempts = -1;
1853 "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
1857 if (!clt_path->rbufs) {
1858 clt_path->rbufs = kcalloc(queue_depth,
1859 sizeof(*clt_path->rbufs),
1861 if (!clt_path->rbufs)
1864 clt_path->queue_depth = queue_depth;
1865 clt_path->s.signal_interval = min_not_zero(queue_depth,
1866 (unsigned short) SERVICE_CON_QUEUE_DEPTH);
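/*
 * signal_interval bounds how many unsignalled sends can be posted in a
 * row: the send paths request IB_SEND_SIGNALED on every signal_interval-th
 * WR (see the wr_cnt checks there) so send-queue slots keep being freed.
 */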
1867 clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
1868 clt_path->max_io_size = le32_to_cpu(msg->max_io_size);
1869 clt_path->flags = le32_to_cpu(msg->flags);
1870 clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size;
1873 * Global IO size is always a minimum.
1874 * If during a reconnection the server sends us a slightly
1875 * higher value, the client does not care and uses the cached minimum.
1877 * Since we can have several sessions (paths) re-establishing
1878 * connections in parallel, use a lock.
1880 mutex_lock(&clt->paths_mutex);
1881 clt->queue_depth = clt_path->queue_depth;
1882 clt->max_io_size = min_not_zero(clt_path->max_io_size,
1884 mutex_unlock(&clt->paths_mutex);
1887 * Cache the hca_port and hca_name for sysfs
1889 clt_path->hca_port = con->c.cm_id->port_num;
1890 scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name),
1891 clt_path->s.dev->ib_dev->name);
1892 clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr;
1893 /* set for_new_clt, to allow future reconnect on any path */
1894 clt_path->for_new_clt = 1;
1900 static inline void flag_success_on_conn(struct rtrs_clt_con *con)
1902 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1904 atomic_inc(&clt_path->connected_cnt);
1908 static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
1909 struct rdma_cm_event *ev)
1911 struct rtrs_path *s = con->c.path;
1912 const struct rtrs_msg_conn_rsp *msg;
1913 const char *rej_msg;
1917 status = ev->status;
1918 rej_msg = rdma_reject_msg(con->c.cm_id, status);
1919 msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
1921 if (msg && data_len >= sizeof(*msg)) {
1922 errno = (int16_t)le16_to_cpu(msg->errno);
1923 if (errno == -EBUSY)
1925 "Previous session is still exists on the server, please reconnect later\n");
1928 "Connect rejected: status %d (%s), rtrs errno %d\n",
1929 status, rej_msg, errno);
1932 "Connect rejected but with malformed message: status %d (%s)\n",
1939 void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait)
1941 if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL))
1942 queue_work(rtrs_wq, &clt_path->close_work);
1944 flush_work(&clt_path->close_work);
1947 static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
1949 if (con->cm_err == 1) {
1950 struct rtrs_clt_path *clt_path;
1952 clt_path = to_clt_path(con->c.path);
1953 if (atomic_dec_and_test(&clt_path->connected_cnt))
1955 wake_up(&clt_path->state_wq);
1957 con->cm_err = cm_err;
1960 static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
1961 struct rdma_cm_event *ev)
1963 struct rtrs_clt_con *con = cm_id->context;
1964 struct rtrs_path *s = con->c.path;
1965 struct rtrs_clt_path *clt_path = to_clt_path(s);
1968 switch (ev->event) {
1969 case RDMA_CM_EVENT_ADDR_RESOLVED:
1970 cm_err = rtrs_rdma_addr_resolved(con);
1972 case RDMA_CM_EVENT_ROUTE_RESOLVED:
1973 cm_err = rtrs_rdma_route_resolved(con);
1975 case RDMA_CM_EVENT_ESTABLISHED:
1976 cm_err = rtrs_rdma_conn_established(con, ev);
1979 * Report success and wake up. Here we abuse state_wq,
1980 * i.e. wake up without state change, but we set cm_err.
1982 flag_success_on_conn(con);
1983 wake_up(&clt_path->state_wq);
1987 case RDMA_CM_EVENT_REJECTED:
1988 cm_err = rtrs_rdma_conn_rejected(con, ev);
1990 case RDMA_CM_EVENT_DISCONNECTED:
1991 /* No message for disconnecting */
1992 cm_err = -ECONNRESET;
1994 case RDMA_CM_EVENT_CONNECT_ERROR:
1995 case RDMA_CM_EVENT_UNREACHABLE:
1996 case RDMA_CM_EVENT_ADDR_CHANGE:
1997 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1998 rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
1999 rdma_event_msg(ev->event), ev->status);
2000 cm_err = -ECONNRESET;
2002 case RDMA_CM_EVENT_ADDR_ERROR:
2003 case RDMA_CM_EVENT_ROUTE_ERROR:
2004 rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
2005 rdma_event_msg(ev->event), ev->status);
2006 cm_err = -EHOSTUNREACH;
2008 case RDMA_CM_EVENT_DEVICE_REMOVAL:
2010 * Device removal is a special case. Queue close and return 0.
2012 rtrs_clt_close_conns(clt_path, false);
2015 rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n",
2016 rdma_event_msg(ev->event), ev->status);
2017 cm_err = -ECONNRESET;
2023 * A cm error makes sense only while establishing a connection;
2024 * in other cases we rely on the normal reconnect procedure.
2026 flag_error_on_conn(con, cm_err);
2027 rtrs_rdma_error_recovery(con);
2033 static int create_cm(struct rtrs_clt_con *con)
2035 struct rtrs_path *s = con->c.path;
2036 struct rtrs_clt_path *clt_path = to_clt_path(s);
2037 struct rdma_cm_id *cm_id;
2040 cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
2041 clt_path->s.dst_addr.ss_family == AF_IB ?
2042 RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
2043 if (IS_ERR(cm_id)) {
2044 err = PTR_ERR(cm_id);
2045 rtrs_err(s, "Failed to create CM ID, err: %d\n", err);
2049 con->c.cm_id = cm_id;
2051 /* allow the port to be reused */
2052 err = rdma_set_reuseaddr(cm_id, 1);
2054 rtrs_err(s, "Set address reuse failed, err: %d\n", err);
2057 err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
2058 (struct sockaddr *)&clt_path->s.dst_addr,
2059 RTRS_CONNECT_TIMEOUT_MS);
2061 rtrs_err(s, "Failed to resolve address, err: %d\n", err);
2065 * Combine connection status and session events. This is needed
2066 * for waiting on two possible cases: cm_err has something meaningful,
2067 * or the session state was really changed to an error by device removal.
2069 err = wait_event_interruptible_timeout(
2071 con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
2072 msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
2073 if (err == 0 || err == -ERESTARTSYS) {
2076 /* Timed out or interrupted */
2079 if (con->cm_err < 0) {
2083 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) {
2084 /* Device removal */
2085 err = -ECONNABORTED;
2093 mutex_lock(&con->con_mutex);
2094 destroy_con_cq_qp(con);
2095 mutex_unlock(&con->con_mutex);
2102 static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
2104 struct rtrs_clt_sess *clt = clt_path->clt;
2108 * We can fire the RECONNECTED event only when all paths were
2109 * connected on rtrs_clt_open(), then each was disconnected
2110 * and the first one connected again. That's why we play this nasty
2111 * game with the counter value.
2114 mutex_lock(&clt->paths_ev_mutex);
2115 up = ++clt->paths_up;
2117 * Here it is safe to access paths_num directly since the up counter
2118 * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is
2119 * in progress, thus path removals are impossible.
2121 if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
2122 clt->paths_up = clt->paths_num;
2124 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
2125 mutex_unlock(&clt->paths_ev_mutex);
2127 /* Mark session as established */
2128 clt_path->established = true;
2129 clt_path->reconnect_attempts = 0;
2130 clt_path->stats->reconnects.successful_cnt++;
2133 static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path)
2135 struct rtrs_clt_sess *clt = clt_path->clt;
2137 if (!clt_path->established)
2140 clt_path->established = false;
2141 mutex_lock(&clt->paths_ev_mutex);
2142 WARN_ON(!clt->paths_up);
2143 if (--clt->paths_up == 0)
2144 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
2145 mutex_unlock(&clt->paths_ev_mutex);
2148 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path)
2150 struct rtrs_clt_con *con;
2153 WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED);
2156 * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
2157 * exactly in between. Start destroying after it finishes.
2159 mutex_lock(&clt_path->init_mutex);
2160 mutex_unlock(&clt_path->init_mutex);
2163 * All IO paths must observe !CONNECTED state before we
2168 rtrs_stop_hb(&clt_path->s);
2171 * The order is utterly crucial: first disconnect and complete all
2172 * rdma requests with an error (thus setting in_use=false for requests),
2173 * then fail outstanding requests, checking in_use for each, and
2174 * eventually notify the upper layer about the session disconnection.
2177 for (cid = 0; cid < clt_path->s.con_num; cid++) {
2178 if (!clt_path->s.con[cid])
2180 con = to_clt_con(clt_path->s.con[cid]);
2183 fail_all_outstanding_reqs(clt_path);
2184 free_path_reqs(clt_path);
2185 rtrs_clt_path_down(clt_path);
2188 * Wait for graceful shutdown, namely when the peer side invokes
2189 * rdma_disconnect(). 'connected_cnt' is decremented only on
2190 * CM events, thus if the other side has crashed and hb has detected
2191 * that something is wrong, we will be stuck here for exactly the timeout ms,
2192 * since CM does not fire anything. That is fine, we are not in
2195 wait_event_timeout(clt_path->state_wq,
2196 !atomic_read(&clt_path->connected_cnt),
2197 msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
2199 for (cid = 0; cid < clt_path->s.con_num; cid++) {
2200 if (!clt_path->s.con[cid])
2202 con = to_clt_con(clt_path->s.con[cid]);
2203 mutex_lock(&con->con_mutex);
2204 destroy_con_cq_qp(con);
2205 mutex_unlock(&con->con_mutex);
2211 static inline bool xchg_paths(struct rtrs_clt_path __rcu **rcu_ppcpu_path,
2212 struct rtrs_clt_path *clt_path,
2213 struct rtrs_clt_path *next)
2215 struct rtrs_clt_path **ppcpu_path;
2217 /* Call cmpxchg() without sparse warnings */
2218 ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
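/*
 * Returns true only if the per-CPU cached pointer was still @clt_path and
 * has now been atomically replaced by @next; false means the IO path
 * changed it first and nothing was written.
 */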
2219 return clt_path == cmpxchg(ppcpu_path, clt_path, next);
2222 static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
2224 struct rtrs_clt_sess *clt = clt_path->clt;
2225 struct rtrs_clt_path *next;
2226 bool wait_for_grace = false;
2229 mutex_lock(&clt->paths_mutex);
2230 list_del_rcu(&clt_path->s.entry);
2232 /* Make sure everybody observes path removal. */
2236 * At this point nobody sees @sess in the list, but still we have
2237 * dangling pointer @pcpu_path which _can_ point to @sess. Since
2238 * nobody can observe @sess in the list, we guarantee that IO path
2239 * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
2240 * to @sess, but can never again become @sess.
2244 * Decrement the paths number only after the grace period, because
2245 * the caller of do_each_path() must first observe the list without
2246 * the path and only then the decremented paths number.
2248 * Otherwise there can be the following situation:
2249 * o Two paths exist and IO is coming.
2250 * o One path is removed:
2252 * do_each_path(): rtrs_clt_remove_path_from_arr():
2253 * path = get_next_path()
2254 * ^^^ list_del_rcu(path)
2255 * [!CONNECTED path] clt->paths_num--
2257 * load clt->paths_num from 2 to 1
2261 * path is observed as !CONNECTED, but do_each_path() loop
2262 * ends, because expression i < clt->paths_num is false.
2267 * Get @next connection from current @sess which is going to be
2268 * removed. If @sess is the last element, then @next is NULL.
2271 next = list_next_or_null_rr_rcu(&clt->paths_list, &clt_path->s.entry,
2272 typeof(*next), s.entry);
2276 * @pcpu paths can still point to the path which is going to be
2277 * removed, so change the pointer manually.
2279 for_each_possible_cpu(cpu) {
2280 struct rtrs_clt_path __rcu **ppcpu_path;
2282 ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
2283 if (rcu_dereference_protected(*ppcpu_path,
2284 lockdep_is_held(&clt->paths_mutex)) != clt_path)
2286 * synchronize_rcu() was called just after deleting
2287 * entry from the list, thus IO code path cannot
2288 * change pointer back to the pointer which is going
2289 * to be removed, we are safe here.
2294 * We race with IO code path, which also changes pointer,
2295 * thus we have to be careful not to overwrite it.
2297 if (xchg_paths(ppcpu_path, clt_path, next))
2299 * @ppcpu_path was successfully replaced with @next,
2300 * which means that someone could also have picked up
2301 * @sess and be dereferencing it right now, so waiting for
2302 * a grace period is required.
2304 wait_for_grace = true;
2309 mutex_unlock(&clt->paths_mutex);
2312 static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path)
2314 struct rtrs_clt_sess *clt = clt_path->clt;
2316 mutex_lock(&clt->paths_mutex);
2319 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
2320 mutex_unlock(&clt->paths_mutex);
2323 static void rtrs_clt_close_work(struct work_struct *work)
2325 struct rtrs_clt_path *clt_path;
2327 clt_path = container_of(work, struct rtrs_clt_path, close_work);
2329 cancel_delayed_work_sync(&clt_path->reconnect_dwork);
2330 rtrs_clt_stop_and_destroy_conns(clt_path);
2331 rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL);
2334 static int init_conns(struct rtrs_clt_path *clt_path)
2340 * On every new session the connections increase the reconnect counter
2341 * to avoid clashes with previous sessions that are not yet closed
2342 * on the server side.
2344 clt_path->s.recon_cnt++;
2346 /* Establish all RDMA connections */
2347 for (cid = 0; cid < clt_path->s.con_num; cid++) {
2348 err = create_con(clt_path, cid);
2352 err = create_cm(to_clt_con(clt_path->s.con[cid]));
2354 destroy_con(to_clt_con(clt_path->s.con[cid]));
2358 err = alloc_path_reqs(clt_path);
2362 rtrs_start_hb(&clt_path->s);
2368 struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]);
2372 mutex_lock(&con->con_mutex);
2373 destroy_con_cq_qp(con);
2374 mutex_unlock(&con->con_mutex);
2379 * If we have never taken the async path and got an error, say,
2380 * from rdma_resolve_addr(), switch to the CONNECTING_ERR state
2381 * manually to keep reconnecting.
2383 rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
2388 static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
2390 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
2391 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
2394 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
2395 rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
2397 if (wc->status != IB_WC_SUCCESS) {
2398 rtrs_err(clt_path->clt, "Path info request send failed: %s\n",
2399 ib_wc_status_msg(wc->status));
2400 rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
2404 rtrs_clt_update_wc_stats(con);
2407 static int process_info_rsp(struct rtrs_clt_path *clt_path,
2408 const struct rtrs_msg_info_rsp *msg)
2410 unsigned int sg_cnt, total_len;
2413 sg_cnt = le16_to_cpu(msg->sg_cnt);
2414 if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) {
2415 rtrs_err(clt_path->clt,
2416 "Incorrect sg_cnt %d, queue_depth is not a multiple of it\n",
2422 * Check if IB immediate data size is enough to hold the mem_id and
2423 * the offset inside the memory chunk.
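 *
 * As an illustrative example (the numbers are hypothetical, not taken
 * from a real setup): with sg_cnt = 128 and chunk_size = 64K the buffer
 * id needs ilog2(128 - 1) + 1 = 7 bits and the offset needs
 * ilog2(65536 - 1) + 1 = 16 bits, i.e. 23 bits in total, which must
 * fit into MAX_IMM_PAYL_BITS.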
2425 if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) >
2426 MAX_IMM_PAYL_BITS) {
2427 rtrs_err(clt_path->clt,
2428 "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
2429 MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size);
2433 for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) {
2434 const struct rtrs_sg_desc *desc = &msg->desc[sgi];
2438 addr = le64_to_cpu(desc->addr);
2439 rkey = le32_to_cpu(desc->key);
2440 len = le32_to_cpu(desc->len);
2444 if (!len || (len % clt_path->chunk_size)) {
2445 rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n",
2450 for ( ; len && i < clt_path->queue_depth; i++) {
2451 clt_path->rbufs[i].addr = addr;
2452 clt_path->rbufs[i].rkey = rkey;
2454 len -= clt_path->chunk_size;
2455 addr += clt_path->chunk_size;
2459 if (sgi != sg_cnt || i != clt_path->queue_depth) {
2460 rtrs_err(clt_path->clt,
2461 "Incorrect sg vector, not fully mapped\n");
2464 if (total_len != clt_path->chunk_size * clt_path->queue_depth) {
2465 rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len);
2472 static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
2474 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
2475 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
2476 struct rtrs_msg_info_rsp *msg;
2477 enum rtrs_clt_state state;
2482 state = RTRS_CLT_CONNECTING_ERR;
2484 WARN_ON(con->c.cid);
2485 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
2486 if (wc->status != IB_WC_SUCCESS) {
2487 rtrs_err(clt_path->clt, "Path info response recv failed: %s\n",
2488 ib_wc_status_msg(wc->status));
2491 WARN_ON(wc->opcode != IB_WC_RECV);
2493 if (wc->byte_len < sizeof(*msg)) {
2494 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
2498 ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
2499 iu->size, DMA_FROM_DEVICE);
2501 if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) {
2502 rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n",
2503 le16_to_cpu(msg->type));
2506 rx_sz = sizeof(*msg);
2507 rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
2508 if (wc->byte_len < rx_sz) {
2509 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
2513 err = process_info_rsp(clt_path, msg);
2517 err = post_recv_path(clt_path);
2521 state = RTRS_CLT_CONNECTED;
2524 rtrs_clt_update_wc_stats(con);
2525 rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
2526 rtrs_clt_change_state_get_old(clt_path, state, NULL);
2529 static int rtrs_send_path_info(struct rtrs_clt_path *clt_path)
2531 struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]);
2532 struct rtrs_msg_info_req *msg;
2533 struct rtrs_iu *tx_iu, *rx_iu;
2537 rx_sz = sizeof(struct rtrs_msg_info_rsp);
2538 rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth;
2540 tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
2541 clt_path->s.dev->ib_dev, DMA_TO_DEVICE,
2542 rtrs_clt_info_req_done);
2543 rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev,
2544 DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
2545 if (!tx_iu || !rx_iu) {
2549 /* Prepare for getting info response */
2550 err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
2552 rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err);
2558 msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
2559 memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname));
2561 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
2563 tx_iu->size, DMA_TO_DEVICE);
2565 /* Send info request */
2566 err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
2568 rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err);
2573 /* Wait for state change */
2574 wait_event_interruptible_timeout(clt_path->state_wq,
2575 clt_path->state != RTRS_CLT_CONNECTING,
2576 msecs_to_jiffies(
2577 RTRS_CONNECT_TIMEOUT_MS));
2578 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) {
2579 if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR)
2587 rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1);
2589 rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1);
2591 /* If we have never taken the async path because of allocation failures */
2592 rtrs_clt_change_state_get_old(clt_path,
2593 RTRS_CLT_CONNECTING_ERR, NULL);
2599 * init_path() - establishes all path connections and does the handshake
2600 * @clt_path: client path.
2601 * In case of error a full close or reconnect procedure should be taken,
2602 * because the reconnect or close async works may already have been started.
2604 static int init_path(struct rtrs_clt_path *clt_path)
2608 struct rtrs_addr path = {
2609 .src = &clt_path->s.src_addr,
2610 .dst = &clt_path->s.dst_addr,
2613 rtrs_addr_to_str(&path, str, sizeof(str));
2615 mutex_lock(&clt_path->init_mutex);
2616 err = init_conns(clt_path);
2618 rtrs_err(clt_path->clt,
2619 "init_conns() failed: err=%d path=%s [%s:%u]\n", err,
2620 str, clt_path->hca_name, clt_path->hca_port);
2623 err = rtrs_send_path_info(clt_path);
2625 rtrs_err(clt_path->clt,
2626 "rtrs_send_path_info() failed: err=%d path=%s [%s:%u]\n",
2627 err, str, clt_path->hca_name, clt_path->hca_port);
2630 rtrs_clt_path_up(clt_path);
2632 mutex_unlock(&clt_path->init_mutex);
2637 static void rtrs_clt_reconnect_work(struct work_struct *work)
2639 struct rtrs_clt_path *clt_path;
2640 struct rtrs_clt_sess *clt;
2641 unsigned int delay_ms;
2644 clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path,
2646 clt = clt_path->clt;
2648 if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING)
2651 if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) {
2652 /* Close a path completely if max attempts is reached */
2653 rtrs_clt_close_conns(clt_path, false);
2656 clt_path->reconnect_attempts++;
2658 /* Stop everything */
2659 rtrs_clt_stop_and_destroy_conns(clt_path);
2660 msleep(RTRS_RECONNECT_BACKOFF);
2661 if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) {
2662 err = init_path(clt_path);
2664 goto reconnect_again;
2670 if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) {
2671 clt_path->stats->reconnects.fail_cnt++;
2672 delay_ms = clt->reconnect_delay_sec * 1000;
2673 queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
2674 msecs_to_jiffies(delay_ms +
2676 RTRS_RECONNECT_SEED));
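	/*
	 * Illustrative example (values are hypothetical): with
	 * reconnect_delay_sec = 5 the next attempt fires roughly 5 s later,
	 * plus a random extra delay of up to RTRS_RECONNECT_SEED seconds,
	 * so that many clients do not all reconnect at the very same moment
	 * after a major outage.
	 */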
2680 static void rtrs_clt_dev_release(struct device *dev)
2682 struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
2688 static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
2689 u16 port, size_t pdu_sz, void *priv,
2690 void (*link_ev)(void *priv,
2691 enum rtrs_clt_link_ev ev),
2692 unsigned int reconnect_delay_sec,
2693 unsigned int max_reconnect_attempts)
2695 struct rtrs_clt_sess *clt;
2698 if (!paths_num || paths_num > MAX_PATHS_NUM)
2699 return ERR_PTR(-EINVAL);
2701 if (strlen(sessname) >= sizeof(clt->sessname))
2702 return ERR_PTR(-EINVAL);
2704 clt = kzalloc(sizeof(*clt), GFP_KERNEL);
2706 return ERR_PTR(-ENOMEM);
2708 clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
2709 if (!clt->pcpu_path) {
2711 return ERR_PTR(-ENOMEM);
2714 uuid_gen(&clt->paths_uuid);
2715 INIT_LIST_HEAD_RCU(&clt->paths_list);
2716 clt->paths_num = paths_num;
2717 clt->paths_up = MAX_PATHS_NUM;
2719 clt->pdu_sz = pdu_sz;
2720 clt->max_segments = RTRS_MAX_SEGMENTS;
2721 clt->reconnect_delay_sec = reconnect_delay_sec;
2722 clt->max_reconnect_attempts = max_reconnect_attempts;
2724 clt->link_ev = link_ev;
2725 clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
2726 strscpy(clt->sessname, sessname, sizeof(clt->sessname));
2727 init_waitqueue_head(&clt->permits_wait);
2728 mutex_init(&clt->paths_ev_mutex);
2729 mutex_init(&clt->paths_mutex);
2731 clt->dev.class = rtrs_clt_dev_class;
2732 clt->dev.release = rtrs_clt_dev_release;
2733 err = dev_set_name(&clt->dev, "%s", sessname);
2737 * Suppress user space notification until
2738 * sysfs files are created
2740 dev_set_uevent_suppress(&clt->dev, true);
2741 err = device_register(&clt->dev);
2743 put_device(&clt->dev);
2747 clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
2748 if (!clt->kobj_paths) {
2752 err = rtrs_clt_create_sysfs_root_files(clt);
2754 kobject_del(clt->kobj_paths);
2755 kobject_put(clt->kobj_paths);
2758 dev_set_uevent_suppress(&clt->dev, false);
2759 kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
2763 device_unregister(&clt->dev);
2765 free_percpu(clt->pcpu_path);
2767 return ERR_PTR(err);
2770 static void free_clt(struct rtrs_clt_sess *clt)
2773 free_percpu(clt->pcpu_path);
2774 mutex_destroy(&clt->paths_ev_mutex);
2775 mutex_destroy(&clt->paths_mutex);
2776 /* release callback will free clt in last put */
2777 device_unregister(&clt->dev);
2781 * rtrs_clt_open() - Open a session to an RTRS server
2782 * @ops: holds the link event callback and the private pointer.
2783 * @pathname: name of the session
2784 * @paths: Paths to be established defined by their src and dst addresses
2785 * @paths_num: Number of elements in the @paths array
2786 * @port: port to be used by the RTRS session
2787 * @pdu_sz: Size of extra payload which can be accessed after permit allocation.
2788 * @reconnect_delay_sec: time between reconnect tries
2789 * @max_reconnect_attempts: Number of times to reconnect on error before giving
2790 *			    up, 0 for disabled, -1 for forever
2791 * @nr_poll_queues: number of polling mode connections using the IB_POLL_DIRECT flag
2793 * Starts session establishment with the rtrs_server. The function can block
2794 * for up to ~2000 ms before it returns.
2796 * Return: a valid session pointer on success, an ERR_PTR otherwise.
2798 struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops,
2799 const char *pathname,
2800 const struct rtrs_addr *paths,
2801 size_t paths_num, u16 port,
2802 size_t pdu_sz, u8 reconnect_delay_sec,
2803 s16 max_reconnect_attempts, u32 nr_poll_queues)
2805 struct rtrs_clt_path *clt_path, *tmp;
2806 struct rtrs_clt_sess *clt;
2809 if (strchr(pathname, '/') || strchr(pathname, '.')) {
2810 pr_err("pathname cannot contain '/' or '.'\n");
2815 clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv,
2817 reconnect_delay_sec,
2818 max_reconnect_attempts);
2823 for (i = 0; i < paths_num; i++) {
2824 struct rtrs_clt_path *clt_path;
2826 clt_path = alloc_path(clt, &paths[i], nr_cpu_ids,
2828 if (IS_ERR(clt_path)) {
2829 err = PTR_ERR(clt_path);
2830 goto close_all_path;
2833 clt_path->for_new_clt = 1;
2834 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
2836 err = init_path(clt_path);
2838 list_del_rcu(&clt_path->s.entry);
2839 rtrs_clt_close_conns(clt_path, true);
2840 free_percpu(clt_path->stats->pcpu_stats);
2841 kfree(clt_path->stats);
2842 free_path(clt_path);
2843 goto close_all_path;
2846 err = rtrs_clt_create_path_files(clt_path);
2848 list_del_rcu(&clt_path->s.entry);
2849 rtrs_clt_close_conns(clt_path, true);
2850 free_percpu(clt_path->stats->pcpu_stats);
2851 kfree(clt_path->stats);
2852 free_path(clt_path);
2853 goto close_all_path;
2856 err = alloc_permits(clt);
2858 goto close_all_path;
2863 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
2864 rtrs_clt_destroy_path_files(clt_path, NULL);
2865 rtrs_clt_close_conns(clt_path, true);
2866 kobject_put(&clt_path->kobj);
2868 rtrs_clt_destroy_sysfs_root(clt);
2872 return ERR_PTR(err);
2874 EXPORT_SYMBOL(rtrs_clt_open);
2877 * rtrs_clt_close() - Close a path
2878 * @clt: Session handle. Session is freed upon return.
2880 void rtrs_clt_close(struct rtrs_clt_sess *clt)
2882 struct rtrs_clt_path *clt_path, *tmp;
2884 /* First, forbid sysfs access */
2885 rtrs_clt_destroy_sysfs_root(clt);
2887 /* Now it is safe to iterate over all paths without locks */
2888 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
2889 rtrs_clt_close_conns(clt_path, true);
2890 rtrs_clt_destroy_path_files(clt_path, NULL);
2891 kobject_put(&clt_path->kobj);
2895 EXPORT_SYMBOL(rtrs_clt_close);
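
/*
 * Minimal usage sketch (illustrative only, not part of this file): how an
 * ULP might open a session and close it again.  The callback my_link_ev,
 * the private pointer my_priv, the PDU struct my_iu and the port number
 * are hypothetical; only the RTRS calls and types are real.
 *
 *	static void my_link_ev(void *priv, enum rtrs_clt_link_ev ev)
 *	{
 *		// react to the link going up or down
 *	}
 *
 *	struct rtrs_clt_ops ops = {
 *		.priv    = my_priv,
 *		.link_ev = my_link_ev,
 *	};
 *	struct rtrs_clt_sess *sess;
 *
 *	sess = rtrs_clt_open(&ops, "my_session", paths, paths_num, 1234,
 *			     sizeof(struct my_iu), 5, 10, 0);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 *	...
 *	rtrs_clt_close(sess);
 */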
2897 int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path)
2899 enum rtrs_clt_state old_state;
2903 changed = rtrs_clt_change_state_get_old(clt_path,
2904 RTRS_CLT_RECONNECTING,
2907 clt_path->reconnect_attempts = 0;
2908 queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0);
2910 if (changed || old_state == RTRS_CLT_RECONNECTING) {
2912 * flush_delayed_work() queues pending work for immediate
2913 * execution, so do the flush if we have queued something
2914 * right now or work is pending.
2916 flush_delayed_work(&clt_path->reconnect_dwork);
2917 err = (READ_ONCE(clt_path->state) ==
2918 RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
2924 int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path,
2925 const struct attribute *sysfs_self)
2927 enum rtrs_clt_state old_state;
2931 * Continue stopping the path until its state has been changed to DEAD
2932 * or has been observed as DEAD:
2933 * 1. The state was changed to DEAD - we were fast and nobody
2934 *    invoked rtrs_clt_reconnect(), which could have started
2935 *    reconnecting again.
2936 * 2. The state was observed as DEAD - someone else is removing
2937 *    the path in parallel.
2940 rtrs_clt_close_conns(clt_path, true);
2941 changed = rtrs_clt_change_state_get_old(clt_path,
2944 } while (!changed && old_state != RTRS_CLT_DEAD);
2947 rtrs_clt_remove_path_from_arr(clt_path);
2948 rtrs_clt_destroy_path_files(clt_path, sysfs_self);
2949 kobject_put(&clt_path->kobj);
2955 void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value)
2957 clt->max_reconnect_attempts = (unsigned int)value;
2960 int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt)
2962 return (int)clt->max_reconnect_attempts;
2966 * rtrs_clt_request() - Request data transfer to/from server via RDMA.
2969 * @ops: holds the confirmation callback and the private pointer.
2971 * @permit: Preallocated permit
2972 * @vec: Message that is sent to the server together with the request.
2973 *	 The sum of the lengths of all @vec elements is limited to IO_MSG_SIZE.
2974 *	 Since the message is copied internally it can be allocated on the stack.
2975 * @nr: Number of elements in @vec.
2976 * @data_len: length of data sent to/from server
2977 * @sg: Pages to be sent/received to/from server.
2978 * @sg_cnt: Number of elements in @sg
2984 * On dir=READ the rtrs client requests a data transfer from the server to the
2985 * client. The data that the server responds with is stored in @sg when
2986 * the user receives an %RTRS_CLT_RDMA_EV_RDMA_REQUEST_WRITE_COMPL event.
2987 * On dir=WRITE the rtrs client RDMA-writes the data in @sg to the server side.
2989 int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
2990 struct rtrs_clt_sess *clt, struct rtrs_permit *permit,
2991 const struct kvec *vec, size_t nr, size_t data_len,
2992 struct scatterlist *sg, unsigned int sg_cnt)
2994 struct rtrs_clt_io_req *req;
2995 struct rtrs_clt_path *clt_path;
2997 enum dma_data_direction dma_dir;
2998 int err = -ECONNABORTED, i;
2999 size_t usr_len, hdr_len;
3002 /* Get kvec length */
3003 for (i = 0, usr_len = 0; i < nr; i++)
3004 usr_len += vec[i].iov_len;
3007 hdr_len = sizeof(struct rtrs_msg_rdma_read) +
3008 sg_cnt * sizeof(struct rtrs_sg_desc);
3009 dma_dir = DMA_FROM_DEVICE;
3011 hdr_len = sizeof(struct rtrs_msg_rdma_write);
3012 dma_dir = DMA_TO_DEVICE;
3016 for (path_it_init(&it, clt);
3017 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
3018 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
3021 if (usr_len + hdr_len > clt_path->max_hdr_size) {
3022 rtrs_wrn_rl(clt_path->clt,
3023 "%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
3024 dir == READ ? "Read" : "Write",
3025 usr_len, hdr_len, clt_path->max_hdr_size);
3029 req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv,
3030 vec, usr_len, sg, sg_cnt, data_len,
3033 err = rtrs_clt_read_req(req);
3035 err = rtrs_clt_write_req(req);
3037 req->in_use = false;
3043 path_it_deinit(&it);
3048 EXPORT_SYMBOL(rtrs_clt_request);
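
/*
 * Minimal usage sketch (illustrative only, not part of this file): issuing
 * a WRITE request on an already opened session @sess.  The request object
 * my_req, its header, the confirmation callback my_conf_fn and the
 * scatterlist sg/sg_cnt are hypothetical; RTRS_IO_CON and RTRS_PERMIT_WAIT
 * are assumed to be the constants declared in rtrs.h.
 *
 *	struct rtrs_clt_req_ops req_ops = {
 *		.priv    = my_req,
 *		.conf_fn = my_conf_fn,
 *	};
 *	struct rtrs_permit *permit;
 *	struct kvec vec = {
 *		.iov_base = &my_req->hdr,
 *		.iov_len  = sizeof(my_req->hdr),
 *	};
 *
 *	permit = rtrs_clt_get_permit(sess, RTRS_IO_CON, RTRS_PERMIT_WAIT);
 *	err = rtrs_clt_request(WRITE, &req_ops, sess, permit, &vec, 1,
 *			       data_len, sg, sg_cnt);
 *	if (err)
 *		rtrs_clt_put_permit(sess, permit);
 */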
3050 int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index)
3052 /* If there is no path, return -1 so that the block layer does not retry */
3054 struct rtrs_con *con;
3055 struct rtrs_clt_path *clt_path;
3059 for (path_it_init(&it, clt);
3060 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
3061 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
3064 con = clt_path->s.con[index + 1];
3065 cnt = ib_process_cq_direct(con->cq, -1);
3069 path_it_deinit(&it);
3074 EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct);
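
/*
 * Usage sketch (illustrative): a ULP that opened the session with
 * nr_poll_queues > 0 can drive completions of a polling queue directly,
 * e.g. from a block layer poll callback:
 *
 *	cnt = rtrs_clt_rdma_cq_direct(sess, queue_index);
 *
 * A negative return value means no connected path exists, so the caller
 * should not retry.
 */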
3077 * rtrs_clt_query() - queries RTRS session attributes
3078 * @clt: session pointer
3079 * @attr: query results for session attributes.
3082 * -ECOMM no connection to the server
3084 int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr)
3086 if (!rtrs_clt_is_connected(clt))
3089 attr->queue_depth = clt->queue_depth;
3090 attr->max_segments = clt->max_segments;
3091 /* Cap max_io_size to min of remote buffer size and the fr pages */
3092 attr->max_io_size = min_t(int, clt->max_io_size,
3093 clt->max_segments * SZ_4K);
3097 EXPORT_SYMBOL(rtrs_clt_query);
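
/*
 * Usage sketch (illustrative): once connected, a ULP typically queries the
 * session attributes to size its IO:
 *
 *	struct rtrs_attrs attr;
 *
 *	if (rtrs_clt_query(sess, &attr))
 *		return -ECOMM;
 *	max_io_size = attr.max_io_size;
 *	queue_depth = attr.queue_depth;
 */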
3099 int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
3100 struct rtrs_addr *addr)
3102 struct rtrs_clt_path *clt_path;
3105 clt_path = alloc_path(clt, addr, nr_cpu_ids, 0);
3106 if (IS_ERR(clt_path))
3107 return PTR_ERR(clt_path);
3109 mutex_lock(&clt->paths_mutex);
3110 if (clt->paths_num == 0) {
3112 * When all the paths have been removed from a session,
3113 * adding the first path back is like starting a new session
3114 * for the storage server.
3116 clt_path->for_new_clt = 1;
3119 mutex_unlock(&clt->paths_mutex);
3122 * It is totally safe to add the path in the CONNECTING state: incoming
3123 * IO will never grab it. It is also very important to add the
3124 * path before init, since init fires the LINK_CONNECTED event.
3126 rtrs_clt_add_path_to_arr(clt_path);
3128 err = init_path(clt_path);
3132 err = rtrs_clt_create_path_files(clt_path);
3139 rtrs_clt_remove_path_from_arr(clt_path);
3140 rtrs_clt_close_conns(clt_path, true);
3141 free_percpu(clt_path->stats->pcpu_stats);
3142 kfree(clt_path->stats);
3143 free_path(clt_path);
3148 static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
3150 if (!(dev->ib_dev->attrs.device_cap_flags &
3151 IB_DEVICE_MEM_MGT_EXTENSIONS)) {
3152 pr_err("Memory registrations not supported.\n");
3159 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
3160 .init = rtrs_clt_ib_dev_init
3163 static int __init rtrs_client_init(void)
3165 rtrs_rdma_dev_pd_init(0, &dev_pd);
3167 rtrs_clt_dev_class = class_create(THIS_MODULE, "rtrs-client");
3168 if (IS_ERR(rtrs_clt_dev_class)) {
3169 pr_err("Failed to create rtrs-client dev class\n");
3170 return PTR_ERR(rtrs_clt_dev_class);
3172 rtrs_wq = alloc_workqueue("rtrs_client_wq", 0, 0);
3174 class_destroy(rtrs_clt_dev_class);
3181 static void __exit rtrs_client_exit(void)
3183 destroy_workqueue(rtrs_wq);
3184 class_destroy(rtrs_clt_dev_class);
3185 rtrs_rdma_dev_pd_deinit(&dev_pd);
3188 module_init(rtrs_client_init);
3189 module_exit(rtrs_client_exit);