// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) 2017, Microsoft Corporation.
 *   Copyright (C) 2018, LG Electronics.
 *
 *   Author(s): Long Li <longli@microsoft.com>,
 *		Hyunchul Lee <hyc.lee@gmail.com>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 */
#define SUBMOD_NAME	"smb_direct"

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include "glob.h"
#include "connection.h"
#include "smb_common.h"
#include "smbstatus.h"
#include "transport_rdma.h"
#define SMB_DIRECT_PORT_IWARP		5445
#define SMB_DIRECT_PORT_INFINIBAND	445

#define SMB_DIRECT_VERSION_LE		cpu_to_le16(0x0100)

/* SMB_DIRECT negotiation timeout in seconds */
#define SMB_DIRECT_NEGOTIATE_TIMEOUT		120

#define SMB_DIRECT_MAX_SEND_SGES		8
#define SMB_DIRECT_MAX_RECV_SGES		1
/*
 * Default maximum number of RDMA read/write operations outstanding on this
 * connection. This value may be decreased during QP creation if the
 * hardware limit is lower.
 */
#define SMB_DIRECT_CM_INITIATOR_DEPTH		8

/* Maximum number of retries on data transfer operations */
#define SMB_DIRECT_CM_RETRY			6
/* No need to retry on Receiver Not Ready since SMB_DIRECT manages credits */
#define SMB_DIRECT_CM_RNR_RETRY		0
/*
 * User configurable initial values per SMB_DIRECT transport connection
 * as defined in [MS-SMBD] 3.1.1.1.
 * These may change after SMB_DIRECT negotiation.
 */

/* Use port 445 as the SMB Direct port by default */
static int smb_direct_port = SMB_DIRECT_PORT_INFINIBAND;
/* The local peer's maximum number of credits to grant to the peer */
static int smb_direct_receive_credit_max = 255;

/* The number of send credits to request from the remote peer */
static int smb_direct_send_credit_target = 255;

/* The maximum size of a single message that can be sent to the remote peer */
static int smb_direct_max_send_size = 8192;

/* The maximum fragmented upper-layer payload receive size supported */
static int smb_direct_max_fragmented_recv_size = 1024 * 1024;

/* The maximum size of a single message that can be received */
static int smb_direct_max_receive_size = 8192;

static int smb_direct_max_read_write_size = 524224;

static int smb_direct_max_outstanding_rw_ops = 8;
static LIST_HEAD(smb_direct_device_list);
static DEFINE_RWLOCK(smb_direct_device_lock);

struct smb_direct_device {
	struct ib_device	*ib_dev;
	struct list_head	list;
};

static struct smb_direct_listener {
	struct rdma_cm_id	*cm_id;
} smb_direct_listener;
static struct workqueue_struct *smb_direct_wq;

enum smb_direct_status {
	SMB_DIRECT_CS_NEW = 0,
	SMB_DIRECT_CS_CONNECTED,
	SMB_DIRECT_CS_DISCONNECTING,
	SMB_DIRECT_CS_DISCONNECTED,
};
struct smb_direct_transport {
	struct ksmbd_transport	transport;

	enum smb_direct_status	status;
	bool			full_packet_received;
	wait_queue_head_t	wait_status;

	struct rdma_cm_id	*cm_id;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	struct ib_pd		*pd;
	struct ib_qp		*qp;

	int			max_send_size;
	int			max_recv_size;
	int			max_fragmented_send_size;
	int			max_fragmented_recv_size;
	int			max_rdma_rw_size;

	spinlock_t		reassembly_queue_lock;
	struct list_head	reassembly_queue;
	int			reassembly_data_length;
	int			reassembly_queue_length;
	int			first_entry_offset;
	wait_queue_head_t	wait_reassembly_queue;

	spinlock_t		receive_credit_lock;
	int			recv_credits;
	int			count_avail_recvmsg;
	int			recv_credit_max;
	int			recv_credit_target;

	spinlock_t		recvmsg_queue_lock;
	struct list_head	recvmsg_queue;

	spinlock_t		empty_recvmsg_queue_lock;
	struct list_head	empty_recvmsg_queue;

	int			send_credit_target;
	atomic_t		send_credits;
	spinlock_t		lock_new_recv_credits;
	int			new_recv_credits;
	atomic_t		rw_avail_ops;

	wait_queue_head_t	wait_send_credits;
	wait_queue_head_t	wait_rw_avail_ops;

	mempool_t		*sendmsg_mempool;
	struct kmem_cache	*sendmsg_cache;
	mempool_t		*recvmsg_mempool;
	struct kmem_cache	*recvmsg_cache;

	wait_queue_head_t	wait_send_payload_pending;
	atomic_t		send_payload_pending;
	wait_queue_head_t	wait_send_pending;
	atomic_t		send_pending;

	struct delayed_work	post_recv_credits_work;
	struct work_struct	send_immediate_work;
	struct work_struct	disconnect_work;

	bool			negotiation_requested;
};

#define KSMBD_TRANS(t)	((struct ksmbd_transport *)&((t)->transport))
enum smb_direct_msg_type {
	SMB_DIRECT_MSG_NEGOTIATE_REQ = 0,
	SMB_DIRECT_MSG_DATA_TRANSFER
};

static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops;
struct smb_direct_send_ctx {
	struct list_head	msg_list;
	int			wr_cnt;
	bool			need_invalidate_rkey;
	unsigned int		remote_key;
};
struct smb_direct_sendmsg {
	struct smb_direct_transport	*transport;
	struct ib_send_wr	wr;
	struct list_head	list;
	int			num_sge;
	struct ib_sge		sge[SMB_DIRECT_MAX_SEND_SGES];
	struct ib_cqe		cqe;
	u8			packet[];
};
struct smb_direct_recvmsg {
	struct smb_direct_transport	*transport;
	struct list_head	list;
	int			type;
	struct ib_sge		sge;
	struct ib_cqe		cqe;
	bool			first_segment;
	u8			packet[];
};
struct smb_direct_rdma_rw_msg {
	struct smb_direct_transport	*t;
	struct ib_cqe		cqe;
	struct completion	*completion;
	struct rdma_rw_ctx	rw_ctx;
	struct sg_table		sgt;
	struct scatterlist	sg_list[];
};
static inline int get_buf_page_count(void *buf, int size)
{
	return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
		(uintptr_t)buf / PAGE_SIZE;
}
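/*
 * Worked example: with PAGE_SIZE == 4096, a buffer at address 0x1100
 * (4352) of size 8192 ends at byte 12543 and therefore touches pages
 * 1 through 3: DIV_ROUND_UP(4352 + 8192, 4096) - 4352 / 4096 = 4 - 1 = 3.
 */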
static void smb_direct_destroy_pools(struct smb_direct_transport *transport);
static void smb_direct_post_recv_credits(struct work_struct *work);
static int smb_direct_post_send_data(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     struct kvec *iov, int niov,
				     int remaining_data_length);
static inline struct smb_direct_transport *
smb_trans_direct_transfort(struct ksmbd_transport *t)
{
	return container_of(t, struct smb_direct_transport, transport);
}

static inline void
*smb_direct_recvmsg_payload(struct smb_direct_recvmsg *recvmsg)
{
	return (void *)recvmsg->packet;
}
static inline bool is_receive_credit_post_required(int receive_credits,
						   int avail_recvmsg_count)
{
	return receive_credits <= (smb_direct_receive_credit_max >> 3) &&
		avail_recvmsg_count >= (receive_credits >> 2);
}
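/*
 * With the default smb_direct_receive_credit_max of 255, a refill is
 * triggered once the peer is down to 31 (255 >> 3) credits or fewer,
 * and only if at least a quarter of that many receive buffers are
 * available to back it (31 remaining credits require >= 7 free buffers).
 */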
static struct
smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg = NULL;

	spin_lock(&t->recvmsg_queue_lock);
	if (!list_empty(&t->recvmsg_queue)) {
		recvmsg = list_first_entry(&t->recvmsg_queue,
					   struct smb_direct_recvmsg,
					   list);
		list_del(&recvmsg->list);
	}
	spin_unlock(&t->recvmsg_queue_lock);
	return recvmsg;
}
static void put_recvmsg(struct smb_direct_transport *t,
			struct smb_direct_recvmsg *recvmsg)
{
	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
			    recvmsg->sge.length, DMA_FROM_DEVICE);

	spin_lock(&t->recvmsg_queue_lock);
	list_add(&recvmsg->list, &t->recvmsg_queue);
	spin_unlock(&t->recvmsg_queue_lock);
}
static struct
smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg = NULL;

	spin_lock(&t->empty_recvmsg_queue_lock);
	if (!list_empty(&t->empty_recvmsg_queue)) {
		recvmsg = list_first_entry(&t->empty_recvmsg_queue,
					   struct smb_direct_recvmsg, list);
		list_del(&recvmsg->list);
	}
	spin_unlock(&t->empty_recvmsg_queue_lock);
	return recvmsg;
}
static void put_empty_recvmsg(struct smb_direct_transport *t,
			      struct smb_direct_recvmsg *recvmsg)
{
	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
			    recvmsg->sge.length, DMA_FROM_DEVICE);

	spin_lock(&t->empty_recvmsg_queue_lock);
	list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
	spin_unlock(&t->empty_recvmsg_queue_lock);
}
static void enqueue_reassembly(struct smb_direct_transport *t,
			       struct smb_direct_recvmsg *recvmsg,
			       int data_length)
{
	spin_lock(&t->reassembly_queue_lock);
	list_add_tail(&recvmsg->list, &t->reassembly_queue);
	t->reassembly_queue_length++;
	/*
	 * Make sure reassembly_data_length is updated after list and
	 * reassembly_queue_length are updated. On the dequeue side
	 * reassembly_data_length is checked without a lock to determine
	 * if reassembly_queue_length and list are up to date.
	 */
	virt_wmb();
	t->reassembly_data_length += data_length;
	spin_unlock(&t->reassembly_queue_lock);
}
static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t)
{
	if (!list_empty(&t->reassembly_queue))
		return list_first_entry(&t->reassembly_queue,
				struct smb_direct_recvmsg, list);
	else
		return NULL;
}
static void smb_direct_disconnect_rdma_work(struct work_struct *work)
{
	struct smb_direct_transport *t =
		container_of(work, struct smb_direct_transport,
			     disconnect_work);

	if (t->status == SMB_DIRECT_CS_CONNECTED) {
		t->status = SMB_DIRECT_CS_DISCONNECTING;
		rdma_disconnect(t->cm_id);
	}
}

static void
smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t)
{
	if (t->status == SMB_DIRECT_CS_CONNECTED)
		queue_work(smb_direct_wq, &t->disconnect_work);
}
static void smb_direct_send_immediate_work(struct work_struct *work)
{
	struct smb_direct_transport *t = container_of(work,
			struct smb_direct_transport, send_immediate_work);

	if (t->status != SMB_DIRECT_CS_CONNECTED)
		return;

	smb_direct_post_send_data(t, NULL, NULL, 0, 0);
}
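/*
 * Posting with no iovecs, as above, emits a data-transfer message that
 * carries only the SMB_DIRECT header. Its sole purpose is to grant the
 * peer any credits accumulated in new_recv_credits without waiting for
 * payload traffic in the other direction.
 */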
static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
{
	struct smb_direct_transport *t;
	struct ksmbd_conn *conn;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->cm_id = cm_id;
	cm_id->context = t;

	t->status = SMB_DIRECT_CS_NEW;
	init_waitqueue_head(&t->wait_status);

	spin_lock_init(&t->reassembly_queue_lock);
	INIT_LIST_HEAD(&t->reassembly_queue);
	t->reassembly_data_length = 0;
	t->reassembly_queue_length = 0;
	init_waitqueue_head(&t->wait_reassembly_queue);
	init_waitqueue_head(&t->wait_send_credits);
	init_waitqueue_head(&t->wait_rw_avail_ops);

	spin_lock_init(&t->receive_credit_lock);
	spin_lock_init(&t->recvmsg_queue_lock);
	INIT_LIST_HEAD(&t->recvmsg_queue);

	spin_lock_init(&t->empty_recvmsg_queue_lock);
	INIT_LIST_HEAD(&t->empty_recvmsg_queue);

	init_waitqueue_head(&t->wait_send_payload_pending);
	atomic_set(&t->send_payload_pending, 0);
	init_waitqueue_head(&t->wait_send_pending);
	atomic_set(&t->send_pending, 0);

	spin_lock_init(&t->lock_new_recv_credits);

	INIT_DELAYED_WORK(&t->post_recv_credits_work,
			  smb_direct_post_recv_credits);
	INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work);
	INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work);

	conn = ksmbd_conn_alloc();
	if (!conn)
		goto err;
	conn->transport = KSMBD_TRANS(t);
	KSMBD_TRANS(t)->conn = conn;
	KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops;
	return t;
err:
	kfree(t);
	return NULL;
}
static void free_transport(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg;

	wake_up_interruptible(&t->wait_send_credits);

	ksmbd_debug(RDMA, "wait for all send posted to IB to finish\n");
	wait_event(t->wait_send_payload_pending,
		   atomic_read(&t->send_payload_pending) == 0);
	wait_event(t->wait_send_pending,
		   atomic_read(&t->send_pending) == 0);

	cancel_work_sync(&t->disconnect_work);
	cancel_delayed_work_sync(&t->post_recv_credits_work);
	cancel_work_sync(&t->send_immediate_work);

	if (t->qp) {
		ib_drain_qp(t->qp);
		ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs);
		ib_destroy_qp(t->qp);
	}

	ksmbd_debug(RDMA, "drain the reassembly queue\n");
	do {
		spin_lock(&t->reassembly_queue_lock);
		recvmsg = get_first_reassembly(t);
		if (recvmsg) {
			list_del(&recvmsg->list);
			spin_unlock(&t->reassembly_queue_lock);
			put_recvmsg(t, recvmsg);
		} else {
			spin_unlock(&t->reassembly_queue_lock);
		}
	} while (recvmsg);
	t->reassembly_data_length = 0;

	if (t->send_cq)
		ib_free_cq(t->send_cq);
	if (t->recv_cq)
		ib_free_cq(t->recv_cq);
	if (t->pd)
		ib_dealloc_pd(t->pd);
	if (t->cm_id)
		rdma_destroy_id(t->cm_id);

	smb_direct_destroy_pools(t);
	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
	kfree(t);
}
static struct smb_direct_sendmsg
*smb_direct_alloc_sendmsg(struct smb_direct_transport *t)
{
	struct smb_direct_sendmsg *msg;

	msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOMEM);
	msg->transport = t;
	INIT_LIST_HEAD(&msg->list);
	msg->num_sge = 0;
	return msg;
}
static void smb_direct_free_sendmsg(struct smb_direct_transport *t,
				    struct smb_direct_sendmsg *msg)
{
	int i;

	if (msg->num_sge > 0) {
		ib_dma_unmap_single(t->cm_id->device,
				    msg->sge[0].addr, msg->sge[0].length,
				    DMA_TO_DEVICE);
		for (i = 1; i < msg->num_sge; i++)
			ib_dma_unmap_page(t->cm_id->device,
					  msg->sge[i].addr, msg->sge[i].length,
					  DMA_TO_DEVICE);
	}
	mempool_free(msg, t->sendmsg_mempool);
}
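/*
 * The asymmetric unmapping mirrors how the SGEs are built: sge[0] always
 * carries the SMB_DIRECT header, mapped with ib_dma_map_single(), while
 * any further SGEs describe payload pages mapped page-wise, so those are
 * released with ib_dma_unmap_page().
 */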
static int smb_direct_check_recvmsg(struct smb_direct_recvmsg *recvmsg)
{
	switch (recvmsg->type) {
	case SMB_DIRECT_MSG_DATA_TRANSFER: {
		struct smb_direct_data_transfer *req =
			(struct smb_direct_data_transfer *)recvmsg->packet;
		struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet
				+ le32_to_cpu(req->data_offset));
		ksmbd_debug(RDMA,
			    "CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n",
			    le16_to_cpu(req->credits_granted),
			    le16_to_cpu(req->credits_requested),
			    le32_to_cpu(req->data_length),
			    le32_to_cpu(req->remaining_data_length),
			    hdr->ProtocolId, hdr->Command);
		break;
	}
	case SMB_DIRECT_MSG_NEGOTIATE_REQ: {
		struct smb_direct_negotiate_req *req =
			(struct smb_direct_negotiate_req *)recvmsg->packet;
		ksmbd_debug(RDMA,
			    "MinVersion: %u, MaxVersion: %u, CreditRequested: %u, MaxSendSize: %u, MaxRecvSize: %u, MaxFragmentedSize: %u\n",
			    le16_to_cpu(req->min_version),
			    le16_to_cpu(req->max_version),
			    le16_to_cpu(req->credits_requested),
			    le32_to_cpu(req->preferred_send_size),
			    le32_to_cpu(req->max_receive_size),
			    le32_to_cpu(req->max_fragmented_size));
		if (le16_to_cpu(req->min_version) > 0x0100 ||
		    le16_to_cpu(req->max_version) < 0x0100)
			return -EOPNOTSUPP;
		if (le16_to_cpu(req->credits_requested) <= 0 ||
		    le32_to_cpu(req->max_receive_size) <= 128 ||
		    le32_to_cpu(req->max_fragmented_size) <=
		    128 * 1024)
			return -ECONNABORTED;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
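/*
 * Summarized, a negotiate request is accepted only if the client's
 * [MinVersion, MaxVersion] range covers 0x0100 (SMB Direct 1.0), at
 * least one credit is requested, and the client can receive more than
 * 128 bytes per message and more than 128 KiB per fragmented payload.
 */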
static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_transport *t;

	recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);
	t = recvmsg->transport;

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("Recv error. status='%s (%d)' opcode=%d\n",
			       ib_wc_status_msg(wc->status), wc->status,
			       wc->opcode);
			smb_direct_disconnect_rdma_connection(t);
		}
		put_empty_recvmsg(t, recvmsg);
		return;
	}

	ksmbd_debug(RDMA, "Recv completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	ib_dma_sync_single_for_cpu(wc->qp->device, recvmsg->sge.addr,
				   recvmsg->sge.length, DMA_FROM_DEVICE);

	switch (recvmsg->type) {
	case SMB_DIRECT_MSG_NEGOTIATE_REQ:
		if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
			put_empty_recvmsg(t, recvmsg);
			return;
		}
		t->negotiation_requested = true;
		t->full_packet_received = true;
		enqueue_reassembly(t, recvmsg, 0);
		wake_up_interruptible(&t->wait_status);
		break;
	case SMB_DIRECT_MSG_DATA_TRANSFER: {
		struct smb_direct_data_transfer *data_transfer =
			(struct smb_direct_data_transfer *)recvmsg->packet;
		unsigned int data_length;
		int avail_recvmsg_count, receive_credits;

		if (wc->byte_len <
		    offsetof(struct smb_direct_data_transfer, padding)) {
			put_empty_recvmsg(t, recvmsg);
			return;
		}

		data_length = le32_to_cpu(data_transfer->data_length);
		if (data_length) {
			if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
			    (u64)data_length) {
				put_empty_recvmsg(t, recvmsg);
				return;
			}

			if (t->full_packet_received)
				recvmsg->first_segment = true;

			if (le32_to_cpu(data_transfer->remaining_data_length))
				t->full_packet_received = false;
			else
				t->full_packet_received = true;

			enqueue_reassembly(t, recvmsg, (int)data_length);
			wake_up_interruptible(&t->wait_reassembly_queue);

			spin_lock(&t->receive_credit_lock);
			receive_credits = --(t->recv_credits);
			avail_recvmsg_count = t->count_avail_recvmsg;
			spin_unlock(&t->receive_credit_lock);
		} else {
			put_empty_recvmsg(t, recvmsg);

			spin_lock(&t->receive_credit_lock);
			receive_credits = --(t->recv_credits);
			avail_recvmsg_count = ++(t->count_avail_recvmsg);
			spin_unlock(&t->receive_credit_lock);
		}

		t->recv_credit_target =
				le16_to_cpu(data_transfer->credits_requested);
		atomic_add(le16_to_cpu(data_transfer->credits_granted),
			   &t->send_credits);

		if (le16_to_cpu(data_transfer->flags) &
		    SMB_DIRECT_RESPONSE_REQUESTED)
			queue_work(smb_direct_wq, &t->send_immediate_work);

		if (atomic_read(&t->send_credits) > 0)
			wake_up_interruptible(&t->wait_send_credits);

		if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
			mod_delayed_work(smb_direct_wq,
					 &t->post_recv_credits_work, 0);
		break;
	}
	default:
		break;
	}
}
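/*
 * Receive-path credit accounting in brief: every arriving message
 * consumes one local receive credit. The embedded credits_granted field
 * replenishes our send budget, credits_requested becomes the new target
 * for how many receives the peer wants us to keep posted, and
 * post_recv_credits_work tops the pool back up once the heuristic in
 * is_receive_credit_post_required() fires.
 */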
static int smb_direct_post_recv(struct smb_direct_transport *t,
				struct smb_direct_recvmsg *recvmsg)
{
	struct ib_recv_wr wr;
	int ret;

	recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device,
					      recvmsg->packet, t->max_recv_size,
					      DMA_FROM_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr);
	if (ret)
		return ret;
	recvmsg->sge.length = t->max_recv_size;
	recvmsg->sge.lkey = t->pd->local_dma_lkey;
	recvmsg->cqe.done = recv_done;

	wr.wr_cqe = &recvmsg->cqe;
	wr.next = NULL;
	wr.sg_list = &recvmsg->sge;
	wr.num_sge = 1;

	ret = ib_post_recv(t->qp, &wr, NULL);
	if (ret) {
		pr_err("Can't post recv: %d\n", ret);
		ib_dma_unmap_single(t->cm_id->device,
				    recvmsg->sge.addr, recvmsg->sge.length,
				    DMA_FROM_DEVICE);
		smb_direct_disconnect_rdma_connection(t);
		return ret;
	}
	return ret;
}
static int smb_direct_read(struct ksmbd_transport *t, char *buf,
			   unsigned int size)
{
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_data_transfer *data_transfer;
	int to_copy, to_read, data_read, offset;
	u32 data_length, remaining_data_length, data_offset;
	int rc;
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);

again:
	if (st->status != SMB_DIRECT_CS_CONNECTED) {
		pr_err("disconnected\n");
		return -ENOTCONN;
	}

	/*
	 * No need to hold the reassembly queue lock all the time as we are
	 * the only one reading from the front of the queue. The transport
	 * may add more entries to the back of the queue at the same time.
	 */
	if (st->reassembly_data_length >= size) {
		int queue_length;
		int queue_removed = 0;

		/*
		 * Need to make sure reassembly_data_length is read before
		 * reading reassembly_queue_length and calling
		 * get_first_reassembly. This call is lock free
		 * as we never read from the end of the queue, which is
		 * being updated in SOFTIRQ context as more data is received.
		 */
		virt_rmb();
		queue_length = st->reassembly_queue_length;
		data_read = 0;
		to_read = size;
		offset = st->first_entry_offset;
		while (data_read < size) {
			recvmsg = get_first_reassembly(st);
			data_transfer = smb_direct_recvmsg_payload(recvmsg);
			data_length = le32_to_cpu(data_transfer->data_length);
			remaining_data_length =
				le32_to_cpu(data_transfer->remaining_data_length);
			data_offset = le32_to_cpu(data_transfer->data_offset);

			/*
			 * The upper layer expects an RFC 1002 length at the
			 * beginning of the payload. Return it to indicate
			 * the total length of the packet. This minimizes the
			 * change to the upper-layer packet processing logic,
			 * and will eventually be removed when an intermediate
			 * transport layer is added.
			 */
			if (recvmsg->first_segment && size == 4) {
				unsigned int rfc1002_len =
					data_length + remaining_data_length;
				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
				data_read = 4;
				recvmsg->first_segment = false;
				ksmbd_debug(RDMA,
					    "returning rfc1002 length %d\n",
					    rfc1002_len);
				goto read_rfc1002_done;
			}

			to_copy = min_t(int, data_length - offset, to_read);
			memcpy(buf + data_read, (char *)data_transfer + data_offset + offset,
			       to_copy);

			/* move on to the next buffer? */
			if (to_copy == data_length - offset) {
				queue_length--;
				/*
				 * No need to lock if we are not at the
				 * end of the queue
				 */
				if (queue_length)
					list_del(&recvmsg->list);
				else {
					spin_lock_irq(&st->reassembly_queue_lock);
					list_del(&recvmsg->list);
					spin_unlock_irq(&st->reassembly_queue_lock);
				}
				queue_removed++;
				put_recvmsg(st, recvmsg);
				offset = 0;
			} else {
				offset += to_copy;
			}

			to_read -= to_copy;
			data_read += to_copy;
		}

		spin_lock_irq(&st->reassembly_queue_lock);
		st->reassembly_data_length -= data_read;
		st->reassembly_queue_length -= queue_removed;
		spin_unlock_irq(&st->reassembly_queue_lock);

		spin_lock(&st->receive_credit_lock);
		st->count_avail_recvmsg += queue_removed;
		if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) {
			spin_unlock(&st->receive_credit_lock);
			mod_delayed_work(smb_direct_wq,
					 &st->post_recv_credits_work, 0);
		} else {
			spin_unlock(&st->receive_credit_lock);
		}

		st->first_entry_offset = offset;
		ksmbd_debug(RDMA,
			    "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
			    data_read, st->reassembly_data_length,
			    st->first_entry_offset);
read_rfc1002_done:
		return data_read;
	}

	ksmbd_debug(RDMA, "wait_event on more data\n");
	rc = wait_event_interruptible(st->wait_reassembly_queue,
				      st->reassembly_data_length >= size ||
				      st->status != SMB_DIRECT_CS_CONNECTED);
	if (rc)
		return -EINTR;

	goto again;
}
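/*
 * The virt_rmb() in the read path above pairs with the virt_wmb() in
 * enqueue_reassembly(): once reassembly_data_length is observed to be
 * large enough, the list entries and reassembly_queue_length written
 * before it are guaranteed to be visible, which is what makes the
 * lock-free dequeue safe.
 */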
static void smb_direct_post_recv_credits(struct work_struct *work)
{
	struct smb_direct_transport *t = container_of(work,
			struct smb_direct_transport, post_recv_credits_work.work);
	struct smb_direct_recvmsg *recvmsg;
	int receive_credits, credits = 0;
	int ret;
	int use_free = 1;

	spin_lock(&t->receive_credit_lock);
	receive_credits = t->recv_credits;
	spin_unlock(&t->receive_credit_lock);

	if (receive_credits < t->recv_credit_target) {
		while (true) {
			if (use_free)
				recvmsg = get_free_recvmsg(t);
			else
				recvmsg = get_empty_recvmsg(t);
			if (!recvmsg) {
				if (use_free) {
					use_free = 0;
					continue;
				} else {
					break;
				}
			}

			recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
			recvmsg->first_segment = false;

			ret = smb_direct_post_recv(t, recvmsg);
			if (ret) {
				pr_err("Can't post recv: %d\n", ret);
				put_recvmsg(t, recvmsg);
				break;
			}
			credits++;
		}
	}

	spin_lock(&t->receive_credit_lock);
	t->recv_credits += credits;
	t->count_avail_recvmsg -= credits;
	spin_unlock(&t->receive_credit_lock);

	spin_lock(&t->lock_new_recv_credits);
	t->new_recv_credits += credits;
	spin_unlock(&t->lock_new_recv_credits);

	if (credits)
		queue_work(smb_direct_wq, &t->send_immediate_work);
}
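/*
 * Two pools feed this refill loop: recvmsg_queue holds fully recycled
 * buffers, while empty_recvmsg_queue collects buffers handed back on
 * paths where the message carried no reassembly data (errors and
 * credit-only messages). Free buffers are preferred; the empty pool is
 * drained only once the free pool is exhausted.
 */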
static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smb_direct_sendmsg *sendmsg, *sibling;
	struct smb_direct_transport *t;
	struct list_head *pos, *prev, *end;

	sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe);
	t = sendmsg->transport;

	ksmbd_debug(RDMA, "Send completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
		pr_err("Send error. status='%s (%d)', opcode=%d\n",
		       ib_wc_status_msg(wc->status), wc->status,
		       wc->opcode);
		smb_direct_disconnect_rdma_connection(t);
	}

	if (sendmsg->num_sge > 1) {
		if (atomic_dec_and_test(&t->send_payload_pending))
			wake_up(&t->wait_send_payload_pending);
	} else {
		if (atomic_dec_and_test(&t->send_pending))
			wake_up(&t->wait_send_pending);
	}

	/* iterate and free the list of messages in reverse. the list's head
	 * is invalid.
	 */
	for (pos = &sendmsg->list, prev = pos->prev, end = sendmsg->list.next;
	     prev != end; pos = prev, prev = prev->prev) {
		sibling = container_of(pos, struct smb_direct_sendmsg, list);
		smb_direct_free_sendmsg(t, sibling);
	}

	sibling = container_of(pos, struct smb_direct_sendmsg, list);
	smb_direct_free_sendmsg(t, sibling);
}
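/*
 * Only the last WR of a chained batch is posted with IB_SEND_SIGNALED
 * (see smb_direct_flush_send_list()), so one completion here stands in
 * for the whole chain. The messages were linked through their list
 * members before posting, which is why a single CQE frees all siblings.
 */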
static int manage_credits_prior_sending(struct smb_direct_transport *t)
{
	int new_credits;

	spin_lock(&t->lock_new_recv_credits);
	new_credits = t->new_recv_credits;
	t->new_recv_credits = 0;
	spin_unlock(&t->lock_new_recv_credits);

	return new_credits;
}
static int smb_direct_post_send(struct smb_direct_transport *t,
				struct ib_send_wr *wr)
{
	int ret;

	if (wr->num_sge > 1)
		atomic_inc(&t->send_payload_pending);
	else
		atomic_inc(&t->send_pending);

	ret = ib_post_send(t->qp, wr, NULL);
	if (ret) {
		pr_err("failed to post send: %d\n", ret);
		if (wr->num_sge > 1) {
			if (atomic_dec_and_test(&t->send_payload_pending))
				wake_up(&t->wait_send_payload_pending);
		} else {
			if (atomic_dec_and_test(&t->send_pending))
				wake_up(&t->wait_send_pending);
		}
		smb_direct_disconnect_rdma_connection(t);
	}
	return ret;
}
static void smb_direct_send_ctx_init(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     bool need_invalidate_rkey,
				     unsigned int remote_key)
{
	INIT_LIST_HEAD(&send_ctx->msg_list);
	send_ctx->wr_cnt = 0;
	send_ctx->need_invalidate_rkey = need_invalidate_rkey;
	send_ctx->remote_key = remote_key;
}
static int smb_direct_flush_send_list(struct smb_direct_transport *t,
				      struct smb_direct_send_ctx *send_ctx,
				      bool is_last)
{
	struct smb_direct_sendmsg *first, *last;
	int ret;

	if (list_empty(&send_ctx->msg_list))
		return 0;

	first = list_first_entry(&send_ctx->msg_list,
				 struct smb_direct_sendmsg,
				 list);
	last = list_last_entry(&send_ctx->msg_list,
			       struct smb_direct_sendmsg,
			       list);

	last->wr.send_flags = IB_SEND_SIGNALED;
	last->wr.wr_cqe = &last->cqe;
	if (is_last && send_ctx->need_invalidate_rkey) {
		last->wr.opcode = IB_WR_SEND_WITH_INV;
		last->wr.ex.invalidate_rkey = send_ctx->remote_key;
	}

	ret = smb_direct_post_send(t, &first->wr);
	if (!ret) {
		smb_direct_send_ctx_init(t, send_ctx,
					 send_ctx->need_invalidate_rkey,
					 send_ctx->remote_key);
	} else {
		atomic_add(send_ctx->wr_cnt, &t->send_credits);
		wake_up(&t->wait_send_credits);
		list_for_each_entry_safe(first, last, &send_ctx->msg_list,
					 list) {
			smb_direct_free_sendmsg(t, first);
		}
	}
	return ret;
}
static int wait_for_credits(struct smb_direct_transport *t,
			    wait_queue_head_t *waitq, atomic_t *credits)
{
	int ret;

	do {
		if (atomic_dec_return(credits) >= 0)
			return 0;

		atomic_inc(credits);
		ret = wait_event_interruptible(*waitq,
					       atomic_read(credits) > 0 ||
					       t->status != SMB_DIRECT_CS_CONNECTED);

		if (t->status != SMB_DIRECT_CS_CONNECTED)
			return -ENOTCONN;
		else if (ret < 0)
			return ret;
	} while (true);
}
static int wait_for_send_credits(struct smb_direct_transport *t,
				 struct smb_direct_send_ctx *send_ctx)
{
	int ret;

	if (send_ctx &&
	    (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) {
		ret = smb_direct_flush_send_list(t, send_ctx, false);
		if (ret)
			return ret;
	}

	return wait_for_credits(t, &t->wait_send_credits, &t->send_credits);
}
static int smb_direct_create_header(struct smb_direct_transport *t,
				    int size, int remaining_data_length,
				    struct smb_direct_sendmsg **sendmsg_out)
{
	struct smb_direct_sendmsg *sendmsg;
	struct smb_direct_data_transfer *packet;
	int header_length;
	int ret;

	sendmsg = smb_direct_alloc_sendmsg(t);
	if (IS_ERR(sendmsg))
		return PTR_ERR(sendmsg);

	/* Fill in the packet header */
	packet = (struct smb_direct_data_transfer *)sendmsg->packet;
	packet->credits_requested = cpu_to_le16(t->send_credit_target);
	packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));

	packet->flags = 0;
	packet->reserved = 0;
	if (!size)
		packet->data_offset = 0;
	else
		packet->data_offset = cpu_to_le32(24);
	packet->data_length = cpu_to_le32(size);
	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
	packet->padding = 0;

	ksmbd_debug(RDMA,
		    "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
		    le16_to_cpu(packet->credits_requested),
		    le16_to_cpu(packet->credits_granted),
		    le32_to_cpu(packet->data_offset),
		    le32_to_cpu(packet->data_length),
		    le32_to_cpu(packet->remaining_data_length));

	/* Map the packet to DMA */
	header_length = sizeof(struct smb_direct_data_transfer);
	/* If this is a packet without payload, don't send padding */
	if (!size)
		header_length =
			offsetof(struct smb_direct_data_transfer, padding);

	sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
						 (void *)packet,
						 header_length,
						 DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
	if (ret) {
		smb_direct_free_sendmsg(t, sendmsg);
		return ret;
	}

	sendmsg->num_sge = 1;
	sendmsg->sge[0].length = header_length;
	sendmsg->sge[0].lkey = t->pd->local_dma_lkey;

	*sendmsg_out = sendmsg;
	return 0;
}
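/*
 * The data_offset of 24 is sizeof(struct smb_direct_data_transfer):
 * 20 bytes of fixed header fields plus 4 bytes of padding, which keeps
 * the payload 8-byte aligned as [MS-SMBD] requires of DataOffset.
 * Header-only messages are truncated at offsetof(..., padding), i.e.
 * 20 bytes, since there is no payload to align.
 */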
static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nentries)
{
	bool high = is_vmalloc_addr(buf);
	struct page *page;
	int offset, len;
	int i = 0;

	if (nentries < get_buf_page_count(buf, size))
		return -EINVAL;

	offset = offset_in_page(buf);
	buf -= offset;
	while (size > 0) {
		len = min_t(int, PAGE_SIZE - offset, size);
		if (high)
			page = vmalloc_to_page(buf);
		else
			page = kmap_to_page(buf);

		if (!sg_list)
			return -EINVAL;
		sg_set_page(sg_list, page, len, offset);
		sg_list = sg_next(sg_list);

		buf += PAGE_SIZE;
		size -= len;
		offset = 0;
		i++;
	}
	return i;
}
static int get_mapped_sg_list(struct ib_device *device, void *buf, int size,
			      struct scatterlist *sg_list, int nentries,
			      enum dma_data_direction dir)
{
	int npages;

	npages = get_sg_list(buf, size, sg_list, nentries);
	if (npages <= 0)
		return -EINVAL;
	return ib_dma_map_sg(device, sg_list, npages, dir);
}
static int post_sendmsg(struct smb_direct_transport *t,
			struct smb_direct_send_ctx *send_ctx,
			struct smb_direct_sendmsg *msg)
{
	int i;

	for (i = 0; i < msg->num_sge; i++)
		ib_dma_sync_single_for_device(t->cm_id->device,
					      msg->sge[i].addr, msg->sge[i].length,
					      DMA_TO_DEVICE);

	msg->cqe.done = send_done;
	msg->wr.opcode = IB_WR_SEND;
	msg->wr.sg_list = &msg->sge[0];
	msg->wr.num_sge = msg->num_sge;
	msg->wr.next = NULL;

	if (send_ctx) {
		msg->wr.wr_cqe = NULL;
		msg->wr.send_flags = 0;
		if (!list_empty(&send_ctx->msg_list)) {
			struct smb_direct_sendmsg *last;

			last = list_last_entry(&send_ctx->msg_list,
					       struct smb_direct_sendmsg,
					       list);
			last->wr.next = &msg->wr;
		}
		list_add_tail(&msg->list, &send_ctx->msg_list);
		send_ctx->wr_cnt++;
		return 0;
	}

	msg->wr.wr_cqe = &msg->cqe;
	msg->wr.send_flags = IB_SEND_SIGNALED;
	return smb_direct_post_send(t, &msg->wr);
}
static int smb_direct_post_send_data(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     struct kvec *iov, int niov,
				     int remaining_data_length)
{
	int i, j, ret;
	struct smb_direct_sendmsg *msg;
	int data_length;
	struct scatterlist sg[SMB_DIRECT_MAX_SEND_SGES - 1];

	ret = wait_for_send_credits(t, send_ctx);
	if (ret)
		return ret;

	data_length = 0;
	for (i = 0; i < niov; i++)
		data_length += iov[i].iov_len;

	ret = smb_direct_create_header(t, data_length, remaining_data_length,
				       &msg);
	if (ret) {
		atomic_inc(&t->send_credits);
		return ret;
	}

	for (i = 0; i < niov; i++) {
		struct ib_sge *sge;
		int sg_cnt;

		sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1);
		sg_cnt = get_mapped_sg_list(t->cm_id->device,
					    iov[i].iov_base, iov[i].iov_len,
					    sg, SMB_DIRECT_MAX_SEND_SGES - 1,
					    DMA_TO_DEVICE);
		if (sg_cnt <= 0) {
			pr_err("failed to map buffer\n");
			ret = -ENOMEM;
			goto err;
		} else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) {
			pr_err("buffer does not fit into sges\n");
			ret = -E2BIG;
			ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt,
					DMA_TO_DEVICE);
			goto err;
		}

		for (j = 0; j < sg_cnt; j++) {
			sge = &msg->sge[msg->num_sge];
			sge->addr = sg_dma_address(&sg[j]);
			sge->length = sg_dma_len(&sg[j]);
			sge->lkey = t->pd->local_dma_lkey;
			msg->num_sge++;
		}
	}

	ret = post_sendmsg(t, send_ctx, msg);
	if (ret)
		goto err;
	return 0;
err:
	smb_direct_free_sendmsg(t, msg);
	atomic_inc(&t->send_credits);
	return ret;
}
static int smb_direct_writev(struct ksmbd_transport *t,
			     struct kvec *iov, int niovs, int buflen,
			     bool need_invalidate, unsigned int remote_key)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
	int remaining_data_length;
	int start, i, j;
	int max_iov_size = st->max_send_size -
			sizeof(struct smb_direct_data_transfer);
	int ret;
	struct kvec vec;
	struct smb_direct_send_ctx send_ctx;

	if (st->status != SMB_DIRECT_CS_CONNECTED)
		return -ENOTCONN;

	//FIXME: skip RFC1002 header..
	buflen -= 4;
	iov[0].iov_base += 4;
	iov[0].iov_len -= 4;

	remaining_data_length = buflen;
	ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);

	smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
	start = i = 0;
	buflen = 0;
	while (true) {
		buflen += iov[i].iov_len;
		if (buflen > max_iov_size) {
			if (i > start) {
				remaining_data_length -=
					(buflen - iov[i].iov_len);
				ret = smb_direct_post_send_data(st, &send_ctx,
								&iov[start], i - start,
								remaining_data_length);
				if (ret)
					goto done;
			} else {
				/* iov[start] is too big, break it */
				int nvec = (buflen + max_iov_size - 1) /
						max_iov_size;

				for (j = 0; j < nvec; j++) {
					vec.iov_base =
						(char *)iov[start].iov_base +
						max_iov_size * j;
					vec.iov_len =
						min_t(int, max_iov_size,
						      buflen - max_iov_size * j);
					remaining_data_length -= vec.iov_len;
					ret = smb_direct_post_send_data(st, &send_ctx, &vec, 1,
									remaining_data_length);
					if (ret)
						goto done;
				}
				i++;
				if (i == niovs)
					break;
			}
			start = i;
			buflen = 0;
		} else {
			i++;
			if (i == niovs) {
				/* send out all remaining vecs */
				remaining_data_length -= buflen;
				ret = smb_direct_post_send_data(st, &send_ctx,
								&iov[start], i - start,
								remaining_data_length);
				if (ret)
					goto done;
				break;
			}
		}
	}

done:
	ret = smb_direct_flush_send_list(st, &send_ctx, true);

	/*
	 * As an optimization, we don't wait for individual I/O to finish
	 * before sending the next one. Send them all and wait for the
	 * pending send count to reach 0; that means all the I/Os are out
	 * and we are good to return.
	 */

	wait_event(st->wait_send_payload_pending,
		   atomic_read(&st->send_payload_pending) == 0);
	return ret;
}
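/*
 * Fragmentation example with the defaults (max_send_size 8192, so
 * max_iov_size is 8192 - 24 = 8168): a 20000-byte payload leaves as
 * three data-transfer messages of 8168, 8168 and 3664 bytes, with
 * remaining_data_length announcing 11832, 3664 and finally 0, letting
 * the peer size its reassembly buffer up front.
 */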
static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
			    enum dma_data_direction dir)
{
	struct smb_direct_rdma_rw_msg *msg = container_of(wc->wr_cqe,
							  struct smb_direct_rdma_rw_msg, cqe);
	struct smb_direct_transport *t = msg->t;

	if (wc->status != IB_WC_SUCCESS) {
		pr_err("read/write error. opcode = %d, status = %s(%d)\n",
		       wc->opcode, ib_wc_status_msg(wc->status), wc->status);
		smb_direct_disconnect_rdma_connection(t);
	}

	if (atomic_inc_return(&t->rw_avail_ops) > 0)
		wake_up(&t->wait_rw_avail_ops);

	rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
			    msg->sg_list, msg->sgt.nents, dir);
	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
	complete(msg->completion);
	kfree(msg);
}

static void read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	read_write_done(cq, wc, DMA_FROM_DEVICE);
}

static void write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	read_write_done(cq, wc, DMA_TO_DEVICE);
}
static int smb_direct_rdma_xmit(struct smb_direct_transport *t, void *buf,
				int buf_len, u32 remote_key, u64 remote_offset,
				u32 remote_len, bool is_read)
{
	struct smb_direct_rdma_rw_msg *msg;
	int ret;
	DECLARE_COMPLETION_ONSTACK(completion);
	struct ib_send_wr *first_wr = NULL;

	ret = wait_for_credits(t, &t->wait_rw_avail_ops, &t->rw_avail_ops);
	if (ret < 0)
		return ret;

	msg = kmalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
		      sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
	if (!msg) {
		atomic_inc(&t->rw_avail_ops);
		return -ENOMEM;
	}

	msg->sgt.sgl = &msg->sg_list[0];
	ret = sg_alloc_table_chained(&msg->sgt,
				     get_buf_page_count(buf, buf_len),
				     msg->sg_list, SG_CHUNK_SIZE);
	if (ret) {
		atomic_inc(&t->rw_avail_ops);
		kfree(msg);
		return -ENOMEM;
	}

	ret = get_sg_list(buf, buf_len, msg->sgt.sgl, msg->sgt.orig_nents);
	if (ret <= 0) {
		pr_err("failed to get pages\n");
		goto err;
	}

	ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
			       msg->sg_list, get_buf_page_count(buf, buf_len),
			       0, remote_offset, remote_key,
			       is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	if (ret < 0) {
		pr_err("failed to init rdma_rw_ctx: %d\n", ret);
		goto err;
	}

	msg->t = t;
	msg->cqe.done = is_read ? read_done : write_done;
	msg->completion = &completion;
	first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
				   &msg->cqe, NULL);

	ret = ib_post_send(t->qp, first_wr, NULL);
	if (ret) {
		pr_err("failed to post send wr: %d\n", ret);
		goto err;
	}

	wait_for_completion(&completion);
	return 0;

err:
	atomic_inc(&t->rw_avail_ops);
	if (first_wr)
		rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
				    msg->sg_list, msg->sgt.nents,
				    is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
	kfree(msg);
	return ret;
}
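/*
 * The rdma_rw_ctx_* helpers hide the memory-registration strategy:
 * rdma_rw_ctx_init() maps the local pages and, where the HCA needs it,
 * prepares fast-registration WRs; rdma_rw_ctx_wrs() then hands back the
 * head of the resulting WR chain, which a single ib_post_send() submits.
 * The completion of the final WR fires read_done()/write_done(), which
 * tears the whole context down again.
 */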
static int smb_direct_rdma_write(struct ksmbd_transport *t, void *buf,
				 unsigned int buflen, u32 remote_key,
				 u64 remote_offset, u32 remote_len)
{
	return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
				    remote_key, remote_offset,
				    remote_len, false);
}

static int smb_direct_rdma_read(struct ksmbd_transport *t, void *buf,
				unsigned int buflen, u32 remote_key,
				u64 remote_offset, u32 remote_len)
{
	return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
				    remote_key, remote_offset,
				    remote_len, true);
}
static void smb_direct_disconnect(struct ksmbd_transport *t)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);

	ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", st->cm_id);

	smb_direct_disconnect_rdma_work(&st->disconnect_work);
	wait_event_interruptible(st->wait_status,
				 st->status == SMB_DIRECT_CS_DISCONNECTED);
	free_transport(st);
}

static void smb_direct_shutdown(struct ksmbd_transport *t)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);

	ksmbd_debug(RDMA, "smb-direct shutdown cm_id=%p\n", st->cm_id);

	smb_direct_disconnect_rdma_work(&st->disconnect_work);
}
static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event)
{
	struct smb_direct_transport *t = cm_id->context;

	ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n",
		    cm_id, rdma_event_msg(event->event), event->event);

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED: {
		t->status = SMB_DIRECT_CS_CONNECTED;
		wake_up_interruptible(&t->wait_status);
		break;
	}
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_DISCONNECTED: {
		t->status = SMB_DIRECT_CS_DISCONNECTED;
		wake_up_interruptible(&t->wait_status);
		wake_up_interruptible(&t->wait_reassembly_queue);
		wake_up(&t->wait_send_credits);
		break;
	}
	case RDMA_CM_EVENT_CONNECT_ERROR: {
		t->status = SMB_DIRECT_CS_DISCONNECTED;
		wake_up_interruptible(&t->wait_status);
		break;
	}
	default:
		pr_err("Unexpected RDMA CM event. cm_id=%p, event=%s (%d)\n",
		       cm_id, rdma_event_msg(event->event),
		       event->event);
		break;
	}
	return 0;
}
static void smb_direct_qpair_handler(struct ib_event *event, void *context)
{
	struct smb_direct_transport *t = context;

	ksmbd_debug(RDMA, "Received QP event. cm_id=%p, event=%s (%d)\n",
		    t->cm_id, ib_event_msg(event->event), event->event);

	switch (event->event) {
	case IB_EVENT_CQ_ERR:
	case IB_EVENT_QP_FATAL:
		smb_direct_disconnect_rdma_connection(t);
		break;
	default:
		break;
	}
}
static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
					      int failed)
{
	struct smb_direct_sendmsg *sendmsg;
	struct smb_direct_negotiate_resp *resp;
	int ret;

	sendmsg = smb_direct_alloc_sendmsg(t);
	if (IS_ERR(sendmsg))
		return -ENOMEM;

	resp = (struct smb_direct_negotiate_resp *)sendmsg->packet;
	if (failed) {
		memset(resp, 0, sizeof(*resp));
		resp->min_version = cpu_to_le16(0x0100);
		resp->max_version = cpu_to_le16(0x0100);
		resp->status = STATUS_NOT_SUPPORTED;
	} else {
		resp->status = STATUS_SUCCESS;
		resp->min_version = SMB_DIRECT_VERSION_LE;
		resp->max_version = SMB_DIRECT_VERSION_LE;
		resp->negotiated_version = SMB_DIRECT_VERSION_LE;
		resp->reserved = 0;
		resp->credits_requested =
				cpu_to_le16(t->send_credit_target);
		resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
		resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size);
		resp->preferred_send_size = cpu_to_le32(t->max_send_size);
		resp->max_receive_size = cpu_to_le32(t->max_recv_size);
		resp->max_fragmented_size =
				cpu_to_le32(t->max_fragmented_recv_size);
	}

	sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
						 (void *)resp, sizeof(*resp),
						 DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
	if (ret) {
		smb_direct_free_sendmsg(t, sendmsg);
		return ret;
	}

	sendmsg->num_sge = 1;
	sendmsg->sge[0].length = sizeof(*resp);
	sendmsg->sge[0].lkey = t->pd->local_dma_lkey;

	ret = post_sendmsg(t, NULL, sendmsg);
	if (ret) {
		smb_direct_free_sendmsg(t, sendmsg);
		return ret;
	}

	wait_event(t->wait_send_pending,
		   atomic_read(&t->send_pending) == 0);
	return 0;
}
static int smb_direct_accept_client(struct smb_direct_transport *t)
{
	struct rdma_conn_param conn_param;
	struct ib_port_immutable port_immutable;
	u32 ird_ord_hdr[2];
	int ret;

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom,
					   SMB_DIRECT_CM_INITIATOR_DEPTH);
	conn_param.responder_resources = 0;

	t->cm_id->device->ops.get_port_immutable(t->cm_id->device,
						 t->cm_id->port_num,
						 &port_immutable);
	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
		ird_ord_hdr[0] = conn_param.responder_resources;
		ird_ord_hdr[1] = 1;
		conn_param.private_data = ird_ord_hdr;
		conn_param.private_data_len = sizeof(ird_ord_hdr);
	} else {
		conn_param.private_data = NULL;
		conn_param.private_data_len = 0;
	}
	conn_param.retry_count = SMB_DIRECT_CM_RETRY;
	conn_param.rnr_retry_count = SMB_DIRECT_CM_RNR_RETRY;
	conn_param.flow_control = 0;

	ret = rdma_accept(t->cm_id, &conn_param);
	if (ret) {
		pr_err("error at rdma_accept: %d\n", ret);
		return ret;
	}
	return 0;
}
static int smb_direct_prepare_negotiation(struct smb_direct_transport *t)
{
	int ret;
	struct smb_direct_recvmsg *recvmsg;

	recvmsg = get_free_recvmsg(t);
	if (!recvmsg)
		return -ENOMEM;
	recvmsg->type = SMB_DIRECT_MSG_NEGOTIATE_REQ;

	ret = smb_direct_post_recv(t, recvmsg);
	if (ret) {
		pr_err("Can't post recv: %d\n", ret);
		goto out_err;
	}

	t->negotiation_requested = false;
	ret = smb_direct_accept_client(t);
	if (ret) {
		pr_err("Can't accept client\n");
		goto out_err;
	}

	smb_direct_post_recv_credits(&t->post_recv_credits_work.work);
	return 0;
out_err:
	put_recvmsg(t, recvmsg);
	return ret;
}
static int smb_direct_init_params(struct smb_direct_transport *t,
				  struct ib_qp_cap *cap)
{
	struct ib_device *device = t->cm_id->device;
	int max_send_sges, max_pages, max_rw_wrs, max_send_wrs;

	/* need 2 more SGEs: one because the SMB_DIRECT header is mapped
	 * separately, and one because a send buffer may not be page aligned.
	 */
	t->max_send_size = smb_direct_max_send_size;
	max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 2;
	if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) {
		pr_err("max_send_size %d is too large\n", t->max_send_size);
		return -EINVAL;
	}

	/*
	 * Allow smb_direct_max_outstanding_rw_ops in-flight RDMA
	 * read/writes. The HCA guarantees at least max_send_sge SGEs for
	 * an RDMA read/write work request, and if memory registration is
	 * used, we need reg_mr and local_inv WRs for each read/write.
	 */
	t->max_rdma_rw_size = smb_direct_max_read_write_size;
	max_pages = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
	max_rw_wrs = DIV_ROUND_UP(max_pages, SMB_DIRECT_MAX_SEND_SGES);
	max_rw_wrs += rdma_rw_mr_factor(device, t->cm_id->port_num,
			max_pages) * 2;
	max_rw_wrs *= smb_direct_max_outstanding_rw_ops;

	max_send_wrs = smb_direct_send_credit_target + max_rw_wrs;
	if (max_send_wrs > device->attrs.max_cqe ||
	    max_send_wrs > device->attrs.max_qp_wr) {
		pr_err("consider lowering send_credit_target = %d, or max_outstanding_rw_ops = %d\n",
		       smb_direct_send_credit_target,
		       smb_direct_max_outstanding_rw_ops);
		pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
		       device->attrs.max_cqe, device->attrs.max_qp_wr);
		return -EINVAL;
	}

	if (smb_direct_receive_credit_max > device->attrs.max_cqe ||
	    smb_direct_receive_credit_max > device->attrs.max_qp_wr) {
		pr_err("consider lowering receive_credit_max = %d\n",
		       smb_direct_receive_credit_max);
		pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
		       device->attrs.max_cqe, device->attrs.max_qp_wr);
		return -EINVAL;
	}

	if (device->attrs.max_send_sge < SMB_DIRECT_MAX_SEND_SGES) {
		pr_err("warning: device max_send_sge = %d too small\n",
		       device->attrs.max_send_sge);
		return -EINVAL;
	}
	if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) {
		pr_err("warning: device max_recv_sge = %d too small\n",
		       device->attrs.max_recv_sge);
		return -EINVAL;
	}

	t->recv_credits = 0;
	t->count_avail_recvmsg = 0;

	t->recv_credit_max = smb_direct_receive_credit_max;
	t->recv_credit_target = 10;
	t->new_recv_credits = 0;

	t->send_credit_target = smb_direct_send_credit_target;
	atomic_set(&t->send_credits, 0);
	atomic_set(&t->rw_avail_ops, smb_direct_max_outstanding_rw_ops);

	t->max_send_size = smb_direct_max_send_size;
	t->max_recv_size = smb_direct_max_receive_size;
	t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size;

	cap->max_send_wr = max_send_wrs;
	cap->max_recv_wr = t->recv_credit_max;
	cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
	cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
	cap->max_inline_data = 0;
	cap->max_rdma_ctxs =
		rdma_rw_mr_factor(device, t->cm_id->port_num, max_pages) *
		smb_direct_max_outstanding_rw_ops;
	return 0;
}
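/*
 * Worked example with the default tunables on a 4 KiB-page system:
 * max_pages = DIV_ROUND_UP(524224, 4096) + 1 = 129, so each read/write
 * needs at least DIV_ROUND_UP(129, 8) = 17 WRs before the
 * device-specific rdma_rw_mr_factor() term. Eight outstanding
 * operations therefore add 136+ WRs on top of the 255 send credits,
 * and the send queue and CQ must be sized for the sum.
 */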
static void smb_direct_destroy_pools(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg;

	while ((recvmsg = get_free_recvmsg(t)))
		mempool_free(recvmsg, t->recvmsg_mempool);
	while ((recvmsg = get_empty_recvmsg(t)))
		mempool_free(recvmsg, t->recvmsg_mempool);

	mempool_destroy(t->recvmsg_mempool);
	t->recvmsg_mempool = NULL;

	kmem_cache_destroy(t->recvmsg_cache);
	t->recvmsg_cache = NULL;

	mempool_destroy(t->sendmsg_mempool);
	t->sendmsg_mempool = NULL;

	kmem_cache_destroy(t->sendmsg_cache);
	t->sendmsg_cache = NULL;
}
static int smb_direct_create_pools(struct smb_direct_transport *t)
{
	char name[80];
	int i;
	struct smb_direct_recvmsg *recvmsg;

	snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t);
	t->sendmsg_cache = kmem_cache_create(name,
					     sizeof(struct smb_direct_sendmsg) +
					     sizeof(struct smb_direct_negotiate_resp),
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (!t->sendmsg_cache)
		return -ENOMEM;

	t->sendmsg_mempool = mempool_create(t->send_credit_target,
					    mempool_alloc_slab, mempool_free_slab,
					    t->sendmsg_cache);
	if (!t->sendmsg_mempool)
		goto err;

	snprintf(name, sizeof(name), "smb_direct_resp_%p", t);
	t->recvmsg_cache = kmem_cache_create(name,
					     sizeof(struct smb_direct_recvmsg) +
					     t->max_recv_size,
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (!t->recvmsg_cache)
		goto err;

	t->recvmsg_mempool =
		mempool_create(t->recv_credit_max, mempool_alloc_slab,
			       mempool_free_slab, t->recvmsg_cache);
	if (!t->recvmsg_mempool)
		goto err;

	INIT_LIST_HEAD(&t->recvmsg_queue);

	for (i = 0; i < t->recv_credit_max; i++) {
		recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL);
		if (!recvmsg)
			goto err;
		recvmsg->transport = t;
		list_add(&recvmsg->list, &t->recvmsg_queue);
	}
	t->count_avail_recvmsg = t->recv_credit_max;

	return 0;
err:
	smb_direct_destroy_pools(t);
	return -ENOMEM;
}
static int smb_direct_create_qpair(struct smb_direct_transport *t,
				   struct ib_qp_cap *cap)
{
	int ret;
	struct ib_qp_init_attr qp_attr;
	int pages_per_rw;

	t->pd = ib_alloc_pd(t->cm_id->device, 0);
	if (IS_ERR(t->pd)) {
		pr_err("Can't create RDMA PD\n");
		ret = PTR_ERR(t->pd);
		t->pd = NULL;
		return ret;
	}

	t->send_cq = ib_alloc_cq(t->cm_id->device, t,
				 t->send_credit_target, 0, IB_POLL_WORKQUEUE);
	if (IS_ERR(t->send_cq)) {
		pr_err("Can't create RDMA send CQ\n");
		ret = PTR_ERR(t->send_cq);
		t->send_cq = NULL;
		goto err;
	}

	t->recv_cq = ib_alloc_cq(t->cm_id->device, t,
				 cap->max_send_wr + cap->max_rdma_ctxs,
				 0, IB_POLL_WORKQUEUE);
	if (IS_ERR(t->recv_cq)) {
		pr_err("Can't create RDMA recv CQ\n");
		ret = PTR_ERR(t->recv_cq);
		t->recv_cq = NULL;
		goto err;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.event_handler = smb_direct_qpair_handler;
	qp_attr.qp_context = t;
	qp_attr.cap = *cap;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = t->send_cq;
	qp_attr.recv_cq = t->recv_cq;
	qp_attr.port_num = ~0;

	ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr);
	if (ret) {
		pr_err("Can't create RDMA QP: %d\n", ret);
		goto err;
	}

	t->qp = t->cm_id->qp;
	t->cm_id->event_handler = smb_direct_cm_handler;

	pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
	if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) {
		int pages_per_mr, mr_count;

		pages_per_mr = min_t(int, pages_per_rw,
				     t->cm_id->device->attrs.max_fast_reg_page_list_len);
		mr_count = DIV_ROUND_UP(pages_per_rw, pages_per_mr) *
			atomic_read(&t->rw_avail_ops);
		ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs, mr_count,
				      IB_MR_TYPE_MEM_REG, pages_per_mr, 0);
		if (ret) {
			pr_err("failed to init mr pool count %d pages %d\n",
			       mr_count, pages_per_mr);
			goto err;
		}
	}

	return 0;
err:
	if (t->qp) {
		ib_destroy_qp(t->qp);
		t->qp = NULL;
	}
	if (t->recv_cq) {
		ib_destroy_cq(t->recv_cq);
		t->recv_cq = NULL;
	}
	if (t->send_cq) {
		ib_destroy_cq(t->send_cq);
		t->send_cq = NULL;
	}
	if (t->pd) {
		ib_dealloc_pd(t->pd);
		t->pd = NULL;
	}
	return ret;
}
static int smb_direct_prepare(struct ksmbd_transport *t)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_negotiate_req *req;
	int ret;

	ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
	ret = wait_event_interruptible_timeout(st->wait_status,
					       st->negotiation_requested ||
					       st->status == SMB_DIRECT_CS_DISCONNECTED,
					       SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
	if (ret <= 0 || st->status == SMB_DIRECT_CS_DISCONNECTED)
		return ret < 0 ? ret : -ETIMEDOUT;

	recvmsg = get_first_reassembly(st);
	if (!recvmsg)
		return -ECONNABORTED;

	ret = smb_direct_check_recvmsg(recvmsg);
	if (ret == -ECONNABORTED)
		goto out;

	req = (struct smb_direct_negotiate_req *)recvmsg->packet;
	st->max_recv_size = min_t(int, st->max_recv_size,
				  le32_to_cpu(req->preferred_send_size));
	st->max_send_size = min_t(int, st->max_send_size,
				  le32_to_cpu(req->max_receive_size));
	st->max_fragmented_send_size =
			le32_to_cpu(req->max_fragmented_size);
	st->max_fragmented_recv_size =
			(st->recv_credit_max * st->max_recv_size) / 2;

	ret = smb_direct_send_negotiate_response(st, ret);
out:
	spin_lock_irq(&st->reassembly_queue_lock);
	st->reassembly_queue_length--;
	list_del(&recvmsg->list);
	spin_unlock_irq(&st->reassembly_queue_lock);
	put_recvmsg(st, recvmsg);

	return ret;
}
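/*
 * With the defaults this works out as follows: max_recv_size stays at
 * 8192 unless the client prefers to send less, and the advertised
 * maximum fragmented receive size becomes recv_credit_max *
 * max_recv_size / 2 = 255 * 8192 / 2 = 1044480 bytes, i.e. half of what
 * the posted receive buffers could hold in total.
 */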
static int smb_direct_connect(struct smb_direct_transport *st)
{
	int ret;
	struct ib_qp_cap qp_cap;

	ret = smb_direct_init_params(st, &qp_cap);
	if (ret) {
		pr_err("Can't configure RDMA parameters\n");
		return ret;
	}

	ret = smb_direct_create_pools(st);
	if (ret) {
		pr_err("Can't init RDMA pool: %d\n", ret);
		return ret;
	}

	ret = smb_direct_create_qpair(st, &qp_cap);
	if (ret) {
		pr_err("Can't accept RDMA client: %d\n", ret);
		return ret;
	}

	ret = smb_direct_prepare_negotiation(st);
	if (ret) {
		pr_err("Can't negotiate: %d\n", ret);
		return ret;
	}
	return 0;
}
static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
{
	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		return false;
	if (attrs->max_fast_reg_page_list_len == 0)
		return false;
	return true;
}
static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
{
	struct smb_direct_transport *t;
	int ret;

	if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
		ksmbd_debug(RDMA,
			    "Fast Registration Work Requests are not supported. device capabilities=%llx\n",
			    new_cm_id->device->attrs.device_cap_flags);
		return -EPROTONOSUPPORT;
	}

	t = alloc_transport(new_cm_id);
	if (!t)
		return -ENOMEM;

	ret = smb_direct_connect(t);
	if (ret)
		goto out_err;

	KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
					      KSMBD_TRANS(t)->conn, "ksmbd:r%u",
					      smb_direct_port);
	if (IS_ERR(KSMBD_TRANS(t)->handler)) {
		ret = PTR_ERR(KSMBD_TRANS(t)->handler);
		pr_err("Can't start thread\n");
		goto out_err;
	}

	return 0;
out_err:
	free_transport(t);
	return ret;
}
static int smb_direct_listen_handler(struct rdma_cm_id *cm_id,
				     struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST: {
		int ret = smb_direct_handle_connect_request(cm_id);

		if (ret) {
			pr_err("Can't create transport: %d\n", ret);
			return ret;
		}

		ksmbd_debug(RDMA, "Received connection request. cm_id=%p\n",
			    cm_id);
		break;
	}
	default:
		pr_err("Unexpected listen event. cm_id=%p, event=%s (%d)\n",
		       cm_id, rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}
static int smb_direct_listen(int port)
{
	int ret;
	struct rdma_cm_id *cm_id;
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};

	cm_id = rdma_create_id(&init_net, smb_direct_listen_handler,
			       &smb_direct_listener, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id));
		return PTR_ERR(cm_id);
	}

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
	if (ret) {
		pr_err("Can't bind: %d\n", ret);
		goto err;
	}

	smb_direct_listener.cm_id = cm_id;

	ret = rdma_listen(cm_id, 10);
	if (ret) {
		pr_err("Can't listen: %d\n", ret);
		goto err;
	}
	return 0;
err:
	smb_direct_listener.cm_id = NULL;
	rdma_destroy_id(cm_id);
	return ret;
}
static int smb_direct_ib_client_add(struct ib_device *ib_dev)
{
	struct smb_direct_device *smb_dev;

	/* Use port 5445 if the device type is iWARP (not InfiniBand) */
	if (ib_dev->node_type != RDMA_NODE_IB_CA)
		smb_direct_port = SMB_DIRECT_PORT_IWARP;

	if (!ib_dev->ops.get_netdev ||
	    !rdma_frwr_is_supported(&ib_dev->attrs))
		return 0;

	smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
	if (!smb_dev)
		return -ENOMEM;
	smb_dev->ib_dev = ib_dev;

	write_lock(&smb_direct_device_lock);
	list_add(&smb_dev->list, &smb_direct_device_list);
	write_unlock(&smb_direct_device_lock);

	ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
	return 0;
}
static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
					void *client_data)
{
	struct smb_direct_device *smb_dev, *tmp;

	write_lock(&smb_direct_device_lock);
	list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) {
		if (smb_dev->ib_dev == ib_dev) {
			list_del(&smb_dev->list);
			kfree(smb_dev);
			break;
		}
	}
	write_unlock(&smb_direct_device_lock);
}

static struct ib_client smb_direct_ib_client = {
	.name	= "ksmbd_smb_direct_ib",
	.add	= smb_direct_ib_client_add,
	.remove	= smb_direct_ib_client_remove,
};
int ksmbd_rdma_init(void)
{
	int ret;

	smb_direct_listener.cm_id = NULL;

	ret = ib_register_client(&smb_direct_ib_client);
	if (ret) {
		pr_err("failed to ib_register_client\n");
		return ret;
	}

	/* When a client is running out of send credits, the credits are
	 * granted by the server's sending a packet using this queue.
	 * This avoids the situation where a client cannot send packets
	 * for lack of credits.
	 */
	smb_direct_wq = alloc_workqueue("ksmbd-smb_direct-wq",
					WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
	if (!smb_direct_wq)
		return -ENOMEM;

	ret = smb_direct_listen(smb_direct_port);
	if (ret) {
		destroy_workqueue(smb_direct_wq);
		smb_direct_wq = NULL;
		pr_err("Can't listen: %d\n", ret);
		return ret;
	}

	ksmbd_debug(RDMA, "init RDMA listener. cm_id=%p\n",
		    smb_direct_listener.cm_id);
	return 0;
}
void ksmbd_rdma_destroy(void)
{
	if (!smb_direct_listener.cm_id)
		return;

	ib_unregister_client(&smb_direct_ib_client);
	rdma_destroy_id(smb_direct_listener.cm_id);

	smb_direct_listener.cm_id = NULL;

	if (smb_direct_wq) {
		destroy_workqueue(smb_direct_wq);
		smb_direct_wq = NULL;
	}
}
bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
{
	struct smb_direct_device *smb_dev;
	int i;
	bool rdma_capable = false;

	read_lock(&smb_direct_device_lock);
	list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
		for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
			struct net_device *ndev;

			ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
							       i + 1);
			if (!ndev)
				continue;

			if (ndev == netdev) {
				dev_put(ndev);
				rdma_capable = true;
				goto out;
			}
			dev_put(ndev);
		}
	}
out:
	read_unlock(&smb_direct_device_lock);

	if (!rdma_capable) {
		struct ib_device *ibdev;

		ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
		if (ibdev) {
			if (rdma_frwr_is_supported(&ibdev->attrs))
				rdma_capable = true;
			ib_device_put(ibdev);
		}
	}

	return rdma_capable;
}
static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
	.prepare	= smb_direct_prepare,
	.disconnect	= smb_direct_disconnect,
	.shutdown	= smb_direct_shutdown,
	.writev		= smb_direct_writev,
	.read		= smb_direct_read,
	.rdma_read	= smb_direct_rdma_read,
	.rdma_write	= smb_direct_rdma_write,
};