/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/nospec.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cm.h>
#include <rdma/rdma_netlink.h>
#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

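/*
 * For reference, a sketch of how this knob is reached from userspace once
 * register_net_sysctl() runs in ucma_init() below (shell session shown is
 * illustrative, paths assumed rather than verified here):
 *
 *	$ sysctl net.rdma_ucm.max_backlog
 *	net.rdma_ucm.max_backlog = 1024
 *	$ echo 2048 > /proc/sys/net/rdma_ucm/max_backlog
 */
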
struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	u32			id;
	struct completion	comp;
	refcount_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	struct mutex		mutex;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* mark that the device is in the process of destroying the internal
	 * HW resources, protected by the ctx_table lock
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	u32			id;
	int			events_reported;

	u64			uid;
	u8			join_state;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};

static DEFINE_XARRAY_ALLOC(ctx_table);
static DEFINE_XARRAY_ALLOC(multicast_table);

static const struct file_operations ucma_fops;

static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = xa_load(&ctx_table, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	xa_lock(&ctx_table);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		refcount_inc(&ctx->ref);
	xa_unlock(&ctx_table);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (refcount_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

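/*
 * Lifecycle sketch (informational, derived from the code in this file): a
 * context starts with ref == 1 from ucma_alloc_ctx(), each ucma_get_ctx()
 * adds a reference, and the final ucma_put_ctx() fires ctx->comp. Destroy
 * paths drop their own reference and then wait on the completion before
 * tearing down the CM_ID:
 *
 *	ucma_put_ctx(ctx);
 *	wait_for_completion(&ctx->comp);
 *	rdma_destroy_id(ctx->cm_id);
 */
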
/*
 * Same as ucma_get_ctx but requires that ->cm_id->device is valid, e.g. that
 * the CM_ID has been bound to a device.
 */
static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
{
	struct ucma_context *ctx = ucma_get_ctx(file, id);

	if (IS_ERR(ctx))
		return ctx;
	if (!ctx->cm_id->device) {
		ucma_put_ctx(ctx);
		return ERR_PTR(-EINVAL);
	}
	return ctx;
}

static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event,
						       close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context,
						close_work);

	/* once all inflight tasks are finished, we close all underlying
	 * resources. The context is still alive until its explicit destruction
	 * by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	refcount_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;
	mutex_init(&ctx->mutex);

	if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mc->ctx = ctx;
	if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, GFP_KERNEL))
		goto error;

	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct ib_device *device,
			       struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only if the context still points at this cm_id does it own it and
	 * can it be queued to be closed; otherwise the cm_id is an in-flight
	 * one that sits on the context's event list, pending to be detached
	 * and reattached to its new context as part of ucma_get_event,
	 * handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		xa_lock(&ctx_table);
		ctx->closing = 1;
		xa_unlock(&ctx_table);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud,
				   &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	uevent->resp.ece.vendor_id = event->ece.vendor_id;
	uevent->resp.ece.attr_mod = event->ece.attr_mod;

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context. This can only happen if an error occurs on a
		 * new connection before the user accepts it. This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	/*
	 * Old 32 bit user space does not send the 4 byte padding in the
	 * reserved field. We don't care, allow it to keep working.
	 */
	if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved) -
			sizeof(uevent->resp.ece))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &uevent->resp,
			 min_t(size_t, out_len, sizeof(uevent->resp)))) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

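/*
 * For illustration only: a hypothetical userspace consumer would typically
 * poll() the rdma_cm fd for readability and then issue
 * RDMA_USER_CM_CMD_GET_EVENT commands to drain the event list (the write()
 * message layout is sketched near ucma_write() below):
 *
 *	struct pollfd pfd = { .fd = cm_fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	// then write() a rdma_ucm_cmd_hdr + rdma_ucm_get_event payload
 */
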
static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = __rdma_create_id(current->nsproxy->net_ns,
				 ucma_event_handler, ctx, cmd.ps, qp_type, NULL);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}

	ctx->cm_id = cm_id;
	return 0;

err2:
	rdma_destroy_id(cm_id);
err1:
	xa_erase(&ctx_table, ctx->id);
	mutex_lock(&file->mut);
	list_del(&ctx->list);
	mutex_unlock(&file->mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		xa_erase(&multicast_table, mc->id);
		kfree(mc);
	}
	mutex_unlock(&ctx->file->mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to cleanup the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing. We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context pending events list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	mutex_destroy(&ctx->mutex);
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	xa_lock(&ctx_table);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		__xa_erase(&ctx_table, ctx->id);
	xa_unlock(&ctx_table);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no inflight closing
	 * task.
	 */
	xa_lock(&ctx_table);
	if (!ctx->closing) {
		xa_unlock(&ctx_table);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		xa_unlock(&ctx_table);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved || !cmd.addr_size ||
	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
	    !rdma_addr_size_in6(&cmd.dst_addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved ||
	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		fallthrough;
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		fallthrough;
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < offsetof(struct rdma_ucm_query_route_resp, ibdev_index))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.ibdev_index = ctx->cm_id->device->index;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	mutex_unlock(&ctx->mutex);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp,
			 min_t(size_t, out_len, sizeof(resp))))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->ibdev_index = cm_id->device->index;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {
		struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
			struct sa_path_rec ib;

			sa_convert_path_opa_to_ib(&ib, rec);
			ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
		} else {
			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
		}
	}

	if (copy_to_user(response, resp, struct_size(resp, path_data, i)))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
			       NULL);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, NULL,
			       (union ib_gid *)&addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = u64_to_user_ptr(cmd.response);
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
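	/* IB QP numbers are only 24 bits wide; mask off the upper bits. */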
	dst->qp_num = src->qp_num & 0xFFFFFF;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_conn_param conn_param;
	struct rdma_ucm_ece ece = {};
	struct rdma_ucm_connect cmd;
	struct ucma_context *ctx;
	size_t in_size;
	int ret;

	if (in_len < offsetofend(typeof(cmd), reserved))
		return -EINVAL;
	in_size = min_t(size_t, in_len, sizeof(cmd));
	if (copy_from_user(&cmd, inbuf, in_size))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	if (offsetofend(typeof(cmd), ece) <= in_size) {
		ece.vendor_id = cmd.ece.vendor_id;
		ece.attr_mod = cmd.ece.attr_mod;
	}

	mutex_lock(&ctx->mutex);
	ret = rdma_connect_ece(ctx->cm_id, &conn_param, &ece);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	mutex_lock(&ctx->mutex);
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct rdma_ucm_ece ece = {};
	struct ucma_context *ctx;
	size_t in_size;
	int ret;

	if (in_len < offsetofend(typeof(cmd), reserved))
		return -EINVAL;
	in_size = min_t(size_t, in_len, sizeof(cmd));
	if (copy_from_user(&cmd, inbuf, in_size))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (offsetofend(typeof(cmd), ece) <= in_size) {
		ece.vendor_id = cmd.ece.vendor_id;
		ece.attr_mod = cmd.ece.attr_mod;
	}

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		mutex_lock(&ctx->mutex);
		ret = __rdma_accept_ece(ctx->cm_id, &conn_param, NULL, &ece);
		mutex_unlock(&ctx->mutex);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else {
		mutex_lock(&ctx->mutex);
		ret = __rdma_accept_ece(ctx->cm_id, NULL, NULL, &ece);
		mutex_unlock(&ctx->mutex);
	}

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.reason)
		cmd.reason = IB_CM_REJ_CONSUMER_DEFINED;

	switch (cmd.reason) {
	case IB_CM_REJ_CONSUMER_DEFINED:
	case IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED:
		break;
	default:
		return -EINVAL;
	}

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len,
			  cmd.reason);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_disconnect(ctx->cm_id);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.qp_state > IB_QPS_ERR)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	mutex_lock(&ctx->mutex);
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	mutex_unlock(&ctx->mutex);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_ACK_TIMEOUT:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_ack_timeout(ctx->cm_id, *((u8 *)optval));
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	if (!ctx->cm_id->device)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
	ib_sa_unpack_path(path_data->path_rec, &sa_path);

	if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
		struct sa_path_rec opa;

		sa_convert_path_ib_to_opa(&opa, &sa_path);
		mutex_lock(&ctx->mutex);
		ret = rdma_set_ib_path(ctx->cm_id, &opa);
		mutex_unlock(&ctx->mutex);
	} else {
		mutex_lock(&ctx->mutex);
		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
		mutex_unlock(&ctx->mutex);
	}
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		mutex_lock(&ctx->mutex);
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		mutex_unlock(&ctx->mutex);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user(u64_to_user_ptr(cmd.optval),
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	if (ctx->cm_id->device)
		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->addr_size != rdma_addr_size(addr))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	mutex_lock(&ctx->mutex);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	mutex_unlock(&ctx->mutex);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user(u64_to_user_ptr(cmd->response),
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	xa_store(&multicast_table, mc->id, mc, 0);

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	xa_erase(&multicast_table, mc->id);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	if (!join_cmd.addr_size)
		return -EINVAL;

	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	xa_lock(&multicast_table);
	mc = xa_load(&multicast_table, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!refcount_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		__xa_erase(&multicast_table, mc->id);
	xa_unlock(&multicast_table);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	mutex_lock(&mc->ctx->mutex);
	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_unlock(&mc->ctx->mutex);

	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

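/*
 * Example of the ordering discipline above (informational): for two files
 * A < B by address, both ucma_lock_files(A, B) and ucma_lock_files(B, A)
 * take A->mut before B->mut, so two concurrent migrations between the same
 * pair of files cannot deadlock ABBA-style on the file mutexes.
 */
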
static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;
	if (f.file->f_op != &ucma_fops) {
		ret = -EINVAL;
		goto file_put;
	}

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fds, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	xa_lock(&ctx_table);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	xa_unlock(&ctx_table);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;
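	/* Sanitize hdr.cmd against speculative out-of-bounds table reads
	 * (Spectre v1).
	 */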
	hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

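/*
 * For illustration only: a minimal, hypothetical userspace invocation of the
 * write() ABI dispatched above. Userspace sends a struct rdma_ucm_cmd_hdr
 * immediately followed by the command payload (struct names come from
 * <rdma/rdma_user_cm.h>; the exact packing shown is an assumption of this
 * sketch, not a guarantee):
 *
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} __attribute__((packed)) msg = {
 *		.hdr = { .cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *			 .in  = sizeof(msg.cmd),
 *			 .out = sizeof(struct rdma_ucm_create_id_resp) },
 *	};
 *	write(cm_fd, &msg, sizeof(msg));
 */
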
static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = EPOLLIN | EPOLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return stream_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		xa_erase(&ctx_table, ctx->id);
		flush_workqueue(file->close_wq);
		/* Once the ctx is marked as destroying and the workqueue has
		 * been flushed, we are safe from any in-flight handlers that
		 * might queue another closing task.
		 */
		xa_lock(&ctx_table);
		if (!ctx->closing) {
			xa_unlock(&ctx_table);
			ucma_put_ctx(ctx);
			wait_for_completion(&ctx->comp);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			xa_unlock(&ctx_table);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
};

static int ucma_get_global_nl_info(struct ib_client_nl_info *res)
{
	res->abi = RDMA_USER_CM_ABI_VERSION;
	res->cdev = ucma_misc.this_device;
	return 0;
}

static struct ib_client rdma_cma_client = {
	.name = "rdma_cm",
	.get_global_nl_info = ucma_get_global_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("rdma_cm");

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

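/*
 * Informational: with the misc device registered above, this attribute is
 * expected to appear in sysfs (exact path assumed, not verified here) as
 * something like /sys/class/misc/rdma_cm/abi_version:
 *
 *	$ cat /sys/class/misc/rdma_cm/abi_version
 */
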
static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}

	ret = ib_register_client(&rdma_cma_client);
	if (ret)
		goto err3;

	return 0;
err3:
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	ib_unregister_client(&rdma_cma_client);
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
}

module_init(ucma_init);
module_exit(ucma_cleanup);