// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */
6 #include <linux/overflow.h>
7 #include <rdma/uverbs_std_types.h>
10 #include <rdma/uverbs_ioctl.h>
11 #include <rdma/opa_addr.h>
12 #include <rdma/ib_cache.h>
/*
 * This ioctl method allows calling any defined write or write_ex
 * handler. This essentially replaces the hdr/ex_hdr system with the ioctl
 * marshalling, and brings the non-ex path into the same marshalling as the
 * ex path.
 */
20 static int UVERBS_HANDLER(UVERBS_METHOD_INVOKE_WRITE)(
21 struct uverbs_attr_bundle *attrs)
23 struct uverbs_api *uapi = attrs->ufile->device->uapi;
24 const struct uverbs_api_write_method *method_elm;
28 rc = uverbs_get_const(&cmd, attrs, UVERBS_ATTR_WRITE_CMD);
32 method_elm = uapi_get_method(uapi, cmd);
33 if (IS_ERR(method_elm))
34 return PTR_ERR(method_elm);
36 uverbs_fill_udata(attrs, &attrs->ucore, UVERBS_ATTR_CORE_IN,
37 UVERBS_ATTR_CORE_OUT);
39 if (attrs->ucore.inlen < method_elm->req_size ||
40 attrs->ucore.outlen < method_elm->resp_size)
43 attrs->uobject = NULL;
44 rc = method_elm->handler(attrs);
46 uverbs_finalize_object(attrs->uobject, UVERBS_ACCESS_NEW, true,
51 DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_INVOKE_WRITE,
52 UVERBS_ATTR_CONST_IN(UVERBS_ATTR_WRITE_CMD,
53 enum ib_uverbs_write_cmds,
55 UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CORE_IN,
56 UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
58 UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CORE_OUT,
59 UVERBS_ATTR_MIN_SIZE(0),
64 gather_objects_handle(struct ib_uverbs_file *ufile,
65 const struct uverbs_api_object *uapi_object,
66 struct uverbs_attr_bundle *attrs,
70 u64 max_count = out_len / sizeof(u32);
71 struct ib_uobject *obj;
75 /* Allocated memory that cannot page out where we gather
76 * all object ids under a spin_lock.
78 handles = uverbs_zalloc(attrs, out_len);
82 spin_lock_irq(&ufile->uobjects_lock);
83 list_for_each_entry(obj, &ufile->uobjects, list) {
86 if (obj->uapi_object != uapi_object)
89 if (count >= max_count)
92 handles[count] = obj_id;
95 spin_unlock_irq(&ufile->uobjects_lock);
101 static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
102 struct uverbs_attr_bundle *attrs)
104 const struct uverbs_api_object *uapi_object;
111 out_len = uverbs_attr_get_len(attrs, UVERBS_ATTR_INFO_HANDLES_LIST);
112 if (out_len <= 0 || (out_len % sizeof(u32) != 0))
115 ret = uverbs_get_const(&object_id, attrs, UVERBS_ATTR_INFO_OBJECT_ID);
119 uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
123 handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
126 return PTR_ERR(handles);
128 ret = uverbs_copy_to(attrs, UVERBS_ATTR_INFO_HANDLES_LIST, handles,
129 sizeof(u32) * total);
133 ret = uverbs_copy_to(attrs, UVERBS_ATTR_INFO_TOTAL_HANDLES, &total,
139 void copy_port_attr_to_resp(struct ib_port_attr *attr,
140 struct ib_uverbs_query_port_resp *resp,
141 struct ib_device *ib_dev, u8 port_num)
143 resp->state = attr->state;
144 resp->max_mtu = attr->max_mtu;
145 resp->active_mtu = attr->active_mtu;
146 resp->gid_tbl_len = attr->gid_tbl_len;
147 resp->port_cap_flags = make_port_cap_flags(attr);
148 resp->max_msg_sz = attr->max_msg_sz;
149 resp->bad_pkey_cntr = attr->bad_pkey_cntr;
150 resp->qkey_viol_cntr = attr->qkey_viol_cntr;
151 resp->pkey_tbl_len = attr->pkey_tbl_len;
153 if (rdma_is_grh_required(ib_dev, port_num))
154 resp->flags |= IB_UVERBS_QPF_GRH_REQUIRED;
156 if (rdma_cap_opa_ah(ib_dev, port_num)) {
157 resp->lid = OPA_TO_IB_UCAST_LID(attr->lid);
158 resp->sm_lid = OPA_TO_IB_UCAST_LID(attr->sm_lid);
160 resp->lid = ib_lid_cpu16(attr->lid);
161 resp->sm_lid = ib_lid_cpu16(attr->sm_lid);
164 resp->lmc = attr->lmc;
165 resp->max_vl_num = attr->max_vl_num;
166 resp->sm_sl = attr->sm_sl;
167 resp->subnet_timeout = attr->subnet_timeout;
168 resp->init_type_reply = attr->init_type_reply;
169 resp->active_width = attr->active_width;
170 /* This ABI needs to be extended to provide any speed more than IB_SPEED_NDR */
171 resp->active_speed = min_t(u16, attr->active_speed, IB_SPEED_NDR);
172 resp->phys_state = attr->phys_state;
173 resp->link_layer = rdma_port_get_link_layer(ib_dev, port_num);
176 static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(
177 struct uverbs_attr_bundle *attrs)
179 struct ib_device *ib_dev;
180 struct ib_port_attr attr = {};
181 struct ib_uverbs_query_port_resp_ex resp = {};
182 struct ib_ucontext *ucontext;
186 ucontext = ib_uverbs_get_ucontext(attrs);
187 if (IS_ERR(ucontext))
188 return PTR_ERR(ucontext);
189 ib_dev = ucontext->device;
191 /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */
192 if (!ib_dev->ops.query_port)
195 ret = uverbs_get_const(&port_num, attrs,
196 UVERBS_ATTR_QUERY_PORT_PORT_NUM);
200 ret = ib_query_port(ib_dev, port_num, &attr);
204 copy_port_attr_to_resp(&attr, &resp.legacy_resp, ib_dev, port_num);
205 resp.port_cap_flags2 = attr.port_cap_flags2;
207 return uverbs_copy_to_struct_or_zero(attrs, UVERBS_ATTR_QUERY_PORT_RESP,
208 &resp, sizeof(resp));
211 static int UVERBS_HANDLER(UVERBS_METHOD_GET_CONTEXT)(
212 struct uverbs_attr_bundle *attrs)
214 u32 num_comp = attrs->ufile->device->num_comp_vectors;
215 u64 core_support = IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS;
218 ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
219 &num_comp, sizeof(num_comp));
220 if (IS_UVERBS_COPY_ERR(ret))
223 ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
224 &core_support, sizeof(core_support));
225 if (IS_UVERBS_COPY_ERR(ret))
228 ret = ib_alloc_ucontext(attrs);
231 ret = ib_init_ucontext(attrs);
233 kfree(attrs->context);
234 attrs->context = NULL;
240 static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_CONTEXT)(
241 struct uverbs_attr_bundle *attrs)
243 u64 core_support = IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS;
244 struct ib_ucontext *ucontext;
245 struct ib_device *ib_dev;
249 ucontext = ib_uverbs_get_ucontext(attrs);
250 if (IS_ERR(ucontext))
251 return PTR_ERR(ucontext);
252 ib_dev = ucontext->device;
254 if (!ib_dev->ops.query_ucontext)
257 num_comp = attrs->ufile->device->num_comp_vectors;
258 ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS,
259 &num_comp, sizeof(num_comp));
260 if (IS_UVERBS_COPY_ERR(ret))
263 ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT,
264 &core_support, sizeof(core_support));
265 if (IS_UVERBS_COPY_ERR(ret))
268 return ucontext->device->ops.query_ucontext(ucontext, attrs);
271 static int copy_gid_entries_to_user(struct uverbs_attr_bundle *attrs,
272 struct ib_uverbs_gid_entry *entries,
273 size_t num_entries, size_t user_entry_size)
275 const struct uverbs_attr *attr;
276 void __user *user_entries;
281 if (user_entry_size == sizeof(*entries)) {
282 ret = uverbs_copy_to(attrs,
283 UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
284 entries, sizeof(*entries) * num_entries);
288 copy_len = min_t(size_t, user_entry_size, sizeof(*entries));
289 attr = uverbs_attr_get(attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES);
291 return PTR_ERR(attr);
293 user_entries = u64_to_user_ptr(attr->ptr_attr.data);
294 for (i = 0; i < num_entries; i++) {
295 if (copy_to_user(user_entries, entries, copy_len))
298 if (user_entry_size > sizeof(*entries)) {
299 if (clear_user(user_entries + sizeof(*entries),
300 user_entry_size - sizeof(*entries)))
305 user_entries += user_entry_size;
308 return uverbs_output_written(attrs,
309 UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES);
312 static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
313 struct uverbs_attr_bundle *attrs)
315 struct ib_uverbs_gid_entry *entries;
316 struct ib_ucontext *ucontext;
317 struct ib_device *ib_dev;
318 size_t user_entry_size;
325 ret = uverbs_get_flags32(&flags, attrs,
326 UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, 0);
330 ret = uverbs_get_const(&user_entry_size, attrs,
331 UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE);
335 max_entries = uverbs_attr_ptr_get_array_size(
336 attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
338 if (max_entries <= 0)
341 ucontext = ib_uverbs_get_ucontext(attrs);
342 if (IS_ERR(ucontext))
343 return PTR_ERR(ucontext);
344 ib_dev = ucontext->device;
346 if (check_mul_overflow(max_entries, sizeof(*entries), &num_bytes))
349 entries = uverbs_zalloc(attrs, num_bytes);
353 num_entries = rdma_query_gid_table(ib_dev, entries, max_entries);
357 ret = copy_gid_entries_to_user(attrs, entries, num_entries,
362 ret = uverbs_copy_to(attrs,
363 UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES,
364 &num_entries, sizeof(num_entries));
368 static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_ENTRY)(
369 struct uverbs_attr_bundle *attrs)
371 struct ib_uverbs_gid_entry entry = {};
372 const struct ib_gid_attr *gid_attr;
373 struct ib_ucontext *ucontext;
374 struct ib_device *ib_dev;
375 struct net_device *ndev;
381 ret = uverbs_get_flags32(&flags, attrs,
382 UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, 0);
386 ret = uverbs_get_const(&port_num, attrs,
387 UVERBS_ATTR_QUERY_GID_ENTRY_PORT);
391 ret = uverbs_get_const(&gid_index, attrs,
392 UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX);
396 ucontext = ib_uverbs_get_ucontext(attrs);
397 if (IS_ERR(ucontext))
398 return PTR_ERR(ucontext);
399 ib_dev = ucontext->device;
401 if (!rdma_is_port_valid(ib_dev, port_num))
404 gid_attr = rdma_get_gid_attr(ib_dev, port_num, gid_index);
405 if (IS_ERR(gid_attr))
406 return PTR_ERR(gid_attr);
408 memcpy(&entry.gid, &gid_attr->gid, sizeof(gid_attr->gid));
409 entry.gid_index = gid_attr->index;
410 entry.port_num = gid_attr->port_num;
411 entry.gid_type = gid_attr->gid_type;
414 ndev = rdma_read_gid_attr_ndev_rcu(gid_attr);
416 if (PTR_ERR(ndev) != -ENODEV) {
422 entry.netdev_ifindex = ndev->ifindex;
426 ret = uverbs_copy_to_struct_or_zero(
427 attrs, UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY, &entry,
430 rdma_put_gid_attr(gid_attr);
434 DECLARE_UVERBS_NAMED_METHOD(
435 UVERBS_METHOD_GET_CONTEXT,
436 UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
437 UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
438 UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
439 UVERBS_ATTR_TYPE(u64), UA_OPTIONAL),
442 DECLARE_UVERBS_NAMED_METHOD(
443 UVERBS_METHOD_QUERY_CONTEXT,
444 UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS,
445 UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
446 UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT,
447 UVERBS_ATTR_TYPE(u64), UA_OPTIONAL));
449 DECLARE_UVERBS_NAMED_METHOD(
450 UVERBS_METHOD_INFO_HANDLES,
451 /* Also includes any device specific object ids */
452 UVERBS_ATTR_CONST_IN(UVERBS_ATTR_INFO_OBJECT_ID,
453 enum uverbs_default_objects, UA_MANDATORY),
454 UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_INFO_TOTAL_HANDLES,
455 UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
456 UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_INFO_HANDLES_LIST,
457 UVERBS_ATTR_MIN_SIZE(sizeof(u32)), UA_OPTIONAL));
459 DECLARE_UVERBS_NAMED_METHOD(
460 UVERBS_METHOD_QUERY_PORT,
461 UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_PORT_PORT_NUM, u8, UA_MANDATORY),
463 UVERBS_ATTR_QUERY_PORT_RESP,
464 UVERBS_ATTR_STRUCT(struct ib_uverbs_query_port_resp_ex,
468 DECLARE_UVERBS_NAMED_METHOD(
469 UVERBS_METHOD_QUERY_GID_TABLE,
470 UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE, u64,
472 UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, u32,
474 UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
475 UVERBS_ATTR_MIN_SIZE(0), UA_MANDATORY),
476 UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES,
477 UVERBS_ATTR_TYPE(u64), UA_MANDATORY));
479 DECLARE_UVERBS_NAMED_METHOD(
480 UVERBS_METHOD_QUERY_GID_ENTRY,
481 UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_ENTRY_PORT, u32,
483 UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX, u32,
485 UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, u32,
487 UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY,
488 UVERBS_ATTR_STRUCT(struct ib_uverbs_gid_entry,
492 DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE,
493 &UVERBS_METHOD(UVERBS_METHOD_GET_CONTEXT),
494 &UVERBS_METHOD(UVERBS_METHOD_INVOKE_WRITE),
495 &UVERBS_METHOD(UVERBS_METHOD_INFO_HANDLES),
496 &UVERBS_METHOD(UVERBS_METHOD_QUERY_PORT),
497 &UVERBS_METHOD(UVERBS_METHOD_QUERY_CONTEXT),
498 &UVERBS_METHOD(UVERBS_METHOD_QUERY_GID_TABLE),
499 &UVERBS_METHOD(UVERBS_METHOD_QUERY_GID_ENTRY));
501 const struct uapi_definition uverbs_def_obj_device[] = {
502 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DEVICE),