// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include "devx.h"
#include "qp.h"
#include <linux/xarray.h>
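
/*
 * UVERBS_MODULE_NAME must be defined before uverbs_named_ioctl.h is pulled
 * in: the UVERBS_HANDLER()/UVERBS_METHOD() macros used below paste it into
 * the symbol names they generate for this driver's ioctl methods.
 */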
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

static void dispatch_event_fd(struct list_head *fd_list, const void *data);

enum devx_obj_flags {
	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
	DEVX_OBJ_FLAGS_DCT = 1 << 1,
	DEVX_OBJ_FLAGS_CQ = 1 << 2,
};

struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	struct list_head list;
	struct devx_async_cmd_event_file *ev_file;
	struct mlx5_async_work cb_work;
	u16 cmd_out_len;
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};

struct devx_async_event_data {
	struct list_head list; /* headed in ev_file->event_list */
	struct mlx5_ib_uapi_devx_async_event_hdr hdr;
};

/* first level XA value data structure */
struct devx_event {
	struct xarray object_ids; /* second XA level, Key = object id */
	struct list_head unaffiliated_list;
};

/* second level XA value data structure */
struct devx_obj_event {
	struct rcu_head rcu;
	struct list_head obj_sub_list;
};
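
/*
 * Taken together, event bookkeeping is a two-level lookup: the device's
 * event_xa is keyed by event number (or'd with the object type in the high
 * 16 bits for affiliated events) and yields a devx_event; its object_ids
 * xarray is then keyed by object id and yields a devx_obj_event.
 * Unaffiliated subscriptions hang directly off the first level via
 * unaffiliated_list.
 */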

struct devx_event_subscription {
	struct list_head file_list; /* headed in ev_file->
				     * subscribed_events_list
				     */
	struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
				   * devx_obj_event->obj_sub_list
				   */
	struct list_head obj_list; /* headed in devx_obj->event_sub */
	struct list_head event_list; /* headed in ev_file->event_list or in
				      * temp list via subscription
				      */

	u8 is_cleaned:1;
	u32 xa_key_level1;
	u32 xa_key_level2;
	struct rcu_head rcu;
	u64 cookie;
	struct devx_async_event_file *ev_file;
	struct eventfd_ctx *eventfd;
};

struct devx_async_event_file {
	struct ib_uobject uobj;
	/* Head of events that are subscribed to this FD */
	struct list_head subscribed_events_list;
	spinlock_t lock;
	wait_queue_head_t poll_wait;
	struct list_head event_list;
	struct mlx5_ib_dev *dev;
	u8 omit_data:1;
	u8 is_overflow_err:1;
	u8 is_destroyed:1;
};

struct devx_umem {
	struct mlx5_core_dev *mdev;
	struct ib_umem *umem;
	u32 dinlen;
	u32 dinbox[MLX5_ST_SZ_DW(destroy_umem_in)];
};

struct devx_umem_reg_cmd {
	void *in;
	u32 inlen;
	u32 out[MLX5_ST_SZ_DW(create_umem_out)];
};

static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{
	return to_mucontext(ib_uverbs_get_ucontext(attrs));
}

int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
	u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
	void *uctx;
	int err;
	u16 uid;
	u32 cap = 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
		return -EINVAL;

	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
	if (is_user && capable(CAP_NET_RAW) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
		cap |= MLX5_UCTX_CAP_RAW_TX;
	if (is_user && capable(CAP_SYS_RAWIO) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
	     MLX5_UCTX_CAP_INTERNAL_DEV_RES))
		cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx, cap, cap);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	uid = MLX5_GET(create_uctx_out, out, uid);
	return uid;
}

void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
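
/*
 * The uid returned by mlx5_ib_devx_create() is the firmware handle of the
 * user context. Every DEVX command built below is stamped with it (see the
 * MLX5_SET(..., uid, uid) calls), which is what lets the device attribute
 * and police objects per user context.
 */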

static bool is_legacy_unaffiliated_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return true;
	default:
		return false;
	}
}

static bool is_legacy_obj_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_COMP:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return true;
	default:
		return false;
	}
}

static u16 get_legacy_obj_type(u16 opcode)
{
	switch (opcode) {
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_EVENT_QUEUE_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_EVENT_QUEUE_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_EVENT_QUEUE_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return 0;
	}
}

static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
{
	u16 opcode;

	opcode = (obj->obj_id >> 32) & 0xffff;

	if (is_legacy_obj_event_num(event_num))
		return get_legacy_obj_type(opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		return (obj->obj_id >> 48);
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_OBJ_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_OBJ_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_OBJ_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_OBJ_TYPE_DCT;
	case MLX5_CMD_OP_CREATE_TIR:
		return MLX5_OBJ_TYPE_TIR;
	case MLX5_CMD_OP_CREATE_TIS:
		return MLX5_OBJ_TYPE_TIS;
	case MLX5_CMD_OP_CREATE_PSV:
		return MLX5_OBJ_TYPE_PSV;
	case MLX5_CMD_OP_CREATE_MKEY:
		return MLX5_OBJ_TYPE_MKEY;
	case MLX5_CMD_OP_CREATE_RMP:
		return MLX5_OBJ_TYPE_RMP;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return MLX5_OBJ_TYPE_XRC_SRQ;
	case MLX5_CMD_OP_CREATE_XRQ:
		return MLX5_OBJ_TYPE_XRQ;
	case MLX5_CMD_OP_CREATE_RQT:
		return MLX5_OBJ_TYPE_RQT;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		return MLX5_OBJ_TYPE_FLOW_COUNTER;
	case MLX5_CMD_OP_CREATE_CQ:
		return MLX5_OBJ_TYPE_CQ;
	default:
		return 0;
	}
}

static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
{
	switch (event_type) {
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return eqe->data.qp_srq.type;
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return 0;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
	}
}

static u32 get_dec_obj_id(u64 obj_id)
{
	return (obj_id & 0xffffffff);
}

/*
 * As the obj_id in the firmware is not globally unique the object type
 * must be considered upon checking for a valid object id.
 * For that the opcode of the creator command is encoded as part of the obj_id.
 */
static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{
	return ((u64)opcode << 32) | obj_id;
}
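
/*
 * Example (illustrative values): a QP created with MLX5_CMD_OP_CREATE_QP
 * (opcode 0x500) and qpn 0x1d is encoded as ((u64)0x500 << 32) | 0x1d ==
 * 0x000005000000001d, so it cannot alias an SQ whose sqn happens to also be
 * 0x1d, since the SQ is encoded under MLX5_CMD_OP_CREATE_SQ.
 */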

static u32 devx_get_created_obj_id(const void *in, const void *out, u16 opcode)
{
	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		return MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	case MLX5_CMD_OP_CREATE_UMEM:
		return MLX5_GET(create_umem_out, out, umem_id);
	case MLX5_CMD_OP_CREATE_MKEY:
		return MLX5_GET(create_mkey_out, out, mkey_index);
	case MLX5_CMD_OP_CREATE_CQ:
		return MLX5_GET(create_cq_out, out, cqn);
	case MLX5_CMD_OP_ALLOC_PD:
		return MLX5_GET(alloc_pd_out, out, pd);
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		return MLX5_GET(alloc_transport_domain_out, out,
				transport_domain);
	case MLX5_CMD_OP_CREATE_RMP:
		return MLX5_GET(create_rmp_out, out, rmpn);
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_GET(create_sq_out, out, sqn);
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_GET(create_rq_out, out, rqn);
	case MLX5_CMD_OP_CREATE_RQT:
		return MLX5_GET(create_rqt_out, out, rqtn);
	case MLX5_CMD_OP_CREATE_TIR:
		return MLX5_GET(create_tir_out, out, tirn);
	case MLX5_CMD_OP_CREATE_TIS:
		return MLX5_GET(create_tis_out, out, tisn);
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		return MLX5_GET(alloc_q_counter_out, out, counter_set_id);
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		return MLX5_GET(create_flow_table_out, out, table_id);
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		return MLX5_GET(create_flow_group_out, out, group_id);
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		return MLX5_GET(set_fte_in, in, flow_index);
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		return MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		return MLX5_GET(alloc_packet_reformat_context_out, out,
				packet_reformat_id);
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		return MLX5_GET(alloc_modify_header_context_out, out,
				modify_header_id);
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		return MLX5_GET(create_scheduling_element_out, out,
				scheduling_element_id);
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		return MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		return MLX5_GET(set_l2_table_entry_in, in, table_index);
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_GET(create_qp_out, out, qpn);
	case MLX5_CMD_OP_CREATE_SRQ:
		return MLX5_GET(create_srq_out, out, srqn);
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_GET(create_dct_out, out, dctn);
	case MLX5_CMD_OP_CREATE_XRQ:
		return MLX5_GET(create_xrq_out, out, xrqn);
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return MLX5_GET(attach_to_mcg_in, in, qpn);
	case MLX5_CMD_OP_ALLOC_XRCD:
		return MLX5_GET(alloc_xrcd_out, out, xrcd);
	case MLX5_CMD_OP_CREATE_PSV:
		return MLX5_GET(create_psv_out, out, psv0_index);
	default:
		/* The entry must match to one of the devx_is_obj_create_cmd */
		WARN_ON(true);
		return 0;
	}
}

static u64 devx_get_obj_id(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u64 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_type) << 16,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
					MLX5_GET(query_mkey_in, in,
						 mkey_index));
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(query_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(modify_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(query_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(modify_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(query_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(modify_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(query_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(modify_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(query_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(modify_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(query_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(modify_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(query_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(modify_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(query_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(modify_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
					MLX5_GET(query_flow_group_in, in,
						 group_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(query_fte_in, in,
						 flow_index));
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(set_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
					MLX5_GET(query_q_counter_in, in,
						 counter_set_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
					MLX5_GET(query_flow_counter_in, in,
						 flow_counter_id));
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(query_modify_header_context_in,
						 in, modify_header_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in,
						 vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in,
						 xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
			(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
			 MLX5_GET(query_packet_reformat_context_in,
				  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}

	return obj_id;
}

static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
				 struct ib_uobject *uobj, const void *in)
{
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	u64 obj_id = devx_get_obj_id(in);

	if (!obj_id)
		return false;

	switch (uobj_get_object_id(uobj)) {
	case UVERBS_OBJECT_CQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
				      to_mcq(uobj->object)->mcq.cqn) ==
				      obj_id;

	case UVERBS_OBJECT_SRQ:
	{
		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
		u16 opcode;

		switch (srq->common.res) {
		case MLX5_RES_XSRQ:
			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
			break;
		case MLX5_RES_XRQ:
			opcode = MLX5_CMD_OP_CREATE_XRQ;
			break;
		default:
			if (!dev->mdev->issi)
				opcode = MLX5_CMD_OP_CREATE_SRQ;
			else
				opcode = MLX5_CMD_OP_CREATE_RMP;
		}

		return get_enc_obj_id(opcode,
				      to_msrq(uobj->object)->msrq.srqn) ==
				      obj_id;
	}

	case UVERBS_OBJECT_QP:
	{
		struct mlx5_ib_qp *qp = to_mqp(uobj->object);

		if (qp->type == IB_QPT_RAW_PACKET ||
		    (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
							 &qp->raw_packet_qp;
			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					       rq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					       sq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					       rq->tirn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					       sq->tisn) == obj_id);
		}

		if (qp->type == MLX5_IB_QPT_DCT)
			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					      qp->dct.mdct.mqp.qpn) == obj_id;
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
				      qp->ibqp.qp_num) == obj_id;
	}

	case UVERBS_OBJECT_WQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
				      to_mrwq(uobj->object)->core_qp.qpn) ==
				      obj_id;

	case UVERBS_OBJECT_RWQ_IND_TBL:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
				      to_mrwq_ind_table(uobj->object)->rqtn) ==
				      obj_id;

	case MLX5_IB_OBJECT_DEVX_OBJ:
		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;

	default:
		return false;
	}
}

static void devx_set_umem_valid(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
	{
		void *cqc;

		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_QP:
	{
		void *qpc;

		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_RQ:
	{
		void *rqc, *wq;

		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
		wq = MLX5_ADDR_OF(rqc, rqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_SQ:
	{
		void *sqc, *wq;

		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
		wq = MLX5_ADDR_OF(sqc, sqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_MODIFY_CQ:
		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
	{
		void *rmpc, *wq;

		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_XRQ:
	{
		void *xrqc, *wq;

		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	{
		void *xrc_srqc;

		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
					xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
		break;
	}

	default:
		return;
	}
}
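
/*
 * A note on the *_umem_valid bits set above: they indicate to the device
 * that the object's buffers are described by a DEVX umem (umem_id and
 * offset fields in the context) rather than a raw physical address list.
 * Object types that are not handled in this switch are left untouched.
 */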

static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	case MLX5_CMD_OP_CREATE_PSV:
	{
		u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);

		if (num_psv == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
	if (devx_is_whitelist_cmd(cmd_in)) {
		struct mlx5_ib_dev *dev;

		if (c->devx_uid)
			return c->devx_uid;

		dev = to_mdev(c->ibucontext.device);
		if (dev->devx_whitelist_uid)
			return dev->devx_whitelist_uid;

		return -EOPNOTSUPP;
	}

	if (!c->devx_uid)
		return -EINVAL;

	return c->devx_uid;
}

static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	/* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
	     opcode < MLX5_CMD_OP_GENERAL_END))
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_LAG:
		return true;
	default:
		return false;
	}
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}

/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If there is no match, the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, and then in this case other
 * users may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it to the hardware schedule
 * queue or arm its CQ for event generation), no further harm is expected.
 */
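
/*
 * A typical user flow (illustrative): query the UAR index for a bfreg the
 * context owns via MLX5_IB_METHOD_DEVX_QUERY_UAR, then embed the returned
 * dev_idx in the uar_page field of a QPC/CQC that is created over DEVX.
 */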
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;
	int uid;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	/* Only a whitelist of general HCA commands is allowed for this method. */
	if (!devx_is_general_cmd(cmd_in, dev))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);
}

static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = devx_get_created_obj_id(in, out, opcode);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type,
			 MLX5_GET(general_obj_in_cmd_hdr, in, obj_type));
		break;

	case MLX5_CMD_OP_CREATE_UMEM:
		MLX5_SET(destroy_umem_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_UMEM);
		MLX5_SET(destroy_umem_in, din, umem_id, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(destroy_mkey_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_MKEY);
		MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		MLX5_SET(destroy_cq_in, din, cqn, *obj_id);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(dealloc_pd_in, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		MLX5_SET(dealloc_pd_in, din, pd, *obj_id);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(dealloc_transport_domain_in, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		MLX5_SET(dealloc_transport_domain_in, din, transport_domain,
			 *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(destroy_rmp_in, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		MLX5_SET(destroy_rmp_in, din, rmpn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(destroy_sq_in, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		MLX5_SET(destroy_sq_in, din, sqn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(destroy_rq_in, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		MLX5_SET(destroy_rq_in, din, rqn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(destroy_rqt_in, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		MLX5_SET(destroy_rqt_in, din, rqtn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(destroy_tis_in, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		MLX5_SET(destroy_tis_in, din, tisn, *obj_id);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(dealloc_q_counter_in, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		MLX5_SET(dealloc_q_counter_in, din, counter_set_id, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(destroy_flow_table_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(destroy_flow_group_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(delete_fte_in, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(dealloc_flow_counter_in, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		MLX5_SET(dealloc_flow_counter_in, din, flow_counter_id,
			 *obj_id);
		break;
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		MLX5_SET(dealloc_packet_reformat_context_in, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
		MLX5_SET(dealloc_packet_reformat_context_in, din,
			 packet_reformat_id, *obj_id);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(dealloc_modify_header_context_in, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		MLX5_SET(dealloc_modify_header_context_in, din,
			 modify_header_id, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(destroy_scheduling_element_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(delete_vxlan_udp_dport_in, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(delete_l2_table_entry_in, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		MLX5_SET(destroy_qp_in, din, qpn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(destroy_srq_in, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		MLX5_SET(destroy_srq_in, din, srqn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(destroy_xrc_srq_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		MLX5_SET(destroy_xrc_srq_in, din, xrc_srqn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		MLX5_SET(destroy_dct_in, din, dctn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(destroy_xrq_in, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		MLX5_SET(destroy_xrq_in, din, xrqn, *obj_id);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(detach_from_mcg_in, din, opcode,
			 MLX5_CMD_OP_DETACH_FROM_MCG);
		MLX5_SET(detach_from_mcg_in, din, qpn, *obj_id);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(dealloc_xrcd_in, din, opcode,
			 MLX5_CMD_OP_DEALLOC_XRCD);
		MLX5_SET(dealloc_xrcd_in, din, xrcd, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_PSV:
		MLX5_SET(destroy_psv_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_PSV);
		MLX5_SET(destroy_psv_in, din, psvn, *obj_id);
		break;
	default:
		/* The entry must match to one of the devx_is_obj_create_cmd */
		break;
	}
}

static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	struct mlx5_core_mkey *mkey;
	void *mkc;
	u8 key;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
	init_waitqueue_head(&mkey->wait);

	return mlx5r_store_odp_mkey(dev, mkey);
}

static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{
	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
			MLX5_FLD_SZ_BYTES(create_mkey_in,
			memory_key_mkey_entry);
	void *mkc;
	u8 access_mode;

	if (in_len < min_len)
		return -EINVAL;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
	    access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
		return 0;
	}

	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
	return 0;
}
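
/*
 * Indirect mkeys (KLM/KSM access modes) point at other mkeys instead of a
 * umem, so they skip mkey_umem_valid; when on-demand paging is compiled in
 * they are flagged here and later registered in the ODP mkey xarray by
 * devx_handle_mkey_indirect() so that page faults can resolve them.
 */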

static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
				      struct devx_event_subscription *sub)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	if (sub->is_cleaned)
		return;

	sub->is_cleaned = 1;
	list_del_rcu(&sub->xa_list);

	if (list_empty(&sub->obj_list))
		return;

	list_del_rcu(&sub->obj_list);
	/* check whether key level 1 for this obj_sub_list is empty */
	event = xa_load(&dev->devx_event_table.event_xa,
			sub->xa_key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 sub->xa_key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct mlx5_devx_event_table *devx_event_table;
	struct devx_obj *obj = uobject->object;
	struct devx_event_subscription *sub_entry, *tmp;
	struct mlx5_ib_dev *dev;
	int ret;

	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
	    xa_erase(&obj->ib_dev->odp_mkeys,
		     mlx5_base_mkey(obj->devx_mr.mmkey.key)))
		/*
		 * The pagefault_single_data_segment() does commands against
		 * the mmkey, we must wait for that to stop before freeing the
		 * mkey, as another allocation could get the same mkey #.
		 */
		mlx5r_deref_wait_odp_mkey(&obj->devx_mr.mmkey);

	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
				    obj->dinlen, out, sizeof(out));
	if (ret)
		return ret;

	devx_event_table = &dev->devx_event_table;

	mutex_lock(&devx_event_table->event_xa_lock);
	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
		devx_cleanup_subscription(dev, sub_entry);
	mutex_unlock(&devx_event_table->event_xa_lock);

	kfree(obj);
	return ret;
}

static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
	struct mlx5_devx_event_table *table;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u32 obj_id = mcq->cqn;

	table = &obj->ib_dev->devx_event_table;
	rcu_read_lock();
	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
	if (!event)
		goto out;

	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event)
		goto out;

	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
out:
	rcu_read_unlock();
}

static bool is_apu_thread_cq(struct mlx5_ib_dev *dev, const void *in)
{
	if (!MLX5_CAP_GEN(dev->mdev, apu) ||
	    !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
		      apu_thread_cq))
		return false;

	return true;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	u16 obj_type = 0;
	int err;
	int uid;
	u32 obj_id;
	u16 opcode;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
		obj->flags |= DEVX_OBJ_FLAGS_DCT;
		err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
					   cmd_in_len, cmd_out, cmd_out_len);
	} else if (opcode == MLX5_CMD_OP_CREATE_CQ &&
		   !is_apu_thread_cq(dev, cmd_in)) {
		obj->flags |= DEVX_OBJ_FLAGS_CQ;
		obj->core_cq.comp = devx_cq_comp;
		err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
					  cmd_in, cmd_in_len, cmd_out,
					  cmd_out_len);
	} else {
		err = mlx5_cmd_exec(dev->mdev, cmd_in,
				    cmd_in_len,
				    cmd_out, cmd_out_len);
	}

	if (err)
		goto obj_free;

	if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
		u8 bulk = MLX5_GET(alloc_flow_counter_in,
				   cmd_in,
				   flow_counter_bulk);
		obj->flow_counter_bulk_size = 128UL * bulk;
	}
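	/*
	 * Note: flow_counter_bulk above is expressed in units of 128 counters
	 * (the allocation granularity per the PRM), so flow_counter_bulk_size
	 * ends up holding the number of counters in the bulk; flow steering
	 * later uses it to range-check counter offsets.
	 */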

	uobj->object = obj;
	INIT_LIST_HEAD(&obj->event_sub);
	obj->ib_dev = dev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto obj_destroy;

	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}
	return 0;

obj_destroy:
	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
			      sizeof(out));
obj_free:
	kfree(obj);
	return err;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	void *cmd_out;
	int err;
	int uid;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	devx_set_umem_valid(cmd_in);

	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	void *cmd_out;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

struct devx_async_event_queue {
	spinlock_t lock;
	wait_queue_head_t poll_wait;
	struct list_head event_list;
	atomic_t bytes_in_use;
	u8 is_destroyed:1;
};

struct devx_async_cmd_event_file {
	struct ib_uobject uobj;
	struct devx_async_event_queue ev_queue;
	struct mlx5_async_ctx async_ctx;
};

static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	atomic_set(&ev_queue->bytes_in_use, 0);
	ev_queue->is_destroyed = 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_async_cmd_event_file *ev_file;

	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);

	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
			       uobj);
	devx_init_event_queue(&ev_file->ev_queue);
	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
	struct devx_async_event_file *ev_file;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 flags;
	int err;

	err = uverbs_get_flags32(&flags, attrs,
		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);
	if (err)
		return err;

	ev_file = container_of(uobj, struct devx_async_event_file,
			       uobj);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
		ev_file->omit_data = 1;
	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
	ev_file->dev = dev;
	get_device(&dev->ib_dev.dev);
	return 0;
}

static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
	struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
	unsigned long flags;

	/*
	 * Note that if the struct devx_async_cmd_event_file uobj begins to be
	 * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
	 * routine returns, ensuring that it always remains valid here.
	 */
	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
}

#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
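
/*
 * Each pending async query charges its cmd_out_len to bytes_in_use until
 * userspace reads the completion, so this cap bounds the kernel memory one
 * file descriptor can pin with unread results; once exceeded, new queries
 * fail fast instead of queueing.
 */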

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->ev_file = ev_file;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
		       uverbs_attr_get_len(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
		       async_data->hdr.out_data,
		       async_data->cmd_out_len,
		       devx_query_callback, &async_data->cb_work);
	if (err)
		goto free_async;

	return 0;

free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}

static void
subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
			   u32 key_level1,
			   bool is_level1,
			   u32 key_level2)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	/* Level 1 is valid for future use, no need to free */
	if (is_level1)
		return;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids,
				key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
			 u32 key_level1,
			 bool is_level1,
			 u32 key_level2)
{
	struct devx_obj_event *obj_event;
	struct devx_event *event;
	int err;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	if (!event) {
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;

		INIT_LIST_HEAD(&event->unaffiliated_list);
		xa_init(&event->object_ids);

		err = xa_insert(&devx_event_table->event_xa,
				key_level1,
				event,
				GFP_KERNEL);
		if (err) {
			kfree(event);
			return err;
		}
	}

	if (is_level1)
		return 0;

	obj_event = xa_load(&event->object_ids, key_level2);
	if (!obj_event) {
		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
		if (!obj_event)
			/* Level1 is valid for future use, no need to free */
			return -ENOMEM;

		err = xa_insert(&event->object_ids,
				key_level2,
				obj_event,
				GFP_KERNEL);
		if (err) {
			kfree(obj_event);
			return err;
		}
		INIT_LIST_HEAD(&obj_event->obj_sub_list);
	}

	return 0;
}

static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
				   struct devx_obj *obj)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (obj) {
			if (!is_legacy_obj_event_num(event_type_num_list[i]))
				return false;
		} else if (!is_legacy_unaffiliated_event_num(
				event_type_num_list[i])) {
			return false;
		}
	}

	return true;
}

#define MAX_SUPP_EVENT_NUM 255
static bool is_valid_events(struct mlx5_core_dev *dev,
			    int num_events, u16 *event_type_num_list,
			    struct devx_obj *obj)
{
	__be64 *aff_events;
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;
	int i;

	if (MLX5_CAP_GEN(dev, event_cap)) {
		aff_events = MLX5_CAP_DEV_EVENT(dev,
						user_affiliated_events);
		unaff_events = MLX5_CAP_DEV_EVENT(dev,
						  user_unaffiliated_events);
	} else {
		return is_valid_events_legacy(num_events, event_type_num_list,
					      obj);
	}

	for (i = 0; i < num_events; i++) {
		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
			return false;

		mask_entry = event_type_num_list[i] / 64;
		mask_bit = event_type_num_list[i] % 64;

		if (obj) {
			/* CQ completion */
			if (event_type_num_list[i] == 0)
				continue;

			if (!(be64_to_cpu(aff_events[mask_entry]) &
					(1ull << mask_bit)))
				return false;

			continue;
		}

		if (!(be64_to_cpu(unaff_events[mask_entry]) &
				(1ull << mask_bit)))
			return false;
	}

	return true;
}
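
/*
 * Worked example for the mask arithmetic above (illustrative): event
 * number 67 gives mask_entry 1 and mask_bit 3, i.e. bit 3 of the second
 * __be64 word of the user_affiliated/user_unaffiliated event capability
 * masks.
 */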
1970 #define MAX_NUM_EVENTS 16
1971 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
1972 struct uverbs_attr_bundle *attrs)
1974 struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
1976 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
1977 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1978 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1979 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1980 struct ib_uobject *fd_uobj;
1981 struct devx_obj *obj = NULL;
1982 struct devx_async_event_file *ev_file;
1983 struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
1984 u16 *event_type_num_list;
1985 struct devx_event_subscription *event_sub, *tmp_sub;
1986 struct list_head sub_list;
1988 bool use_eventfd = false;
1990 int num_alloc_xa_entries = 0;
2000 if (!IS_ERR(devx_uobj)) {
2001 obj = (struct devx_obj *)devx_uobj->object;
2003 obj_id = get_dec_obj_id(obj->obj_id);
2006 fd_uobj = uverbs_attr_get_uobject(attrs,
2007 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
2008 if (IS_ERR(fd_uobj))
2009 return PTR_ERR(fd_uobj);
2011 ev_file = container_of(fd_uobj, struct devx_async_event_file,
2014 if (uverbs_attr_is_valid(attrs,
2015 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
2016 err = uverbs_copy_from(&redirect_fd, attrs,
2017 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
2024 if (uverbs_attr_is_valid(attrs,
2025 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
2029 err = uverbs_copy_from(&cookie, attrs,
2030 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
2035 num_events = uverbs_attr_ptr_get_array_size(
2036 attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
2042 if (num_events > MAX_NUM_EVENTS)
2045 event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
2046 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);
2048 if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
2051 INIT_LIST_HEAD(&sub_list);
2053 /* Protect from concurrent subscriptions to same XA entries to allow
2056 mutex_lock(&devx_event_table->event_xa_lock);
2057 for (i = 0; i < num_events; i++) {
2061 obj_type = get_dec_obj_type(obj,
2062 event_type_num_list[i]);
2063 key_level1 = event_type_num_list[i] | obj_type << 16;
2065 err = subscribe_event_xa_alloc(devx_event_table,
2072 num_alloc_xa_entries++;
2073 event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
2079 list_add_tail(&event_sub->event_list, &sub_list);
2080 uverbs_uobject_get(&ev_file->uobj);
2082 event_sub->eventfd =
2083 eventfd_ctx_fdget(redirect_fd);
2085 if (IS_ERR(event_sub->eventfd)) {
2086 err = PTR_ERR(event_sub->eventfd);
2087 event_sub->eventfd = NULL;
2092 event_sub->cookie = cookie;
2093 event_sub->ev_file = ev_file;
2094 /* May be needed upon cleanup the devx object/subscription */
2095 event_sub->xa_key_level1 = key_level1;
2096 event_sub->xa_key_level2 = obj_id;
2097 INIT_LIST_HEAD(&event_sub->obj_list);
2100 /* Once all the allocations and the XA data insertions were done we
2101 * can go ahead and add all the subscriptions to the relevant lists
2102 * without concern of a failure.
2104 list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2105 struct devx_event *event;
2106 struct devx_obj_event *obj_event;
2108 list_del_init(&event_sub->event_list);
2110 spin_lock_irq(&ev_file->lock);
2111 list_add_tail_rcu(&event_sub->file_list,
2112 &ev_file->subscribed_events_list);
2113 spin_unlock_irq(&ev_file->lock);
2115 event = xa_load(&devx_event_table->event_xa,
2116 event_sub->xa_key_level1);
2120 list_add_tail_rcu(&event_sub->xa_list,
2121 &event->unaffiliated_list);
2125 obj_event = xa_load(&event->object_ids, obj_id);
2126 WARN_ON(!obj_event);
2127 list_add_tail_rcu(&event_sub->xa_list,
2128 &obj_event->obj_sub_list);
2129 list_add_tail_rcu(&event_sub->obj_list,
	mutex_unlock(&devx_event_table->event_xa_lock);
	return 0;

err:
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		list_del(&event_sub->event_list);

		subscribe_event_xa_dealloc(devx_event_table,
					   event_sub->xa_key_level1,
					   obj,
					   obj_id);

		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);
		uverbs_uobject_put(&event_sub->ev_file->uobj);
		kfree(event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return err;
}
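
/*
 * devx_umem_get() below copies the user VA, length and access flags from
 * the method attributes, validates the access mask against the device,
 * and pins the pages with ib_umem_get(). Any failure is returned to the
 * caller before a firmware command is ever built.
 */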
2155 static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
2156 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	u32 access;
	int err;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	err = uverbs_get_flags32(&access, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ);
	if (err)
		return err;

	err = ib_check_mr_access(&dev->ib_dev, access);
	if (err)
		return err;

	obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);
	return 0;
}
2186 static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
2187 unsigned long pgsz_bitmap)
2189 unsigned long page_size;
	/* Don't bother checking larger page sizes as offset must be zero and
	 * total DEVX umem length must be equal to total umem length.
	 */
	pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length),
					 PAGE_SHIFT),
				   MLX5_ADAPTER_PAGE_SHIFT);
	if (!pgsz_bitmap)
		return 0;

	page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, U64_MAX);
	if (!page_size)
		return 0;
2204 /* If the page_size is less than the CPU page size then we can use the
2205 * offset and create a umem which is a subset of the page list.
2206 * For larger page sizes we can't be sure the DMA list reflects the
2207 * VA so we must ensure that the umem extent is exactly equal to the
	 * page list. Reduce the page size until one of these cases is true.
	 */
	while ((ib_umem_dma_offset(umem, page_size) != 0 ||
		(umem->length % page_size) != 0) &&
		page_size > PAGE_SIZE)
		page_size /= 2;

	return page_size;
}
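
/*
 * Worked example (hypothetical sizes): if ib_umem_find_best_pgoff()
 * proposes 2MiB pages for a 5MiB umem at DMA offset 0, the loop above
 * halves once (5MiB % 2MiB != 0) and settles on 1MiB pages, which divide
 * the length exactly while keeping the offset at zero.
 */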
2218 static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
2219 struct uverbs_attr_bundle *attrs,
2220 struct devx_umem *obj,
2221 struct devx_umem_reg_cmd *cmd)
	unsigned long pgsz_bitmap;
	unsigned int page_size;
	__be64 *mtt;
	void *umem;
	int ret;
	/*
	 * If the user does not pass in pgsz_bitmap then the user promises not
	 * to use umem_offset!=0 in any commands that allocate on top of the
	 * umem.
	 *
	 * If the user wants to use a umem_offset then it must pass in
	 * pgsz_bitmap which guides the maximum page size and thus maximum
	 * object alignment inside the umem. See the PRM.
	 *
	 * Users are not allowed to use IOVA here, mkeys are not supported on
	 * umem.
	 */
	ret = uverbs_get_const_default(&pgsz_bitmap, attrs,
			MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
			GENMASK_ULL(63,
				    min(PAGE_SHIFT, MLX5_ADAPTER_PAGE_SHIFT)));
	if (ret)
		return ret;

	page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap);
	if (!page_size)
		return -EINVAL;
2252 cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
2253 (MLX5_ST_SZ_BYTES(mtt) *
2254 ib_umem_num_dma_blocks(obj->umem, page_size));
2255 cmd->in = uverbs_zalloc(attrs, cmd->inlen);
2256 if (IS_ERR(cmd->in))
2257 return PTR_ERR(cmd->in);
2259 umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
2260 mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
2262 MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
2263 MLX5_SET64(umem, umem, num_of_mtt,
2264 ib_umem_num_dma_blocks(obj->umem, page_size));
2265 MLX5_SET(umem, umem, log_page_size,
2266 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
2267 MLX5_SET(umem, umem, page_offset,
2268 ib_umem_dma_offset(obj->umem, page_size));
	mlx5_ib_populate_pas(obj->umem, page_size, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
				     MLX5_IB_MTT_READ);
	return 0;
}
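
/*
 * The mailbox built above is a create_umem_in header followed by a umem
 * context (num_of_mtt, log_page_size biased by MLX5_ADAPTER_PAGE_SHIFT,
 * page_offset) and a trailing MTT array with one entry per DMA block of
 * the chosen page size.
 */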
2276 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
2277 struct uverbs_attr_bundle *attrs)
2279 struct devx_umem_reg_cmd cmd;
2280 struct devx_umem *obj;
2281 struct ib_uobject *uobj = uverbs_attr_get_uobject(
2282 attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
2284 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
2285 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 obj_id;
	int err;

	if (!c->devx_uid)
		return -EINVAL;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd);
	if (err)
		goto err_umem_release;

	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;
	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
			     sizeof(obj_id));
	return err;

err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}
2326 static int devx_umem_cleanup(struct ib_uobject *uobject,
2327 enum rdma_remove_reason why,
2328 struct uverbs_attr_bundle *attrs)
2330 struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (err)
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}
2343 static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
2344 unsigned long event_type)
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;
2350 if (!MLX5_CAP_GEN(dev, event_cap))
2351 return is_legacy_unaffiliated_event_num(event_type);
2353 unaff_events = MLX5_CAP_DEV_EVENT(dev,
2354 user_unaffiliated_events);
2355 WARN_ON(event_type > MAX_SUPP_EVENT_NUM);
2357 mask_entry = event_type / 64;
2358 mask_bit = event_type % 64;
	if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
		return false;

	return true;
}
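
/*
 * Worked example: for event_type 19 (0x13), mask_entry is 19 / 64 = 0 and
 * mask_bit is 19 % 64 = 19, so bit 19 of the first __be64 word of the
 * user_unaffiliated_events capability decides whether the event may be
 * treated as unaffiliated.
 */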
2366 static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
	struct mlx5_eqe *eqe = data;
	u32 obj_id = 0;
2371 switch (event_type) {
2372 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
2373 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
2374 case MLX5_EVENT_TYPE_PATH_MIG:
2375 case MLX5_EVENT_TYPE_COMM_EST:
2376 case MLX5_EVENT_TYPE_SQ_DRAINED:
2377 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
2378 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
2379 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
2380 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
2381 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_CQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
		break;
	default:
		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
		break;
	}

	return obj_id;
}
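
/*
 * Queue one event on a subscriber's FD. In "omit data" mode the
 * subscription itself is linked on the FD's event list, at most once, so
 * back-to-back events coalesce into a single cookie-only entry. Otherwise
 * a devx_async_event_data carrying the raw EQE is allocated atomically;
 * if that allocation fails the FD is marked with an overflow error that
 * the next read() reports.
 */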
static int deliver_event(struct devx_event_subscription *event_sub,
			 const void *data)
{
2405 struct devx_async_event_file *ev_file;
2406 struct devx_async_event_data *event_data;
2407 unsigned long flags;
2409 ev_file = event_sub->ev_file;
2411 if (ev_file->omit_data) {
2412 spin_lock_irqsave(&ev_file->lock, flags);
		if (!list_empty(&event_sub->event_list) ||
		    ev_file->is_destroyed) {
			spin_unlock_irqrestore(&ev_file->lock, flags);
			return 0;
		}

		list_add_tail(&event_sub->event_list, &ev_file->event_list);
		spin_unlock_irqrestore(&ev_file->lock, flags);
		wake_up_interruptible(&ev_file->poll_wait);
		return 0;
	}
	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
			     GFP_ATOMIC);
	if (!event_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		ev_file->is_overflow_err = 1;
		spin_unlock_irqrestore(&ev_file->lock, flags);
		return -ENOMEM;
	}
2434 event_data->hdr.cookie = event_sub->cookie;
2435 memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));
	spin_lock_irqsave(&ev_file->lock, flags);
	if (!ev_file->is_destroyed)
		list_add_tail(&event_data->list, &ev_file->event_list);
	else
		kfree(event_data);
	spin_unlock_irqrestore(&ev_file->lock, flags);
	wake_up_interruptible(&ev_file->poll_wait);

	return 0;
}
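
/*
 * Fan one hardware event out to an RCU-protected subscription list:
 * eventfd subscribers only get their counter bumped, all others have the
 * event queued for read() via deliver_event().
 */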
static void dispatch_event_fd(struct list_head *fd_list,
			      const void *data)
{
	struct devx_event_subscription *item;

	list_for_each_entry_rcu(item, fd_list, xa_list) {
		if (item->eventfd)
			eventfd_signal(item->eventfd, 1);
		else
			deliver_event(item, data);
	}
}
2461 static int devx_event_notifier(struct notifier_block *nb,
2462 unsigned long event_type, void *data)
2464 struct mlx5_devx_event_table *table;
2465 struct mlx5_ib_dev *dev;
2466 struct devx_event *event;
	struct devx_obj_event *obj_event;
	u16 obj_type = 0;
	bool is_unaffiliated;
	u32 obj_id;
2472 /* Explicit filtering to kernel events which may occur frequently */
	if (event_type == MLX5_EVENT_TYPE_CMD ||
	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
		return NOTIFY_DONE;
2477 table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
2478 dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
2479 is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);
2481 if (!is_unaffiliated)
2482 obj_type = get_event_obj_type(event_type, data);
	rcu_read_lock();
	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
	if (!event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	if (is_unaffiliated) {
		dispatch_event_fd(&event->unaffiliated_list, data);
		rcu_read_unlock();
		return NOTIFY_OK;
	}

	obj_id = devx_get_obj_id_from_event(event_type, data);
	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	dispatch_event_fd(&obj_event->obj_sub_list, data);
	rcu_read_unlock();
	return NOTIFY_OK;
}
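
/*
 * The first-level XA key packs the event number into the low 16 bits and
 * the object type into the next 16, matching the subscribe path: event
 * number 0x4 on object type 0x2 lives at key (0x2 << 16) | 0x4, while
 * unaffiliated events keep obj_type 0 and degenerate to the bare event
 * number.
 */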
2510 int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	int uid;

	uid = mlx5_ib_devx_create(dev, false);
	if (uid > 0) {
		dev->devx_whitelist_uid = uid;
		xa_init(&table->event_xa);
		mutex_init(&table->event_xa_lock);
		MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
		mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
	}

	return 0;
}
2527 void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
2529 struct mlx5_devx_event_table *table = &dev->devx_event_table;
2530 struct devx_event_subscription *sub, *tmp;
	struct devx_event *event;
	void *entry;
	unsigned long id;
2535 if (dev->devx_whitelist_uid) {
2536 mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
2537 mutex_lock(&dev->devx_event_table.event_xa_lock);
		xa_for_each(&table->event_xa, id, entry) {
			event = entry;
			list_for_each_entry_safe(
				sub, tmp, &event->unaffiliated_list, xa_list)
				devx_cleanup_subscription(dev, sub);
			kfree(entry);
		}
		mutex_unlock(&dev->devx_event_table.event_xa_lock);
		xa_destroy(&table->event_xa);

		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
	}
}
2552 static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
2553 size_t count, loff_t *pos)
2555 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2556 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *event;
	int ret = 0;
	size_t eventsz;
2561 spin_lock_irq(&ev_queue->lock);
2563 while (list_empty(&ev_queue->event_list)) {
2564 spin_unlock_irq(&ev_queue->lock);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
2569 if (wait_event_interruptible(
2570 ev_queue->poll_wait,
2571 (!list_empty(&ev_queue->event_list) ||
2572 ev_queue->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_queue->lock);
		if (ev_queue->is_destroyed) {
			spin_unlock_irq(&ev_queue->lock);
			return -EIO;
		}
	}
2583 event = list_entry(ev_queue->event_list.next,
2584 struct devx_async_data, list);
2585 eventsz = event->cmd_out_len +
2586 sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);
	if (eventsz > count) {
		spin_unlock_irq(&ev_queue->lock);
		return -ENOSPC;
	}
2593 list_del(ev_queue->event_list.next);
2594 spin_unlock_irq(&ev_queue->lock);
	if (copy_to_user(buf, &event->hdr, eventsz))
		ret = -EFAULT;
	else
		ret = eventsz;

	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
	kvfree(event);
	return ret;
}
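
/*
 * Userspace consumption sketch (hypothetical buffer sizes, not part of
 * this file): after issuing MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY with a
 * chosen wr_id, an application would do something like:
 *
 *	struct {
 *		struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
 *		__u8 out[512];
 *	} resp;
 *	ssize_t n = read(cmd_fd, &resp, sizeof(resp));
 *
 * On success, resp.hdr.wr_id identifies the completed query and the
 * firmware output follows the header. A buffer smaller than the queued
 * completion fails with -ENOSPC rather than truncating.
 */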
2606 static __poll_t devx_async_cmd_event_poll(struct file *filp,
2607 struct poll_table_struct *wait)
2609 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2610 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2611 __poll_t pollflags = 0;
2613 poll_wait(filp, &ev_queue->poll_wait, wait);
2615 spin_lock_irq(&ev_queue->lock);
2616 if (ev_queue->is_destroyed)
2617 pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2618 else if (!list_empty(&ev_queue->event_list))
2619 pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}
2625 static const struct file_operations devx_async_cmd_event_fops = {
2626 .owner = THIS_MODULE,
2627 .read = devx_async_cmd_event_read,
2628 .poll = devx_async_cmd_event_poll,
2629 .release = uverbs_uobject_fd_release,
	.llseek = no_llseek,
};
2633 static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
2634 size_t count, loff_t *pos)
2636 struct devx_async_event_file *ev_file = filp->private_data;
2637 struct devx_event_subscription *event_sub;
	struct devx_async_event_data *event;
	int ret = 0;
	size_t eventsz;
	bool omit_data;
	void *event_data;
2644 omit_data = ev_file->omit_data;
2646 spin_lock_irq(&ev_file->lock);
	if (ev_file->is_overflow_err) {
		ev_file->is_overflow_err = 0;
		spin_unlock_irq(&ev_file->lock);
		return -EOVERFLOW;
	}
2655 while (list_empty(&ev_file->event_list)) {
2656 spin_unlock_irq(&ev_file->lock);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
2661 if (wait_event_interruptible(ev_file->poll_wait,
2662 (!list_empty(&ev_file->event_list) ||
2663 ev_file->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_file->lock);
		if (ev_file->is_destroyed) {
			spin_unlock_irq(&ev_file->lock);
			return -EIO;
		}
	}
	if (omit_data) {
		event_sub = list_first_entry(&ev_file->event_list,
					struct devx_event_subscription,
					event_list);
		eventsz = sizeof(event_sub->cookie);
		event_data = &event_sub->cookie;
	} else {
		event = list_first_entry(&ev_file->event_list,
				      struct devx_async_event_data, list);
		eventsz = sizeof(struct mlx5_eqe) +
			sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
		event_data = &event->hdr;
	}
	if (eventsz > count) {
		spin_unlock_irq(&ev_file->lock);
		return -EINVAL;
	}
	if (omit_data)
		list_del_init(&event_sub->event_list);
	else
		list_del(&event->list);
2698 spin_unlock_irq(&ev_file->lock);
	if (copy_to_user(buf, event_data, eventsz))
		/* This points to an application issue, not a kernel concern */
		ret = -EFAULT;
	else
		ret = eventsz;

	if (!omit_data)
		kfree(event);
	return ret;
}
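
/*
 * Note the asymmetry with the "omit data" channel: there a read() returns
 * only the 8-byte cookie, while the default channel returns the
 * mlx5_ib_uapi_devx_async_event_hdr followed by the raw EQE. Undersized
 * buffers fail with -EINVAL instead of being truncated.
 */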
2711 static __poll_t devx_async_event_poll(struct file *filp,
2712 struct poll_table_struct *wait)
2714 struct devx_async_event_file *ev_file = filp->private_data;
2715 __poll_t pollflags = 0;
2717 poll_wait(filp, &ev_file->poll_wait, wait);
2719 spin_lock_irq(&ev_file->lock);
2720 if (ev_file->is_destroyed)
2721 pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2722 else if (!list_empty(&ev_file->event_list))
2723 pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_file->lock);

	return pollflags;
}
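
/*
 * Subscriptions are freed through call_rcu() so that a dispatcher still
 * walking a list under list_for_each_entry_rcu() never touches a freed
 * entry; the eventfd context and the FD uobject reference are dropped
 * together with it.
 */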
2729 static void devx_free_subscription(struct rcu_head *rcu)
2731 struct devx_event_subscription *event_sub =
2732 container_of(rcu, struct devx_event_subscription, rcu);
2734 if (event_sub->eventfd)
2735 eventfd_ctx_put(event_sub->eventfd);
	uverbs_uobject_put(&event_sub->ev_file->uobj);
	kfree(event_sub);
}
2740 static const struct file_operations devx_async_event_fops = {
2741 .owner = THIS_MODULE,
2742 .read = devx_async_event_read,
2743 .poll = devx_async_event_poll,
2744 .release = uverbs_uobject_fd_release,
	.llseek = no_llseek,
};
2748 static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
2749 enum rdma_remove_reason why)
2751 struct devx_async_cmd_event_file *comp_ev_file =
		container_of(uobj, struct devx_async_cmd_event_file,
			     uobj);
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2755 struct devx_async_data *entry, *tmp;
2757 spin_lock_irq(&ev_queue->lock);
2758 ev_queue->is_destroyed = 1;
2759 spin_unlock_irq(&ev_queue->lock);
2760 wake_up_interruptible(&ev_queue->poll_wait);
2762 mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
2764 spin_lock_irq(&comp_ev_file->ev_queue.lock);
2765 list_for_each_entry_safe(entry, tmp,
2766 &comp_ev_file->ev_queue.event_list, list) {
		list_del(&entry->list);
		kvfree(entry);
	}
	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
}
2773 static void devx_async_event_destroy_uobj(struct ib_uobject *uobj,
2774 enum rdma_remove_reason why)
2776 struct devx_async_event_file *ev_file =
		container_of(uobj, struct devx_async_event_file,
			     uobj);
	struct devx_event_subscription *event_sub, *event_sub_tmp;
2780 struct mlx5_ib_dev *dev = ev_file->dev;
2782 spin_lock_irq(&ev_file->lock);
2783 ev_file->is_destroyed = 1;
2785 /* free the pending events allocation */
	if (ev_file->omit_data) {
		struct devx_event_subscription *event_sub, *tmp;

		list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
					 event_list)
			list_del_init(&event_sub->event_list);

	} else {
		struct devx_async_event_data *entry, *tmp;

		list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
					 list) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
2803 spin_unlock_irq(&ev_file->lock);
2804 wake_up_interruptible(&ev_file->poll_wait);
2806 mutex_lock(&dev->devx_event_table.event_xa_lock);
2807 /* delete the subscriptions which are related to this FD */
2808 list_for_each_entry_safe(event_sub, event_sub_tmp,
2809 &ev_file->subscribed_events_list, file_list) {
2810 devx_cleanup_subscription(dev, event_sub);
2811 list_del_rcu(&event_sub->file_list);
2812 /* subscription may not be used by the read API any more */
2813 call_rcu(&event_sub->rcu, devx_free_subscription);
2815 mutex_unlock(&dev->devx_event_table.event_xa_lock);
	put_device(&dev->ib_dev.dev);
}
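
/*
 * Everything below is declarative: the DECLARE_UVERBS_* tables describe
 * the DEVX methods, their attributes (IDR handles, inline command blobs,
 * FDs and constants) and the objects that own them. The uverbs core uses
 * these tables for attribute marshalling and access checks, so the
 * handlers above only deal with the typed uverbs_copy_from()/attr helpers.
 */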
2820 DECLARE_UVERBS_NAMED_METHOD(
2821 MLX5_IB_METHOD_DEVX_UMEM_REG,
2822 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
2823 MLX5_IB_OBJECT_DEVX_UMEM,
2826 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
2827 UVERBS_ATTR_TYPE(u64),
2829 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
2830 UVERBS_ATTR_TYPE(u64),
2832 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2833 enum ib_access_flags),
2834 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
2836 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
2837 UVERBS_ATTR_TYPE(u32),
2840 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2841 MLX5_IB_METHOD_DEVX_UMEM_DEREG,
2842 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
2843 MLX5_IB_OBJECT_DEVX_UMEM,
2844 UVERBS_ACCESS_DESTROY,
2847 DECLARE_UVERBS_NAMED_METHOD(
2848 MLX5_IB_METHOD_DEVX_QUERY_EQN,
2849 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
2850 UVERBS_ATTR_TYPE(u32),
2852 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
2853 UVERBS_ATTR_TYPE(u32),
2856 DECLARE_UVERBS_NAMED_METHOD(
2857 MLX5_IB_METHOD_DEVX_QUERY_UAR,
2858 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
2859 UVERBS_ATTR_TYPE(u32),
2861 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
2862 UVERBS_ATTR_TYPE(u32),
2865 DECLARE_UVERBS_NAMED_METHOD(
2866 MLX5_IB_METHOD_DEVX_OTHER,
2868 MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
2869 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2872 UVERBS_ATTR_PTR_OUT(
2873 MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
2874 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2877 DECLARE_UVERBS_NAMED_METHOD(
2878 MLX5_IB_METHOD_DEVX_OBJ_CREATE,
2879 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
2880 MLX5_IB_OBJECT_DEVX_OBJ,
2884 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
2885 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2888 UVERBS_ATTR_PTR_OUT(
2889 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
2890 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2893 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2894 MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
2895 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
2896 MLX5_IB_OBJECT_DEVX_OBJ,
2897 UVERBS_ACCESS_DESTROY,
2900 DECLARE_UVERBS_NAMED_METHOD(
2901 MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
2902 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
2903 UVERBS_IDR_ANY_OBJECT,
2904 UVERBS_ACCESS_WRITE,
2907 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
2908 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2911 UVERBS_ATTR_PTR_OUT(
2912 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
2913 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2916 DECLARE_UVERBS_NAMED_METHOD(
2917 MLX5_IB_METHOD_DEVX_OBJ_QUERY,
2918 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2919 UVERBS_IDR_ANY_OBJECT,
2923 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2924 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2927 UVERBS_ATTR_PTR_OUT(
2928 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
2929 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2932 DECLARE_UVERBS_NAMED_METHOD(
2933 MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
2934 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2935 UVERBS_IDR_ANY_OBJECT,
2939 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2940 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2943 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
2945 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
2946 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2949 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
2950 UVERBS_ATTR_TYPE(u64),
2953 DECLARE_UVERBS_NAMED_METHOD(
2954 MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
2955 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
2956 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2959 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
2960 MLX5_IB_OBJECT_DEVX_OBJ,
2963 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
2964 UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
2967 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
2968 UVERBS_ATTR_TYPE(u64),
2970 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
2971 UVERBS_ATTR_TYPE(u32),
2974 DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
2975 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
2976 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
2977 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
2978 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));
2980 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
2981 UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
2982 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
2983 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
2984 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
2985 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
2986 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
2988 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
2989 UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
2990 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
2991 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
2994 DECLARE_UVERBS_NAMED_METHOD(
2995 MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
2996 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
2997 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3001 DECLARE_UVERBS_NAMED_OBJECT(
3002 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3003 UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
3004 devx_async_cmd_event_destroy_uobj,
3005 &devx_async_cmd_event_fops, "[devx_async_cmd]",
3007 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
3009 DECLARE_UVERBS_NAMED_METHOD(
3010 MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
3011 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
3012 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3015 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
3016 enum mlx5_ib_uapi_devx_create_event_channel_flags,
3019 DECLARE_UVERBS_NAMED_OBJECT(
3020 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3021 UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
3022 devx_async_event_destroy_uobj,
3023 &devx_async_event_fops, "[devx_async_event]",
3025 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
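
/*
 * Each object tree below is chained into the device's uAPI only when
 * devx_is_supported() sees a non-zero log_max_uctx capability, so
 * unsupported devices simply do not expose any DEVX method.
 */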
static bool devx_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
}
3034 const struct uapi_definition mlx5_ib_devx_defs[] = {
3035 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3036 MLX5_IB_OBJECT_DEVX,
3037 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3038 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3039 MLX5_IB_OBJECT_DEVX_OBJ,
3040 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3041 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3042 MLX5_IB_OBJECT_DEVX_UMEM,
3043 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3044 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3045 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3046 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3047 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3048 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	{},
};