// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
enum devx_obj_flags {
	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
	DEVX_OBJ_FLAGS_DCT = 1 << 1,
};

struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	struct list_head list;
	struct ib_uobject *fd_uobj;
	struct mlx5_async_work cb_work;
	u16 cmd_out_len;
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};

#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
	struct mlx5_core_dev	*mdev;
	u64			obj_id;
	u32			dinlen; /* destroy inbox length */
	u32			dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
	u32			flags;
	union {
		struct mlx5_ib_devx_mr	devx_mr;
		struct mlx5_core_dct	core_dct;
	};
};

struct devx_umem {
	struct mlx5_core_dev		*mdev;
	struct ib_umem			*umem;
	u32				page_offset;
	int				page_shift;
	int				ncont;
	u32				dinlen;
	u32				dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};

struct devx_umem_reg_cmd {
	void				*in;
	u32				inlen;
	u32				out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};
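/*
 * Both devx_obj and devx_umem cache a fully built destroy command
 * (dinbox/dinlen) at creation time, so cleanup can fire the destroy
 * mailbox directly instead of re-deriving it from the create command.
 */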
static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{
	return to_mucontext(ib_uverbs_get_ucontext(attrs));
}
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *uctx;
	int err;
	u16 uid;
	u32 cap = 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
		return -EINVAL;

	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
	if (is_user && capable(CAP_NET_RAW) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
		cap |= MLX5_UCTX_CAP_RAW_TX;
	if (is_user && capable(CAP_SYS_RAWIO) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
	     MLX5_UCTX_CAP_INTERNAL_DEV_RES))
		cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx, cap, cap);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return uid;
}
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_DESTROY_TIR:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
				    obj_id);
		return true;

	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
				    table_id);
		return true;
	default:
		return false;
	}
}
bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
		*counter_id = MLX5_GET(dealloc_flow_counter_in,
				       devx_obj->dinbox,
				       flow_counter_id);
		return true;
	}

	return false;
}
/*
 * As the obj_id in the firmware is not globally unique the object type
 * must be considered upon checking for a valid object id.
 * For that the opcode of the creator command is encoded as part of the obj_id.
 */
static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{
	return ((u64)opcode << 32) | obj_id;
}
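/*
 * For example, a TIR whose tirn is 0x2a is tracked as
 * ((u64)MLX5_CMD_OP_CREATE_TIR << 32) | 0x2a, so it cannot collide with a
 * TIS (or any other object type) that happens to reuse the number 0x2a.
 */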
static u64 devx_get_obj_id(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u64 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_type) << 16,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
					MLX5_GET(query_mkey_in, in,
						 mkey_index));
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(query_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(modify_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(query_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(modify_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(query_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(modify_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(query_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(modify_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(query_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(modify_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(query_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(modify_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(query_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(modify_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(query_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(modify_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
					MLX5_GET(query_flow_group_in, in,
						 group_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(query_fte_in, in,
						 flow_index));
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(set_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
					MLX5_GET(query_q_counter_in, in,
						 counter_set_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
					MLX5_GET(query_flow_counter_in, in,
						 flow_counter_id));
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in,
						 vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in,
						 xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
				(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
				 MLX5_GET(query_packet_reformat_context_in,
					  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}

	return obj_id;
}
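/*
 * Note that "destructive" verbs such as 2ERR_QP/2RST_QP also resolve to
 * MLX5_CMD_OP_CREATE_QP above: the encoding always names the creator
 * opcode, never the verb currently being validated.
 */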
static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
				 struct ib_uobject *uobj, const void *in)
{
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	u64 obj_id = devx_get_obj_id(in);

	if (!obj_id)
		return false;

	switch (uobj_get_object_id(uobj)) {
	case UVERBS_OBJECT_CQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
				      to_mcq(uobj->object)->mcq.cqn) ==
				      obj_id;

	case UVERBS_OBJECT_SRQ:
	{
		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
		u16 opcode;

		switch (srq->common.res) {
		case MLX5_RES_XSRQ:
			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
			break;
		case MLX5_RES_XRQ:
			opcode = MLX5_CMD_OP_CREATE_XRQ;
			break;
		default:
			if (!dev->mdev->issi)
				opcode = MLX5_CMD_OP_CREATE_SRQ;
			else
				opcode = MLX5_CMD_OP_CREATE_RMP;
		}

		return get_enc_obj_id(opcode,
				      to_msrq(uobj->object)->msrq.srqn) ==
				      obj_id;
	}

	case UVERBS_OBJECT_QP:
	{
		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
		enum ib_qp_type	qp_type = qp->ibqp.qp_type;

		if (qp_type == IB_QPT_RAW_PACKET ||
		    (qp->flags & MLX5_IB_QP_UNDERLAY)) {
			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
							 &qp->raw_packet_qp;
			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					       rq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					       sq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					       rq->tirn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					       sq->tisn) == obj_id);
		}

		if (qp_type == MLX5_IB_QPT_DCT)
			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					      qp->dct.mdct.mqp.qpn) == obj_id;

		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
				      qp->ibqp.qp_num) == obj_id;
	}

	case UVERBS_OBJECT_WQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
				      to_mrwq(uobj->object)->core_qp.qpn) ==
				      obj_id;

	case UVERBS_OBJECT_RWQ_IND_TBL:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
				      to_mrwq_ind_table(uobj->object)->rqtn) ==
				      obj_id;

	case MLX5_IB_OBJECT_DEVX_OBJ:
		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;

	default:
		return false;
	}
}
static void devx_set_umem_valid(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
	{
		void *cqc;

		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_QP:
	{
		void *qpc;

		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_RQ:
	{
		void *rqc, *wq;

		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
		wq  = MLX5_ADDR_OF(rqc, rqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_SQ:
	{
		void *sqc, *wq;

		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
		wq = MLX5_ADDR_OF(sqc, sqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_MODIFY_CQ:
		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
		break;

	case MLX5_CMD_OP_CREATE_RMP:
	{
		void *rmpc, *wq;

		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRQ:
	{
		void *xrqc, *wq;

		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	{
		void *xrc_srqc;

		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
					xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
		break;
	}

	default:
		return;
	}
}
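/*
 * The *_umem_valid bits mark the umem handles embedded in the create
 * mailbox as valid, so firmware resolves the queue/doorbell buffers from
 * DEVX-registered umem objects rather than a kernel-built page list.
 */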
static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	default:
		return false;
	}
}
static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}
static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}
static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
		return true;
	default:
		return false;
	}
}
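/*
 * Whitelisted commands may run without a dedicated uid: devx_get_uid()
 * below falls back to the device-wide devx_whitelist_uid for them, while
 * every other command requires the caller's own devx uid.
 */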
static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
	if (devx_is_whitelist_cmd(cmd_in)) {
		struct mlx5_ib_dev *dev;

		if (c->devx_uid)
			return c->devx_uid;

		dev = to_mdev(c->ibucontext.device);
		if (dev->devx_whitelist_uid)
			return dev->devx_whitelist_uid;

		return -EOPNOTSUPP;
	}

	if (!c->devx_uid)
		return -EINVAL;

	return c->devx_uid;
}
static bool devx_is_general_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	if (opcode >= MLX5_CMD_OP_GENERAL_START &&
	    opcode < MLX5_CMD_OP_GENERAL_END)
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
		return true;
	default:
		return false;
	}
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}
/*
 * Security note:
 * The hardware protection mechanism works like this: each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If there is no match the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, in which case other users may
 * ring a doorbell on its objects.
 * The consequence of that is that another user can schedule a QP/SQ of the
 * buggy user for execution (just insert it into the hardware schedule queue
 * or arm its CQ for event generation); no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;
	int uid;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	/* Only whitelisted general HCA commands are allowed for this method */
	if (!devx_is_general_cmd(cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);
}
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;

	case MLX5_CMD_OP_CREATE_UMEM:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_UMEM);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	default:
		/* The entry must match one of the devx_is_obj_create_cmd() opcodes */
		WARN_ON(true);
		break;
	}
}
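/*
 * Example: for CREATE_FLOW_TABLE the destroy inbox built above carries the
 * other_vport/vport_number/table_type fields copied from the create mailbox
 * plus the table_id returned by firmware, exactly the inputs that
 * DESTROY_FLOW_TABLE expects.
 */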
static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	unsigned long flags;
	struct mlx5_core_mkey *mkey;
	void *mkc;
	u8 key;
	int err;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key),
				mkey);
	write_unlock_irqrestore(&table->lock, flags);
	return err;
}
static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{
	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
			MLX5_FLD_SZ_BYTES(create_mkey_in,
			memory_key_mkey_entry);
	void *mkc;
	u8 access_mode;

	if (in_len < min_len)
		return -EINVAL;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
	    access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
		return 0;
	}

	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
	return 0;
}
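/*
 * KLM/KSM mkeys are indirect: they point at other mkeys rather than a
 * umem, so (when ODP is enabled) they are flagged for tracking in the mkey
 * table instead of having mkey_umem_valid set.
 */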
static void devx_free_indirect_mkey(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct devx_obj, devx_mr.rcu));
}
/*
 * This deletion from the radix tree must happen before the underlying mkey
 * is destroyed. Otherwise a race can occur: another thread could be handed
 * the same mkey index before this entry is removed, and its insertion into
 * the tree would then fail.
 *
 * Note:
 * An error from the destroy command is not expected unless some other
 * indirect mkey points to this one. In the kernel cleanup flow the object
 * is simply destroyed in the iterative destruction pass. In the user flow,
 * if the application did not tear down in the expected order that is its
 * own problem; the mkey is no longer part of the tree, so in both cases
 * the kernel is safe.
 */
static void devx_cleanup_mkey(struct devx_obj *obj)
{
	struct mlx5_mkey_table *table = &obj->mdev->priv.mkey_table;
	unsigned long flags;

	write_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree, mlx5_base_mkey(obj->devx_mr.mmkey.key));
	write_unlock_irqrestore(&table->lock, flags);
}
static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj = uobject->object;
	int ret;

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
		devx_cleanup_mkey(obj);

	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
	else
		ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
				    sizeof(out));
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		struct mlx5_ib_dev *dev =
			mlx5_udata_to_mdev(&attrs->driver_udata);

		call_srcu(&dev->mr_srcu, &obj->devx_mr.rcu,
			  devx_free_indirect_mkey);
		return ret;
	}

	kfree(obj);
	return ret;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	u16 obj_type = 0;
	int err;
	int uid;
	u32 obj_id;
	u16 opcode;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
		obj->flags |= DEVX_OBJ_FLAGS_DCT;
		err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
					   cmd_in, cmd_in_len,
					   cmd_out, cmd_out_len);
	} else {
		err = mlx5_cmd_exec(dev->mdev, cmd_in,
				    cmd_in_len,
				    cmd_out, cmd_out_len);
	}

	if (err)
		goto obj_free;

	uobj->object = obj;
	obj->mdev = dev->mdev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto err_copy;

	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);

	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);

	return 0;

err_copy:
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
		devx_cleanup_mkey(obj);
obj_destroy:
	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
	else
		mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
			      sizeof(out));
obj_free:
	kfree(obj);
	return err;
}
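/*
 * Note the unwind order above: the indirect mkey is removed from the tree
 * before the firmware object is destroyed, matching the ordering that
 * devx_obj_cleanup() relies on.
 */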
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	void *cmd_out;
	int err;
	int uid;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	devx_set_umem_valid(cmd_in);

	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	void *cmd_out;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}
struct devx_async_event_queue {
	spinlock_t		lock;
	wait_queue_head_t	poll_wait;
	struct list_head	event_list;
	atomic_t		bytes_in_use;
	u8			is_destroyed:1;
};

struct devx_async_cmd_event_file {
	struct ib_uobject		uobj;
	struct devx_async_event_queue	ev_queue;
	struct mlx5_async_ctx		async_ctx;
};
static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	atomic_set(&ev_queue->bytes_in_use, 0);
	ev_queue->is_destroyed = 0;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_async_cmd_event_file *ev_file;

	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);

	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
			       uobj);
	devx_init_event_queue(&ev_file->ev_queue);
	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
	return 0;
}
static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct ib_uobject *fd_uobj = async_data->fd_uobj;
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_event_queue *ev_queue;
	unsigned long flags;

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);
	ev_queue = &ev_file->ev_queue;

	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	fput(fd_uobj->object);
}
#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
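/*
 * bytes_in_use caps the memory held by completed-but-unread async replies;
 * once a file has 1MB outstanding, further async queries fail with -EAGAIN
 * until the reader drains the queue.
 */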
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->fd_uobj = fd_uobj;

	get_file(fd_uobj->object);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
			       uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
			       async_data->hdr.out_data,
			       async_data->cmd_out_len,
			       devx_query_callback, &async_data->cb_work);
	if (err)
		goto cb_err;

	return 0;

cb_err:
	fput(fd_uobj->object);
free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}
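/*
 * The get_file() above keeps the event FD alive while the command is in
 * flight; the reference is dropped either in devx_query_callback() or in
 * the cb_err unwind path.
 */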
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	u32 access;
	int npages;
	int err;
	u32 page_mask;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	err = uverbs_get_flags32(&access, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ);
	if (err)
		return err;

	err = ib_check_mr_access(access);
	if (err)
		return err;

	obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);

	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont, NULL);

	if (!npages) {
		ib_umem_release(obj->umem);
		return -EINVAL;
	}

	page_mask = (1 << obj->page_shift) - 1;
	obj->page_offset = obj->umem->address & page_mask;

	return 0;
}
static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
				   struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd)
{
	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		     (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
	return PTR_ERR_OR_ZERO(cmd->in);
}
static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
				    struct devx_umem *obj,
				    struct devx_umem_reg_cmd *cmd)
{
	void *umem;
	__be64 *mtt;

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			     MLX5_IB_MTT_READ);
}
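/*
 * The MTT array is populated with MLX5_IB_MTT_READ always set and
 * MLX5_IB_MTT_WRITE only when the underlying umem is writable, so a
 * read-only mapping can never be exposed to the device as writable.
 */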
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int err;

	if (!c->devx_uid)
		return -EINVAL;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
	if (err)
		goto err_umem_release;

	devx_umem_reg_cmd_build(dev, obj, &cmd);

	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
	if (err)
		goto err_umem_destroy;

	return 0;

err_umem_destroy:
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}
static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why,
			     struct uverbs_attr_bundle *attrs)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(err, why, uobject))
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}
static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *event;
	int ret = 0;
	size_t eventsz;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(
			    ev_queue->poll_wait,
			    (!list_empty(&ev_queue->event_list) ||
			     ev_queue->is_destroyed))) {
			return -ERESTARTSYS;
		}

		if (list_empty(&ev_queue->event_list) &&
		    ev_queue->is_destroyed)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next,
			   struct devx_async_data, list);
	eventsz = event->cmd_out_len +
			sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);

	if (eventsz > count) {
		spin_unlock_irq(&ev_queue->lock);
		return -ENOSPC;
	}

	list_del(ev_queue->event_list.next);
	spin_unlock_irq(&ev_queue->lock);

	if (copy_to_user(buf, &event->hdr, eventsz))
		ret = -EFAULT;
	else
		ret = eventsz;

	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
	kvfree(event);
	return ret;
}
static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uobject *uobj = filp->private_data;
	struct devx_async_cmd_event_file *comp_ev_file = container_of(
		uobj, struct devx_async_cmd_event_file, uobj);
	struct devx_async_data *entry, *tmp;

	spin_lock_irq(&comp_ev_file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp,
				 &comp_ev_file->ev_queue.event_list, list)
		kvfree(entry);
	spin_unlock_irq(&comp_ev_file->ev_queue.lock);

	uverbs_close_fd(filp);
	return 0;
}
static __poll_t devx_async_cmd_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (ev_queue->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}
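/*
 * A destroyed queue additionally reports EPOLLRDHUP so that pollers can
 * distinguish "events pending" from "device going away".
 */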
static const struct file_operations devx_async_cmd_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_cmd_event_read,
	.poll    = devx_async_cmd_event_poll,
	.release = devx_async_cmd_event_close,
	.llseek	 = no_llseek,
};
static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
						enum rdma_remove_reason why)
{
	struct devx_async_cmd_event_file *comp_ev_file =
		container_of(uobj, struct devx_async_cmd_event_file,
			     uobj);
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;

	spin_lock_irq(&ev_queue->lock);
	ev_queue->is_destroyed = 1;
	spin_unlock_irq(&ev_queue->lock);

	if (why == RDMA_REMOVE_DRIVER_REMOVE)
		wake_up_interruptible(&ev_queue->poll_wait);

	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
	return 0;
}
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
		u16, UA_MANDATORY),
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UVERBS_ACCESS_READ,
		UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
		UVERBS_ATTR_TYPE(u64),
		UA_MANDATORY));

DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
			MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
			     devx_hot_unplug_async_cmd_event_file,
			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
static bool devx_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
}
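/*
 * All four DEVX uAPI trees below are exposed only when the device reports
 * a nonzero log_max_uctx, i.e. firmware support for user contexts.
 */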
const struct uapi_definition mlx5_ib_devx_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_OBJ,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_UMEM,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	{},
};