// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include "devx.h"
#include "qp.h"
#include <linux/xarray.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

static void dispatch_event_fd(struct list_head *fd_list, const void *data);

enum devx_obj_flags {
        DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
        DEVX_OBJ_FLAGS_DCT = 1 << 1,
        DEVX_OBJ_FLAGS_CQ = 1 << 2,
};

struct devx_async_data {
        struct mlx5_ib_dev *mdev;
        struct list_head list;
        struct devx_async_cmd_event_file *ev_file;
        struct mlx5_async_work cb_work;
        u16 cmd_out_len;
        /* must be last field in this structure */
        struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};

struct devx_async_event_data {
        struct list_head list; /* headed in ev_file->event_list */
        struct mlx5_ib_uapi_devx_async_event_hdr hdr;
};

/* first level XA value data structure */
struct devx_event {
        struct xarray object_ids; /* second XA level, Key = object id */
        struct list_head unaffiliated_list;
};

/* second level XA value data structure */
struct devx_obj_event {
        struct rcu_head rcu;
        struct list_head obj_sub_list;
};

struct devx_event_subscription {
        struct list_head file_list; /* headed in ev_file->
                                     * subscribed_events_list
                                     */
        struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
                                   * devx_obj_event->obj_sub_list
                                   */
        struct list_head obj_list; /* headed in devx_object */
        struct list_head event_list; /* headed in ev_file->event_list or in
                                      * temp list via subscription
                                      */

        u8 is_cleaned:1;
        u32 xa_key_level1;
        u32 xa_key_level2;
        struct rcu_head rcu;
        u64 cookie;
        struct devx_async_event_file *ev_file;
        struct eventfd_ctx *eventfd;
};

struct devx_async_event_file {
        struct ib_uobject uobj;
        /* Head of events that are subscribed to this FD */
        struct list_head subscribed_events_list;
        spinlock_t lock;
        wait_queue_head_t poll_wait;
        struct list_head event_list;
        struct mlx5_ib_dev *dev;
        u8 omit_data:1;
        u8 is_overflow_err:1;
        u8 is_destroyed:1;
};

struct devx_umem {
        struct mlx5_core_dev            *mdev;
        struct ib_umem                  *umem;
        u32                             dinlen;
        u32                             dinbox[MLX5_ST_SZ_DW(destroy_umem_in)];
};

struct devx_umem_reg_cmd {
        void                            *in;
        u32                             inlen;
        u32                             out[MLX5_ST_SZ_DW(create_umem_out)];
};

static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{
        return to_mucontext(ib_uverbs_get_ucontext(attrs));
}

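/*
 * Allocate a firmware user context (UCTX) and return its UID. DEVX commands
 * issued through this context are stamped with the UID so firmware can scope
 * and validate them. The RAW_TX and INTERNAL_DEV_RES capabilities are granted
 * only to sufficiently privileged user contexts.
 */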
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
        u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
        u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
        void *uctx;
        int err;
        u16 uid;
        u32 cap = 0;

        /* 0 means not supported */
        if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
                return -EINVAL;

        uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
        if (is_user && capable(CAP_NET_RAW) &&
            (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
                cap |= MLX5_UCTX_CAP_RAW_TX;
        if (is_user && capable(CAP_SYS_RAWIO) &&
            (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
             MLX5_UCTX_CAP_INTERNAL_DEV_RES))
                cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;

        MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
        MLX5_SET(uctx, uctx, cap, cap);

        err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
        if (err)
                return err;

        uid = MLX5_GET(create_uctx_out, out, uid);
        return uid;
}

void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
        u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
        u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};

        MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
        MLX5_SET(destroy_uctx_in, in, uid, uid);

        mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static bool is_legacy_unaffiliated_event_num(u16 event_num)
{
        switch (event_num) {
        case MLX5_EVENT_TYPE_PORT_CHANGE:
                return true;
        default:
                return false;
        }
}

static bool is_legacy_obj_event_num(u16 event_num)
{
        switch (event_num) {
        case MLX5_EVENT_TYPE_PATH_MIG:
        case MLX5_EVENT_TYPE_COMM_EST:
        case MLX5_EVENT_TYPE_SQ_DRAINED:
        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
        case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
        case MLX5_EVENT_TYPE_CQ_ERROR:
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
        case MLX5_EVENT_TYPE_DCT_DRAINED:
        case MLX5_EVENT_TYPE_COMP:
        case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
        case MLX5_EVENT_TYPE_XRQ_ERROR:
                return true;
        default:
                return false;
        }
}

static u16 get_legacy_obj_type(u16 opcode)
{
        switch (opcode) {
        case MLX5_CMD_OP_CREATE_RQ:
                return MLX5_EVENT_QUEUE_TYPE_RQ;
        case MLX5_CMD_OP_CREATE_QP:
                return MLX5_EVENT_QUEUE_TYPE_QP;
        case MLX5_CMD_OP_CREATE_SQ:
                return MLX5_EVENT_QUEUE_TYPE_SQ;
        case MLX5_CMD_OP_CREATE_DCT:
                return MLX5_EVENT_QUEUE_TYPE_DCT;
        default:
                return 0;
        }
}

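/*
 * DEVX object IDs encode the creator command (see get_enc_obj_id() below):
 * bits 0..31 hold the raw object number, bits 32..47 the creation opcode,
 * and for general objects bits 48..63 the object type. For example, a CQ
 * with cqn 0x12 is encoded as ((u64)MLX5_CMD_OP_CREATE_CQ << 32) | 0x12.
 */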
static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
{
        u16 opcode;

        opcode = (obj->obj_id >> 32) & 0xffff;

        if (is_legacy_obj_event_num(event_num))
                return get_legacy_obj_type(opcode);

        switch (opcode) {
        case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
                return (obj->obj_id >> 48);
        case MLX5_CMD_OP_CREATE_RQ:
                return MLX5_OBJ_TYPE_RQ;
        case MLX5_CMD_OP_CREATE_QP:
                return MLX5_OBJ_TYPE_QP;
        case MLX5_CMD_OP_CREATE_SQ:
                return MLX5_OBJ_TYPE_SQ;
        case MLX5_CMD_OP_CREATE_DCT:
                return MLX5_OBJ_TYPE_DCT;
        case MLX5_CMD_OP_CREATE_TIR:
                return MLX5_OBJ_TYPE_TIR;
        case MLX5_CMD_OP_CREATE_TIS:
                return MLX5_OBJ_TYPE_TIS;
        case MLX5_CMD_OP_CREATE_PSV:
                return MLX5_OBJ_TYPE_PSV;
        case MLX5_CMD_OP_CREATE_MKEY:
                return MLX5_OBJ_TYPE_MKEY;
        case MLX5_CMD_OP_CREATE_RMP:
                return MLX5_OBJ_TYPE_RMP;
        case MLX5_CMD_OP_CREATE_XRC_SRQ:
                return MLX5_OBJ_TYPE_XRC_SRQ;
        case MLX5_CMD_OP_CREATE_XRQ:
                return MLX5_OBJ_TYPE_XRQ;
        case MLX5_CMD_OP_CREATE_RQT:
                return MLX5_OBJ_TYPE_RQT;
        case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
                return MLX5_OBJ_TYPE_FLOW_COUNTER;
        case MLX5_CMD_OP_CREATE_CQ:
                return MLX5_OBJ_TYPE_CQ;
        default:
                return 0;
        }
}

static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
{
        switch (event_type) {
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
        case MLX5_EVENT_TYPE_PATH_MIG:
        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
        case MLX5_EVENT_TYPE_COMM_EST:
        case MLX5_EVENT_TYPE_SQ_DRAINED:
        case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                return eqe->data.qp_srq.type;
        case MLX5_EVENT_TYPE_CQ_ERROR:
        case MLX5_EVENT_TYPE_XRQ_ERROR:
                return 0;
        case MLX5_EVENT_TYPE_DCT_DRAINED:
        case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
                return MLX5_EVENT_QUEUE_TYPE_DCT;
        default:
                return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
        }
}

static u32 get_dec_obj_id(u64 obj_id)
{
        return (obj_id & 0xffffffff);
}

/*
 * As the obj_id in the firmware is not globally unique, the object type
 * must be considered when checking for a valid object id.
 * To that end, the opcode of the creator command is encoded as part of the
 * obj_id.
 */
static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{
        return ((u64)opcode << 32) | obj_id;
}

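/*
 * Extract the raw object number that a successful create command returned in
 * its output mailbox (or, for a few commands, carried in its input). Every
 * opcode accepted by devx_is_obj_create_cmd() must be handled here.
 */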
static u32 devx_get_created_obj_id(const void *in, const void *out, u16 opcode)
{
        switch (opcode) {
        case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
                return MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
        case MLX5_CMD_OP_CREATE_UMEM:
                return MLX5_GET(create_umem_out, out, umem_id);
        case MLX5_CMD_OP_CREATE_MKEY:
                return MLX5_GET(create_mkey_out, out, mkey_index);
        case MLX5_CMD_OP_CREATE_CQ:
                return MLX5_GET(create_cq_out, out, cqn);
        case MLX5_CMD_OP_ALLOC_PD:
                return MLX5_GET(alloc_pd_out, out, pd);
        case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
                return MLX5_GET(alloc_transport_domain_out, out,
                                transport_domain);
        case MLX5_CMD_OP_CREATE_RMP:
                return MLX5_GET(create_rmp_out, out, rmpn);
        case MLX5_CMD_OP_CREATE_SQ:
                return MLX5_GET(create_sq_out, out, sqn);
        case MLX5_CMD_OP_CREATE_RQ:
                return MLX5_GET(create_rq_out, out, rqn);
        case MLX5_CMD_OP_CREATE_RQT:
                return MLX5_GET(create_rqt_out, out, rqtn);
        case MLX5_CMD_OP_CREATE_TIR:
                return MLX5_GET(create_tir_out, out, tirn);
        case MLX5_CMD_OP_CREATE_TIS:
                return MLX5_GET(create_tis_out, out, tisn);
        case MLX5_CMD_OP_ALLOC_Q_COUNTER:
                return MLX5_GET(alloc_q_counter_out, out, counter_set_id);
        case MLX5_CMD_OP_CREATE_FLOW_TABLE:
                return MLX5_GET(create_flow_table_out, out, table_id);
        case MLX5_CMD_OP_CREATE_FLOW_GROUP:
                return MLX5_GET(create_flow_group_out, out, group_id);
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
                return MLX5_GET(set_fte_in, in, flow_index);
        case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
                return MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
        case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
                return MLX5_GET(alloc_packet_reformat_context_out, out,
                                packet_reformat_id);
        case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
                return MLX5_GET(alloc_modify_header_context_out, out,
                                modify_header_id);
        case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
                return MLX5_GET(create_scheduling_element_out, out,
                                scheduling_element_id);
        case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
                return MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
        case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
                return MLX5_GET(set_l2_table_entry_in, in, table_index);
        case MLX5_CMD_OP_CREATE_QP:
                return MLX5_GET(create_qp_out, out, qpn);
        case MLX5_CMD_OP_CREATE_SRQ:
                return MLX5_GET(create_srq_out, out, srqn);
        case MLX5_CMD_OP_CREATE_XRC_SRQ:
                return MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
        case MLX5_CMD_OP_CREATE_DCT:
                return MLX5_GET(create_dct_out, out, dctn);
        case MLX5_CMD_OP_CREATE_XRQ:
                return MLX5_GET(create_xrq_out, out, xrqn);
        case MLX5_CMD_OP_ATTACH_TO_MCG:
                return MLX5_GET(attach_to_mcg_in, in, qpn);
        case MLX5_CMD_OP_ALLOC_XRCD:
                return MLX5_GET(alloc_xrcd_out, out, xrcd);
        case MLX5_CMD_OP_CREATE_PSV:
                return MLX5_GET(create_psv_out, out, psv0_index);
        default:
                /* The entry must match one of the devx_is_obj_create_cmd cases */
                WARN_ON(true);
                return 0;
        }
}

static u64 devx_get_obj_id(const void *in)
{
        u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
        u64 obj_id;

        switch (opcode) {
        case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
        case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
                                        MLX5_GET(general_obj_in_cmd_hdr, in,
                                                 obj_type) << 16,
                                        MLX5_GET(general_obj_in_cmd_hdr, in,
                                                 obj_id));
                break;
        case MLX5_CMD_OP_QUERY_MKEY:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
                                        MLX5_GET(query_mkey_in, in,
                                                 mkey_index));
                break;
        case MLX5_CMD_OP_QUERY_CQ:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
                                        MLX5_GET(query_cq_in, in, cqn));
                break;
        case MLX5_CMD_OP_MODIFY_CQ:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
                                        MLX5_GET(modify_cq_in, in, cqn));
                break;
        case MLX5_CMD_OP_QUERY_SQ:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
                                        MLX5_GET(query_sq_in, in, sqn));
                break;
        case MLX5_CMD_OP_MODIFY_SQ:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
                                        MLX5_GET(modify_sq_in, in, sqn));
                break;
        case MLX5_CMD_OP_QUERY_RQ:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
                                        MLX5_GET(query_rq_in, in, rqn));
                break;
        case MLX5_CMD_OP_MODIFY_RQ:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
                                        MLX5_GET(modify_rq_in, in, rqn));
                break;
        case MLX5_CMD_OP_QUERY_RMP:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
                                        MLX5_GET(query_rmp_in, in, rmpn));
                break;
        case MLX5_CMD_OP_MODIFY_RMP:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
                                        MLX5_GET(modify_rmp_in, in, rmpn));
                break;
        case MLX5_CMD_OP_QUERY_RQT:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
                                        MLX5_GET(query_rqt_in, in, rqtn));
                break;
        case MLX5_CMD_OP_MODIFY_RQT:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
                                        MLX5_GET(modify_rqt_in, in, rqtn));
                break;
        case MLX5_CMD_OP_QUERY_TIR:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
                                        MLX5_GET(query_tir_in, in, tirn));
                break;
        case MLX5_CMD_OP_MODIFY_TIR:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
                                        MLX5_GET(modify_tir_in, in, tirn));
                break;
        case MLX5_CMD_OP_QUERY_TIS:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
                                        MLX5_GET(query_tis_in, in, tisn));
                break;
        case MLX5_CMD_OP_MODIFY_TIS:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
                                        MLX5_GET(modify_tis_in, in, tisn));
                break;
        case MLX5_CMD_OP_QUERY_FLOW_TABLE:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
                                        MLX5_GET(query_flow_table_in, in,
                                                 table_id));
                break;
        case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
                                        MLX5_GET(modify_flow_table_in, in,
                                                 table_id));
                break;
        case MLX5_CMD_OP_QUERY_FLOW_GROUP:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
                                        MLX5_GET(query_flow_group_in, in,
                                                 group_id));
                break;
        case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
                                        MLX5_GET(query_fte_in, in,
                                                 flow_index));
                break;
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
                                        MLX5_GET(set_fte_in, in, flow_index));
                break;
        case MLX5_CMD_OP_QUERY_Q_COUNTER:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
                                        MLX5_GET(query_q_counter_in, in,
                                                 counter_set_id));
                break;
        case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
                                        MLX5_GET(query_flow_counter_in, in,
                                                 flow_counter_id));
                break;
        case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
                                        MLX5_GET(query_modify_header_context_in,
                                                 in, modify_header_id));
                break;
        case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
                                        MLX5_GET(query_scheduling_element_in,
                                                 in, scheduling_element_id));
                break;
        case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
                                        MLX5_GET(modify_scheduling_element_in,
                                                 in, scheduling_element_id));
                break;
        case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
                                        MLX5_GET(add_vxlan_udp_dport_in, in,
                                                 vxlan_udp_port));
                break;
        case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
                                        MLX5_GET(query_l2_table_entry_in, in,
                                                 table_index));
                break;
        case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
                                        MLX5_GET(set_l2_table_entry_in, in,
                                                 table_index));
                break;
        case MLX5_CMD_OP_QUERY_QP:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
                                        MLX5_GET(query_qp_in, in, qpn));
                break;
        case MLX5_CMD_OP_RST2INIT_QP:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
                                        MLX5_GET(rst2init_qp_in, in, qpn));
                break;
        case MLX5_CMD_OP_INIT2INIT_QP:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
                                        MLX5_GET(init2init_qp_in, in, qpn));
                break;
        case MLX5_CMD_OP_INIT2RTR_QP:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
                                        MLX5_GET(init2rtr_qp_in, in, qpn));
                break;
        case MLX5_CMD_OP_RTR2RTS_QP:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
                                        MLX5_GET(rtr2rts_qp_in, in, qpn));
                break;
        case MLX5_CMD_OP_RTS2RTS_QP:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
                                        MLX5_GET(rts2rts_qp_in, in, qpn));
                break;
        case MLX5_CMD_OP_SQERR2RTS_QP:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
                                        MLX5_GET(sqerr2rts_qp_in, in, qpn));
                break;
        case MLX5_CMD_OP_2ERR_QP:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
                                        MLX5_GET(qp_2err_in, in, qpn));
                break;
        case MLX5_CMD_OP_2RST_QP:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
                                        MLX5_GET(qp_2rst_in, in, qpn));
                break;
        case MLX5_CMD_OP_QUERY_DCT:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
                                        MLX5_GET(query_dct_in, in, dctn));
                break;
        case MLX5_CMD_OP_QUERY_XRQ:
        case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
        case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
                                        MLX5_GET(query_xrq_in, in, xrqn));
                break;
        case MLX5_CMD_OP_QUERY_XRC_SRQ:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
                                        MLX5_GET(query_xrc_srq_in, in,
                                                 xrc_srqn));
                break;
        case MLX5_CMD_OP_ARM_XRC_SRQ:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
                                        MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
                break;
        case MLX5_CMD_OP_QUERY_SRQ:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
                                        MLX5_GET(query_srq_in, in, srqn));
                break;
        case MLX5_CMD_OP_ARM_RQ:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
                                        MLX5_GET(arm_rq_in, in, srq_number));
                break;
        case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
                                        MLX5_GET(drain_dct_in, in, dctn));
                break;
        case MLX5_CMD_OP_ARM_XRQ:
        case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
        case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
        case MLX5_CMD_OP_MODIFY_XRQ:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
                                        MLX5_GET(arm_xrq_in, in, xrqn));
                break;
        case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
                obj_id = get_enc_obj_id
                                (MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
                                 MLX5_GET(query_packet_reformat_context_in,
                                          in, packet_reformat_id));
                break;
        default:
                obj_id = 0;
        }

        return obj_id;
}

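/*
 * Check that the object id embedded in a modify/query mailbox matches the
 * uobject the caller passed in, so a command cannot be redirected at a
 * different object than the one bound to the uobject.
 */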
static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
                                 struct ib_uobject *uobj, const void *in)
{
        struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
        u64 obj_id = devx_get_obj_id(in);

        if (!obj_id)
                return false;

        switch (uobj_get_object_id(uobj)) {
        case UVERBS_OBJECT_CQ:
                return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
                                      to_mcq(uobj->object)->mcq.cqn) ==
                                      obj_id;

        case UVERBS_OBJECT_SRQ:
        {
                struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
                u16 opcode;

                switch (srq->common.res) {
                case MLX5_RES_XSRQ:
                        opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
                        break;
                case MLX5_RES_XRQ:
                        opcode = MLX5_CMD_OP_CREATE_XRQ;
                        break;
                default:
                        if (!dev->mdev->issi)
                                opcode = MLX5_CMD_OP_CREATE_SRQ;
                        else
                                opcode = MLX5_CMD_OP_CREATE_RMP;
                }

                return get_enc_obj_id(opcode,
                                      to_msrq(uobj->object)->msrq.srqn) ==
                                      obj_id;
        }

        case UVERBS_OBJECT_QP:
        {
                struct mlx5_ib_qp *qp = to_mqp(uobj->object);

                if (qp->type == IB_QPT_RAW_PACKET ||
                    (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
                        struct mlx5_ib_raw_packet_qp *raw_packet_qp =
                                                         &qp->raw_packet_qp;
                        struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
                        struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

                        return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
                                               rq->base.mqp.qpn) == obj_id ||
                                get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
                                               sq->base.mqp.qpn) == obj_id ||
                                get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
                                               rq->tirn) == obj_id ||
                                get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
                                               sq->tisn) == obj_id);
                }

                if (qp->type == MLX5_IB_QPT_DCT)
                        return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
                                              qp->dct.mdct.mqp.qpn) == obj_id;
                return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
                                      qp->ibqp.qp_num) == obj_id;
        }

        case UVERBS_OBJECT_WQ:
                return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
                                      to_mrwq(uobj->object)->core_qp.qpn) ==
                                      obj_id;

        case UVERBS_OBJECT_RWQ_IND_TBL:
                return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
                                      to_mrwq_ind_table(uobj->object)->rqtn) ==
                                      obj_id;

        case MLX5_IB_OBJECT_DEVX_OBJ:
                return ((struct devx_obj *)uobj->object)->obj_id == obj_id;

        default:
                return false;
        }
}

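/*
 * For create/modify commands whose contexts may reference user memory, mark
 * the umem/doorbell-record handles as valid so firmware will accept the umem
 * IDs supplied by userspace.
 */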
static void devx_set_umem_valid(const void *in)
{
        u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

        switch (opcode) {
        case MLX5_CMD_OP_CREATE_MKEY:
                MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
                break;
        case MLX5_CMD_OP_CREATE_CQ:
        {
                void *cqc;

                MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
                cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
                MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
                break;
        }
        case MLX5_CMD_OP_CREATE_QP:
        {
                void *qpc;

                qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
                MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
                MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
                break;
        }

        case MLX5_CMD_OP_CREATE_RQ:
        {
                void *rqc, *wq;

                rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
                wq  = MLX5_ADDR_OF(rqc, rqc, wq);
                MLX5_SET(wq, wq, dbr_umem_valid, 1);
                MLX5_SET(wq, wq, wq_umem_valid, 1);
                break;
        }

        case MLX5_CMD_OP_CREATE_SQ:
        {
                void *sqc, *wq;

                sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
                wq = MLX5_ADDR_OF(sqc, sqc, wq);
                MLX5_SET(wq, wq, dbr_umem_valid, 1);
                MLX5_SET(wq, wq, wq_umem_valid, 1);
                break;
        }

        case MLX5_CMD_OP_MODIFY_CQ:
                MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
                break;

        case MLX5_CMD_OP_CREATE_RMP:
        {
                void *rmpc, *wq;

                rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
                wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
                MLX5_SET(wq, wq, dbr_umem_valid, 1);
                MLX5_SET(wq, wq, wq_umem_valid, 1);
                break;
        }

        case MLX5_CMD_OP_CREATE_XRQ:
        {
                void *xrqc, *wq;

                xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
                wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
                MLX5_SET(wq, wq, dbr_umem_valid, 1);
                MLX5_SET(wq, wq, wq_umem_valid, 1);
                break;
        }

        case MLX5_CMD_OP_CREATE_XRC_SRQ:
        {
                void *xrc_srqc;

                MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
                xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
                                        xrc_srq_context_entry);
                MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
                break;
        }

        default:
                return;
        }
}

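/*
 * Command classifiers: decide whether a raw mailbox is a create, modify or
 * query command. Any opcode recognized by devx_is_obj_create_cmd() must also
 * be handled in devx_get_created_obj_id() and devx_obj_build_destroy_cmd().
 */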
static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
        *opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

        switch (*opcode) {
        case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
        case MLX5_CMD_OP_CREATE_MKEY:
        case MLX5_CMD_OP_CREATE_CQ:
        case MLX5_CMD_OP_ALLOC_PD:
        case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
        case MLX5_CMD_OP_CREATE_RMP:
        case MLX5_CMD_OP_CREATE_SQ:
        case MLX5_CMD_OP_CREATE_RQ:
        case MLX5_CMD_OP_CREATE_RQT:
        case MLX5_CMD_OP_CREATE_TIR:
        case MLX5_CMD_OP_CREATE_TIS:
        case MLX5_CMD_OP_ALLOC_Q_COUNTER:
        case MLX5_CMD_OP_CREATE_FLOW_TABLE:
        case MLX5_CMD_OP_CREATE_FLOW_GROUP:
        case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
        case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
        case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
        case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
        case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_CREATE_QP:
        case MLX5_CMD_OP_CREATE_SRQ:
        case MLX5_CMD_OP_CREATE_XRC_SRQ:
        case MLX5_CMD_OP_CREATE_DCT:
        case MLX5_CMD_OP_CREATE_XRQ:
        case MLX5_CMD_OP_ATTACH_TO_MCG:
        case MLX5_CMD_OP_ALLOC_XRCD:
                return true;
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
        {
                u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

                if (op_mod == 0)
                        return true;
                return false;
        }
        case MLX5_CMD_OP_CREATE_PSV:
        {
                u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);

                if (num_psv == 1)
                        return true;
                return false;
        }
        default:
                return false;
        }
}

static bool devx_is_obj_modify_cmd(const void *in)
{
        u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

        switch (opcode) {
        case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
        case MLX5_CMD_OP_MODIFY_CQ:
        case MLX5_CMD_OP_MODIFY_RMP:
        case MLX5_CMD_OP_MODIFY_SQ:
        case MLX5_CMD_OP_MODIFY_RQ:
        case MLX5_CMD_OP_MODIFY_RQT:
        case MLX5_CMD_OP_MODIFY_TIR:
        case MLX5_CMD_OP_MODIFY_TIS:
        case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
        case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
        case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_RST2INIT_QP:
        case MLX5_CMD_OP_INIT2RTR_QP:
        case MLX5_CMD_OP_INIT2INIT_QP:
        case MLX5_CMD_OP_RTR2RTS_QP:
        case MLX5_CMD_OP_RTS2RTS_QP:
        case MLX5_CMD_OP_SQERR2RTS_QP:
        case MLX5_CMD_OP_2ERR_QP:
        case MLX5_CMD_OP_2RST_QP:
        case MLX5_CMD_OP_ARM_XRC_SRQ:
        case MLX5_CMD_OP_ARM_RQ:
        case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
        case MLX5_CMD_OP_ARM_XRQ:
        case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
        case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
        case MLX5_CMD_OP_MODIFY_XRQ:
                return true;
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
        {
                u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

                if (op_mod == 1)
                        return true;
                return false;
        }
        default:
                return false;
        }
}

static bool devx_is_obj_query_cmd(const void *in)
{
        u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

        switch (opcode) {
        case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
        case MLX5_CMD_OP_QUERY_MKEY:
        case MLX5_CMD_OP_QUERY_CQ:
        case MLX5_CMD_OP_QUERY_RMP:
        case MLX5_CMD_OP_QUERY_SQ:
        case MLX5_CMD_OP_QUERY_RQ:
        case MLX5_CMD_OP_QUERY_RQT:
        case MLX5_CMD_OP_QUERY_TIR:
        case MLX5_CMD_OP_QUERY_TIS:
        case MLX5_CMD_OP_QUERY_Q_COUNTER:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE:
        case MLX5_CMD_OP_QUERY_FLOW_GROUP:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
        case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
        case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_QUERY_QP:
        case MLX5_CMD_OP_QUERY_SRQ:
        case MLX5_CMD_OP_QUERY_XRC_SRQ:
        case MLX5_CMD_OP_QUERY_DCT:
        case MLX5_CMD_OP_QUERY_XRQ:
        case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
        case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
        case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
                return true;
        default:
                return false;
        }
}

static bool devx_is_whitelist_cmd(void *in)
{
        u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

        switch (opcode) {
        case MLX5_CMD_OP_QUERY_HCA_CAP:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
                return true;
        default:
                return false;
        }
}

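/*
 * Resolve the UID to stamp into the command header. Whitelisted query
 * commands may fall back to the device-wide whitelist UID when the context
 * has no DEVX UID of its own; all other commands require a per-context
 * DEVX UID.
 */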
static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
        if (devx_is_whitelist_cmd(cmd_in)) {
                struct mlx5_ib_dev *dev;

                if (c->devx_uid)
                        return c->devx_uid;

                dev = to_mdev(c->ibucontext.device);
                if (dev->devx_whitelist_uid)
                        return dev->devx_whitelist_uid;

                return -EOPNOTSUPP;
        }

        if (!c->devx_uid)
                return -EINVAL;

        return c->devx_uid;
}

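/*
 * General commands operate on the device as a whole (or are tunneled to a
 * peer vhca) rather than on a driver-tracked object, so they carry no
 * object id to validate.
 */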
static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
{
        u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

        /* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
        if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
             MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
            (opcode >= MLX5_CMD_OP_GENERAL_START &&
             opcode < MLX5_CMD_OP_GENERAL_END))
                return true;

        switch (opcode) {
        case MLX5_CMD_OP_QUERY_HCA_CAP:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_VPORT_STATE:
        case MLX5_CMD_OP_QUERY_ADAPTER:
        case MLX5_CMD_OP_QUERY_ISSI:
        case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
        case MLX5_CMD_OP_QUERY_VNIC_ENV:
        case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
        case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
        case MLX5_CMD_OP_NOP:
        case MLX5_CMD_OP_QUERY_CONG_STATUS:
        case MLX5_CMD_OP_QUERY_CONG_PARAMS:
        case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
        case MLX5_CMD_OP_QUERY_LAG:
                return true;
        default:
                return false;
        }
}

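/*
 * Translate a user completion vector index into the device EQ number,
 * typically so userspace can create its own CQs bound directly to that EQ.
 */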
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
        struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_ucontext *c;
        struct mlx5_ib_dev *dev;
        int user_vector;
        int dev_eqn;
        int err;

        if (uverbs_copy_from(&user_vector, attrs,
                             MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
                return -EFAULT;

        c = devx_ufile2uctx(attrs);
        if (IS_ERR(c))
                return PTR_ERR(c);
        dev = to_mdev(c->ibucontext.device);

        err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
        if (err < 0)
                return err;

        if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
                           &dev_eqn, sizeof(dev_eqn)))
                return -EFAULT;

        return 0;
}

/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If there is no match, the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, and then in this case other
 * users may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it to the hardware schedule
 * queue or arm its CQ for event generation); no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
        struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_ucontext *c;
        struct mlx5_ib_dev *dev;
        u32 user_idx;
        s32 dev_idx;

        c = devx_ufile2uctx(attrs);
        if (IS_ERR(c))
                return PTR_ERR(c);
        dev = to_mdev(c->ibucontext.device);

        if (uverbs_copy_from(&user_idx, attrs,
                             MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
                return -EFAULT;

        dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
        if (dev_idx < 0)
                return dev_idx;

        if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
                           &dev_idx, sizeof(dev_idx)))
                return -EFAULT;

        return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
        struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_ucontext *c;
        struct mlx5_ib_dev *dev;
        void *cmd_in = uverbs_attr_get_alloced_ptr(
                attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
        int cmd_out_len = uverbs_attr_get_len(attrs,
                                        MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
        void *cmd_out;
        int err;
        int uid;

        c = devx_ufile2uctx(attrs);
        if (IS_ERR(c))
                return PTR_ERR(c);
        dev = to_mdev(c->ibucontext.device);

        uid = devx_get_uid(c, cmd_in);
        if (uid < 0)
                return uid;

        /* Only a whitelist of general HCA commands is allowed for this method. */
        if (!devx_is_general_cmd(cmd_in, dev))
                return -EINVAL;

        cmd_out = uverbs_zalloc(attrs, cmd_out_len);
        if (IS_ERR(cmd_out))
                return PTR_ERR(cmd_out);

        MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
        err = mlx5_cmd_exec(dev->mdev, cmd_in,
                            uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
                            cmd_out, cmd_out_len);
        if (err)
                return err;

        return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
                              cmd_out_len);
}

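/*
 * Build the destroy-command mailbox (din) that matches a just-created
 * object, so the object can later be torn down without re-parsing the
 * create command. Fields the destroy command needs (vport, table type,
 * etc.) are copied over from the create input.
 */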
1089 static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
1090                                        u32 *dinlen,
1091                                        u32 *obj_id)
1092 {
1093         u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
1094         u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
1095
1096         *obj_id = devx_get_created_obj_id(in, out, opcode);
1097         *dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
1098         MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
1099
1100         switch (opcode) {
1101         case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
1102                 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
1103                 MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
1104                 MLX5_SET(general_obj_in_cmd_hdr, din, obj_type,
1105                          MLX5_GET(general_obj_in_cmd_hdr, in, obj_type));
1106                 break;
1107
1108         case MLX5_CMD_OP_CREATE_UMEM:
1109                 MLX5_SET(destroy_umem_in, din, opcode,
1110                          MLX5_CMD_OP_DESTROY_UMEM);
1111                 MLX5_SET(destroy_umem_in, din, umem_id, *obj_id);
1112                 break;
1113         case MLX5_CMD_OP_CREATE_MKEY:
1114                 MLX5_SET(destroy_mkey_in, din, opcode,
1115                          MLX5_CMD_OP_DESTROY_MKEY);
1116                 MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
1117                 break;
1118         case MLX5_CMD_OP_CREATE_CQ:
1119                 MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
1120                 MLX5_SET(destroy_cq_in, din, cqn, *obj_id);
1121                 break;
1122         case MLX5_CMD_OP_ALLOC_PD:
1123                 MLX5_SET(dealloc_pd_in, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
1124                 MLX5_SET(dealloc_pd_in, din, pd, *obj_id);
1125                 break;
1126         case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
1127                 MLX5_SET(dealloc_transport_domain_in, din, opcode,
1128                          MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
1129                 MLX5_SET(dealloc_transport_domain_in, din, transport_domain,
1130                          *obj_id);
1131                 break;
1132         case MLX5_CMD_OP_CREATE_RMP:
1133                 MLX5_SET(destroy_rmp_in, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
1134                 MLX5_SET(destroy_rmp_in, din, rmpn, *obj_id);
1135                 break;
1136         case MLX5_CMD_OP_CREATE_SQ:
1137                 MLX5_SET(destroy_sq_in, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
1138                 MLX5_SET(destroy_sq_in, din, sqn, *obj_id);
1139                 break;
1140         case MLX5_CMD_OP_CREATE_RQ:
1141                 MLX5_SET(destroy_rq_in, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
1142                 MLX5_SET(destroy_rq_in, din, rqn, *obj_id);
1143                 break;
1144         case MLX5_CMD_OP_CREATE_RQT:
1145                 MLX5_SET(destroy_rqt_in, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
1146                 MLX5_SET(destroy_rqt_in, din, rqtn, *obj_id);
1147                 break;
1148         case MLX5_CMD_OP_CREATE_TIR:
1149                 MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
1150                 MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
1151                 break;
1152         case MLX5_CMD_OP_CREATE_TIS:
1153                 MLX5_SET(destroy_tis_in, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
1154                 MLX5_SET(destroy_tis_in, din, tisn, *obj_id);
1155                 break;
1156         case MLX5_CMD_OP_ALLOC_Q_COUNTER:
1157                 MLX5_SET(dealloc_q_counter_in, din, opcode,
1158                          MLX5_CMD_OP_DEALLOC_Q_COUNTER);
1159                 MLX5_SET(dealloc_q_counter_in, din, counter_set_id, *obj_id);
1160                 break;
1161         case MLX5_CMD_OP_CREATE_FLOW_TABLE:
1162                 *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
1163                 MLX5_SET(destroy_flow_table_in, din, other_vport,
1164                          MLX5_GET(create_flow_table_in,  in, other_vport));
1165                 MLX5_SET(destroy_flow_table_in, din, vport_number,
1166                          MLX5_GET(create_flow_table_in,  in, vport_number));
1167                 MLX5_SET(destroy_flow_table_in, din, table_type,
1168                          MLX5_GET(create_flow_table_in,  in, table_type));
1169                 MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
1170                 MLX5_SET(destroy_flow_table_in, din, opcode,
1171                          MLX5_CMD_OP_DESTROY_FLOW_TABLE);
1172                 break;
1173         case MLX5_CMD_OP_CREATE_FLOW_GROUP:
1174                 *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
1175                 MLX5_SET(destroy_flow_group_in, din, other_vport,
1176                          MLX5_GET(create_flow_group_in, in, other_vport));
1177                 MLX5_SET(destroy_flow_group_in, din, vport_number,
1178                          MLX5_GET(create_flow_group_in, in, vport_number));
1179                 MLX5_SET(destroy_flow_group_in, din, table_type,
1180                          MLX5_GET(create_flow_group_in, in, table_type));
1181                 MLX5_SET(destroy_flow_group_in, din, table_id,
1182                          MLX5_GET(create_flow_group_in, in, table_id));
1183                 MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
1184                 MLX5_SET(destroy_flow_group_in, din, opcode,
1185                          MLX5_CMD_OP_DESTROY_FLOW_GROUP);
1186                 break;
1187         case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
1188                 *dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
1189                 MLX5_SET(delete_fte_in, din, other_vport,
1190                          MLX5_GET(set_fte_in,  in, other_vport));
1191                 MLX5_SET(delete_fte_in, din, vport_number,
1192                          MLX5_GET(set_fte_in, in, vport_number));
1193                 MLX5_SET(delete_fte_in, din, table_type,
1194                          MLX5_GET(set_fte_in, in, table_type));
1195                 MLX5_SET(delete_fte_in, din, table_id,
1196                          MLX5_GET(set_fte_in, in, table_id));
1197                 MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
1198                 MLX5_SET(delete_fte_in, din, opcode,
1199                          MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
1200                 break;
1201         case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
1202                 MLX5_SET(dealloc_flow_counter_in, din, opcode,
1203                          MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
1204                 MLX5_SET(dealloc_flow_counter_in, din, flow_counter_id,
1205                          *obj_id);
1206                 break;
1207         case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
1208                 MLX5_SET(dealloc_packet_reformat_context_in, din, opcode,
1209                          MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
1210                 MLX5_SET(dealloc_packet_reformat_context_in, din,
1211                          packet_reformat_id, *obj_id);
1212                 break;
1213         case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(dealloc_modify_header_context_in, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		MLX5_SET(dealloc_modify_header_context_in, din,
			 modify_header_id, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(destroy_scheduling_element_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(delete_vxlan_udp_dport_in, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(delete_l2_table_entry_in, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		MLX5_SET(destroy_qp_in, din, qpn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(destroy_srq_in, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		MLX5_SET(destroy_srq_in, din, srqn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(destroy_xrc_srq_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		MLX5_SET(destroy_xrc_srq_in, din, xrc_srqn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		MLX5_SET(destroy_dct_in, din, dctn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(destroy_xrq_in, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		MLX5_SET(destroy_xrq_in, din, xrqn, *obj_id);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(detach_from_mcg_in, din, opcode,
			 MLX5_CMD_OP_DETACH_FROM_MCG);
		MLX5_SET(detach_from_mcg_in, din, qpn, *obj_id);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(dealloc_xrcd_in, din, opcode,
			 MLX5_CMD_OP_DEALLOC_XRCD);
		MLX5_SET(dealloc_xrcd_in, din, xrcd, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_PSV:
		MLX5_SET(destroy_psv_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_PSV);
		MLX5_SET(destroy_psv_in, din, psvn, *obj_id);
		break;
	default:
		/* The opcode must match one of the create commands
		 * recognized by devx_is_obj_create_cmd()
		 */
		WARN_ON(true);
		break;
	}
}

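/*
 * Cache the attributes of a user-built indirect (KLM/KSM) mkey from the
 * CREATE_MKEY command payload and register it in the odp_mkeys XArray so
 * that the ODP page-fault path can resolve faults against it.  The mkey
 * number is recomposed from the returned mkey_index plus the variable
 * part (mkey_7_0) supplied by the caller.
 */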
static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	struct mlx5_core_mkey *mkey;
	void *mkc;
	u8 key;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
	init_waitqueue_head(&mkey->wait);

	return mlx5r_store_odp_mkey(dev, mkey);
}

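/*
 * Screen a user CREATE_MKEY command.  Indirect mkeys (KLM/KSM access
 * modes) are only flagged for ODP tracking when the kernel is built with
 * CONFIG_INFINIBAND_ON_DEMAND_PAGING; every other mkey is marked
 * mkey_umem_valid so the command may reference a DEVX umem.
 */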
static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{
	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
			MLX5_FLD_SZ_BYTES(create_mkey_in,
			memory_key_mkey_entry);
	void *mkc;
	u8 access_mode;

	if (in_len < min_len)
		return -EINVAL;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
	    access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
		return 0;
	}

	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
	return 0;
}

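/*
 * Unlink one event subscription from the XA lists.  Must be called with
 * event_xa_lock held; the subscription itself is freed later via RCU.  An
 * affiliated subscription also drops its level-2 XArray entry once that
 * entry's obj_sub_list becomes empty.
 */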
static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
				      struct devx_event_subscription *sub)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	if (sub->is_cleaned)
		return;

	sub->is_cleaned = 1;
	list_del_rcu(&sub->xa_list);

	if (list_empty(&sub->obj_list))
		return;

	list_del_rcu(&sub->obj_list);
	/* if the level-2 entry's obj_sub_list is now empty, erase the
	 * entry from the level-1 XArray as well
	 */
	event = xa_load(&dev->devx_event_table.event_xa,
			sub->xa_key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 sub->xa_key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

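/*
 * Destroy the firmware object behind a DEVX uobject.  The destroy command
 * was pre-built into obj->dinbox by devx_obj_build_destroy_cmd() at create
 * time and is replayed here verbatim; conceptually (a sketch only, shown
 * for a general object):
 *
 *	MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
 *		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
 *	MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
 *	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, obj_id);
 *
 * DCTs and CQs also carry a core object and go through their dedicated
 * destroy helpers instead of the raw command path.
 */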
static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct mlx5_devx_event_table *devx_event_table;
	struct devx_obj *obj = uobject->object;
	struct devx_event_subscription *sub_entry, *tmp;
	struct mlx5_ib_dev *dev;
	int ret;

	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
	    xa_erase(&obj->ib_dev->odp_mkeys,
		     mlx5_base_mkey(obj->devx_mr.mmkey.key)))
		/*
		 * pagefault_single_data_segment() issues commands against
		 * the mmkey; we must wait for those to stop before freeing
		 * the mkey, as another allocation could get the same mkey
		 * number.
		 */
		mlx5r_deref_wait_odp_mkey(&obj->devx_mr.mmkey);

	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
				    obj->dinlen, out, sizeof(out));
	if (ret)
		return ret;

	devx_event_table = &dev->devx_event_table;

	mutex_lock(&devx_event_table->event_xa_lock);
	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
		devx_cleanup_subscription(dev, sub_entry);
	mutex_unlock(&devx_event_table->event_xa_lock);

	kfree(obj);
	return ret;
}

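/*
 * Completion handler for CQs created through DEVX.  Called from the CQ
 * completion path: look up the MLX5_EVENT_TYPE_COMP subscribers for this
 * cqn under RCU and dispatch the EQE to every subscribed event file.
 */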
static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
	struct mlx5_devx_event_table *table;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u32 obj_id = mcq->cqn;

	table = &obj->ib_dev->devx_event_table;
	rcu_read_lock();
	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
	if (!event)
		goto out;

	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event)
		goto out;

	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
out:
	rcu_read_unlock();
}

static bool is_apu_cq(struct mlx5_ib_dev *dev, const void *in)
{
	if (!MLX5_CAP_GEN(dev->mdev, apu) ||
	    !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), apu_cq))
		return false;

	return true;
}

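/*
 * MLX5_IB_METHOD_DEVX_OBJ_CREATE: pass a raw create command from userspace
 * to the device.  The flow: resolve the DEVX uid, screen the opcode via
 * devx_is_obj_create_cmd(), special-case DCT and non-APU CQ creation
 * (which must also instantiate core objects for event delivery), execute
 * the command, then pre-build the matching destroy box so cleanup always
 * knows how to tear the object down.
 */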
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	u16 obj_type = 0;
	int err;
	int uid;
	u32 obj_id;
	u16 opcode;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
		obj->flags |= DEVX_OBJ_FLAGS_DCT;
		err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
					   cmd_in_len, cmd_out, cmd_out_len);
	} else if (opcode == MLX5_CMD_OP_CREATE_CQ &&
		   !is_apu_cq(dev, cmd_in)) {
		obj->flags |= DEVX_OBJ_FLAGS_CQ;
		obj->core_cq.comp = devx_cq_comp;
		err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
					  cmd_in, cmd_in_len, cmd_out,
					  cmd_out_len);
	} else {
		err = mlx5_cmd_exec(dev->mdev, cmd_in,
				    cmd_in_len,
				    cmd_out, cmd_out_len);
	}

	if (err)
		goto obj_free;

	if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
		u8 bulk = MLX5_GET(alloc_flow_counter_in,
				   cmd_in,
				   flow_counter_bulk);
		obj->flow_counter_bulk_size = 128UL * bulk;
	}

	uobj->object = obj;
	INIT_LIST_HEAD(&obj->event_sub);
	obj->ib_dev = dev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto obj_destroy;

	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}
	return 0;

obj_destroy:
	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
			      sizeof(out));
obj_free:
	kfree(obj);
	return err;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	void *cmd_out;
	int err;
	int uid;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	devx_set_umem_valid(cmd_in);

	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	void *cmd_out;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

struct devx_async_event_queue {
	spinlock_t		lock;
	wait_queue_head_t	poll_wait;
	struct list_head	event_list;
	atomic_t		bytes_in_use;
	u8			is_destroyed:1;
};

struct devx_async_cmd_event_file {
	struct ib_uobject		uobj;
	struct devx_async_event_queue	ev_queue;
	struct mlx5_async_ctx		async_ctx;
};

static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	atomic_set(&ev_queue->bytes_in_use, 0);
	ev_queue->is_destroyed = 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_async_cmd_event_file *ev_file;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);

	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
			       uobj);
	devx_init_event_queue(&ev_file->ev_queue);
	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
	struct devx_async_event_file *ev_file;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 flags;
	int err;

	err = uverbs_get_flags32(&flags, attrs,
		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);

	if (err)
		return err;

	ev_file = container_of(uobj, struct devx_async_event_file,
			       uobj);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
		ev_file->omit_data = 1;
	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
	ev_file->dev = dev;
	get_device(&dev->ib_dev.dev);
	return 0;
}

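/*
 * Async-command completion callback: queue the finished command on its
 * event file and wake any reader blocked in devx_async_cmd_event_read().
 */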
static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
	struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
	unsigned long flags;

	/*
	 * Note that if the struct devx_async_cmd_event_file uobj begins to be
	 * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
	 * routine returns, ensuring that it always remains valid here.
	 */
	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
}

#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */

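/*
 * MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY: fire a query command without
 * blocking.  The output buffer is accounted against the event file's
 * bytes_in_use budget (capped at MAX_ASYNC_BYTES_IN_USE, else -EAGAIN),
 * the command is issued via mlx5_cmd_exec_cb(), and the result is later
 * read back from the FD together with the caller-chosen wr_id.
 */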
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->ev_file = ev_file;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
		    uverbs_attr_get_len(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
		    async_data->hdr.out_data,
		    async_data->cmd_out_len,
		    devx_query_callback, &async_data->cb_work);

	if (err)
		goto free_async;

	return 0;

free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}

static void
subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
			   u32 key_level1,
			   bool is_level2,
			   u32 key_level2)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	/* The level-1 entry stays valid for future subscribers; only an
	 * unused level-2 entry may need to be freed.
	 */
	if (!is_level2)
		return;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids,
				key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

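/*
 * Ensure the two-level XArray path for one subscription key exists:
 * level 1 is keyed by the event number (combined with the object type in
 * the high 16 bits where relevant), level 2 by the object id for
 * affiliated events.  Entries are created on demand and kept around for
 * future subscribers.
 */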
static int
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
			 u32 key_level1,
			 bool is_level2,
			 u32 key_level2)
{
	struct devx_obj_event *obj_event;
	struct devx_event *event;
	int err;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	if (!event) {
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;

		INIT_LIST_HEAD(&event->unaffiliated_list);
		xa_init(&event->object_ids);

		err = xa_insert(&devx_event_table->event_xa,
				key_level1,
				event,
				GFP_KERNEL);
		if (err) {
			kfree(event);
			return err;
		}
	}

	if (!is_level2)
		return 0;

	obj_event = xa_load(&event->object_ids, key_level2);
	if (!obj_event) {
		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
		if (!obj_event)
			/* the level-1 entry stays valid for future use */
			return -ENOMEM;

		err = xa_insert(&event->object_ids,
				key_level2,
				obj_event,
				GFP_KERNEL);
		if (err) {
			/* don't leak the level-2 entry if insertion fails */
			kfree(obj_event);
			return err;
		}
		INIT_LIST_HEAD(&obj_event->obj_sub_list);
	}

	return 0;
}

static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
				   struct devx_obj *obj)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (obj) {
			if (!is_legacy_obj_event_num(event_type_num_list[i]))
				return false;
		} else if (!is_legacy_unaffiliated_event_num(
				event_type_num_list[i])) {
			return false;
		}
	}

	return true;
}

#define MAX_SUPP_EVENT_NUM 255
static bool is_valid_events(struct mlx5_core_dev *dev,
			    int num_events, u16 *event_type_num_list,
			    struct devx_obj *obj)
{
	__be64 *aff_events;
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;
	int i;

	if (MLX5_CAP_GEN(dev, event_cap)) {
		aff_events = MLX5_CAP_DEV_EVENT(dev,
						user_affiliated_events);
		unaff_events = MLX5_CAP_DEV_EVENT(dev,
						  user_unaffiliated_events);
	} else {
		return is_valid_events_legacy(num_events, event_type_num_list,
					      obj);
	}

	for (i = 0; i < num_events; i++) {
		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
			return false;

		mask_entry = event_type_num_list[i] / 64;
		mask_bit = event_type_num_list[i] % 64;

		if (obj) {
			/* CQ completion */
			if (event_type_num_list[i] == 0)
				continue;

			if (!(be64_to_cpu(aff_events[mask_entry]) &
					(1ull << mask_bit)))
				return false;

			continue;
		}

		if (!(be64_to_cpu(unaff_events[mask_entry]) &
				(1ull << mask_bit)))
			return false;
	}

	return true;
}

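/*
 * MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT works in two phases: first all
 * allocations and XArray insertions are performed under event_xa_lock,
 * then the fully-initialized subscriptions are published onto the RCU
 * lists, so nothing can fail after a subscription becomes visible to the
 * event dispatch path.
 */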
#define MAX_NUM_EVENTS 16
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	struct ib_uobject *fd_uobj;
	struct devx_obj *obj = NULL;
	struct devx_async_event_file *ev_file;
	struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
	u16 *event_type_num_list;
	struct devx_event_subscription *event_sub, *tmp_sub;
	struct list_head sub_list;
	int redirect_fd;
	bool use_eventfd = false;
	int num_events;
	int num_alloc_xa_entries = 0;
	u16 obj_type = 0;
	u64 cookie = 0;
	u32 obj_id = 0;
	int err;
	int i;

	if (!c->devx_uid)
		return -EINVAL;

	if (!IS_ERR(devx_uobj)) {
		obj = (struct devx_obj *)devx_uobj->object;
		if (obj)
			obj_id = get_dec_obj_id(obj->obj_id);
	}

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_event_file,
			       uobj);

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
		err = uverbs_copy_from(&redirect_fd, attrs,
			       MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
		if (err)
			return err;

		use_eventfd = true;
	}

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
		if (use_eventfd)
			return -EINVAL;

		err = uverbs_copy_from(&cookie, attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
		if (err)
			return err;
	}

	num_events = uverbs_attr_ptr_get_array_size(
		attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		sizeof(u16));

	if (num_events < 0)
		return num_events;

	if (num_events > MAX_NUM_EVENTS)
		return -EINVAL;

	event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);

	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
		return -EINVAL;

	INIT_LIST_HEAD(&sub_list);

	/* Serialize against concurrent subscriptions to the same XA entries
	 * so that all of them can succeed
	 */
	mutex_lock(&devx_event_table->event_xa_lock);
	for (i = 0; i < num_events; i++) {
		u32 key_level1;

		if (obj)
			obj_type = get_dec_obj_type(obj,
						    event_type_num_list[i]);
		key_level1 = event_type_num_list[i] | obj_type << 16;

		err = subscribe_event_xa_alloc(devx_event_table,
					       key_level1,
					       obj,
					       obj_id);
		if (err)
			goto err;

		num_alloc_xa_entries++;
		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
		if (!event_sub) {
			err = -ENOMEM;
			goto err;
		}

		list_add_tail(&event_sub->event_list, &sub_list);
		uverbs_uobject_get(&ev_file->uobj);
		if (use_eventfd) {
			event_sub->eventfd =
				eventfd_ctx_fdget(redirect_fd);

			if (IS_ERR(event_sub->eventfd)) {
				err = PTR_ERR(event_sub->eventfd);
				event_sub->eventfd = NULL;
				goto err;
			}
		}

		event_sub->cookie = cookie;
		event_sub->ev_file = ev_file;
		/* May be needed when cleaning up the devx object/subscription */
		event_sub->xa_key_level1 = key_level1;
		event_sub->xa_key_level2 = obj_id;
		INIT_LIST_HEAD(&event_sub->obj_list);
	}

	/* Once all the allocations and the XA data insertions were done we
	 * can go ahead and add all the subscriptions to the relevant lists
	 * without concern of a failure.
	 */
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		struct devx_event *event;
		struct devx_obj_event *obj_event;

		list_del_init(&event_sub->event_list);

		spin_lock_irq(&ev_file->lock);
		list_add_tail_rcu(&event_sub->file_list,
				  &ev_file->subscribed_events_list);
		spin_unlock_irq(&ev_file->lock);

		event = xa_load(&devx_event_table->event_xa,
				event_sub->xa_key_level1);
		WARN_ON(!event);

		if (!obj) {
			list_add_tail_rcu(&event_sub->xa_list,
					  &event->unaffiliated_list);
			continue;
		}

		obj_event = xa_load(&event->object_ids, obj_id);
		WARN_ON(!obj_event);
		list_add_tail_rcu(&event_sub->xa_list,
				  &obj_event->obj_sub_list);
		list_add_tail_rcu(&event_sub->obj_list,
				  &obj->event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return 0;

err:
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		list_del(&event_sub->event_list);

		subscribe_event_xa_dealloc(devx_event_table,
					   event_sub->xa_key_level1,
					   obj,
					   obj_id);

		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);
		uverbs_uobject_put(&event_sub->ev_file->uobj);
		kfree(event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return err;
}

static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	u32 access;
	int err;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	err = uverbs_get_flags32(&access, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ);
	if (err)
		return err;

	err = ib_check_mr_access(&dev->ib_dev, access);
	if (err)
		return err;

	obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);
	return 0;
}

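/*
 * Pick the largest device page size that can describe this umem.  The
 * chosen size must either leave no DMA offset and no length remainder,
 * or be no larger than the CPU page size so the page list can safely be
 * offset into.
 */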
static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
					       unsigned long pgsz_bitmap)
{
	unsigned long page_size;

	/* Don't bother checking page sizes larger than the umem itself: the
	 * offset must be zero and the DEVX umem length must equal the full
	 * umem length.
	 */
	pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length),
					 PAGE_SHIFT),
				   MLX5_ADAPTER_PAGE_SHIFT);
	if (!pgsz_bitmap)
		return 0;

	page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, U64_MAX);
	if (!page_size)
		return 0;

	/* If the page_size is less than the CPU page size then we can use the
	 * offset and create a umem which is a subset of the page list.
	 * For larger page sizes we can't be sure the DMA list reflects the
	 * VA so we must ensure that the umem extent is exactly equal to the
	 * page list. Reduce the page size until one of these cases is true.
	 */
	while ((ib_umem_dma_offset(umem, page_size) != 0 ||
		(umem->length % page_size) != 0) &&
		page_size > PAGE_SIZE)
		page_size /= 2;

	return page_size;
}

static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
				   struct uverbs_attr_bundle *attrs,
				   struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd)
{
	unsigned long pgsz_bitmap;
	unsigned int page_size;
	__be64 *mtt;
	void *umem;
	int ret;

	/*
	 * If the user does not pass in pgsz_bitmap then the user promises not
	 * to use umem_offset!=0 in any commands that allocate on top of the
	 * umem.
	 *
	 * If the user wants to use a umem_offset then it must pass in
	 * pgsz_bitmap which guides the maximum page size and thus maximum
	 * object alignment inside the umem. See the PRM.
	 *
	 * Users are not allowed to use IOVA here; mkeys are not supported on
	 * umem.
	 */
	ret = uverbs_get_const_default(&pgsz_bitmap, attrs,
			MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
			GENMASK_ULL(63,
				    min(PAGE_SHIFT, MLX5_ADAPTER_PAGE_SHIFT)));
	if (ret)
		return ret;

	page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap);
	if (!page_size)
		return -EINVAL;

	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		     (MLX5_ST_SZ_BYTES(mtt) *
		      ib_umem_num_dma_blocks(obj->umem, page_size));
	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
	if (IS_ERR(cmd->in))
		return PTR_ERR(cmd->in);

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt,
		   ib_umem_num_dma_blocks(obj->umem, page_size));
	MLX5_SET(umem, umem, log_page_size,
		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset,
		 ib_umem_dma_offset(obj->umem, page_size));

	mlx5_ib_populate_pas(obj->umem, page_size, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
				     MLX5_IB_MTT_READ);
	return 0;
}

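/*
 * MLX5_IB_METHOD_DEVX_UMEM_REG: pin a user memory region and register it
 * with the device as a umem object that later raw commands may reference
 * by the returned id.  The destroy box is pre-built exactly as for DEVX
 * objects, so devx_umem_cleanup() only has to replay it.
 */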
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int err;

	if (!c->devx_uid)
		return -EINVAL;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd);
	if (err)
		goto err_umem_release;

	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
			     sizeof(obj_id));
	return err;

err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}

static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why,
			     struct uverbs_attr_bundle *attrs)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (err)
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}

static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
				  unsigned long event_type)
{
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;

	if (!MLX5_CAP_GEN(dev, event_cap))
		return is_legacy_unaffiliated_event_num(event_type);

	unaff_events = MLX5_CAP_DEV_EVENT(dev,
					  user_unaffiliated_events);
	WARN_ON(event_type > MAX_SUPP_EVENT_NUM);

	mask_entry = event_type / 64;
	mask_bit = event_type % 64;

	if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
		return false;

	return true;
}

static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
{
	struct mlx5_eqe *eqe = data;
	u32 obj_id = 0;

	switch (event_type) {
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_CQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
		break;
	default:
		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
		break;
	}

	return obj_id;
}

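/*
 * Queue one event on its subscriber's file.  In omit_data mode the
 * subscription itself is queued, which naturally coalesces duplicates; in
 * full mode a copy of the EQE is allocated with GFP_ATOMIC and a failed
 * allocation is reported to the reader as an overflow.
 */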
static int deliver_event(struct devx_event_subscription *event_sub,
			 const void *data)
{
	struct devx_async_event_file *ev_file;
	struct devx_async_event_data *event_data;
	unsigned long flags;

	ev_file = event_sub->ev_file;

	if (ev_file->omit_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		if (!list_empty(&event_sub->event_list) ||
		    ev_file->is_destroyed) {
			spin_unlock_irqrestore(&ev_file->lock, flags);
			return 0;
		}

		list_add_tail(&event_sub->event_list, &ev_file->event_list);
		spin_unlock_irqrestore(&ev_file->lock, flags);
		wake_up_interruptible(&ev_file->poll_wait);
		return 0;
	}

	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
			     GFP_ATOMIC);
	if (!event_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		ev_file->is_overflow_err = 1;
		spin_unlock_irqrestore(&ev_file->lock, flags);
		return -ENOMEM;
	}

	event_data->hdr.cookie = event_sub->cookie;
	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));

	spin_lock_irqsave(&ev_file->lock, flags);
	if (!ev_file->is_destroyed)
		list_add_tail(&event_data->list, &ev_file->event_list);
	else
		kfree(event_data);
	spin_unlock_irqrestore(&ev_file->lock, flags);
	wake_up_interruptible(&ev_file->poll_wait);

	return 0;
}

static void dispatch_event_fd(struct list_head *fd_list,
			      const void *data)
{
	struct devx_event_subscription *item;

	list_for_each_entry_rcu(item, fd_list, xa_list) {
		if (item->eventfd)
			eventfd_signal(item->eventfd, 1);
		else
			deliver_event(item, data);
	}
}

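/*
 * EQ notifier entry point for DEVX: map the event type (and, for
 * affiliated events, the object id carried in the EQE) to the subscriber
 * lists built by the subscribe method and fan the event out under RCU.
 */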
static int devx_event_notifier(struct notifier_block *nb,
			       unsigned long event_type, void *data)
{
	struct mlx5_devx_event_table *table;
	struct mlx5_ib_dev *dev;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u16 obj_type = 0;
	bool is_unaffiliated;
	u32 obj_id;

	/* Explicitly filter out kernel events that may occur frequently */
	if (event_type == MLX5_EVENT_TYPE_CMD ||
	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
		return NOTIFY_OK;

	table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
	dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);

	if (!is_unaffiliated)
		obj_type = get_event_obj_type(event_type, data);

	rcu_read_lock();
	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
	if (!event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	if (is_unaffiliated) {
		dispatch_event_fd(&event->unaffiliated_list, data);
		rcu_read_unlock();
		return NOTIFY_OK;
	}

	obj_id = devx_get_obj_id_from_event(event_type, data);
	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	dispatch_event_fd(&obj_event->obj_sub_list, data);

	rcu_read_unlock();
	return NOTIFY_OK;
}

int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	int uid;

	uid = mlx5_ib_devx_create(dev, false);
	if (uid > 0) {
		dev->devx_whitelist_uid = uid;
		xa_init(&table->event_xa);
		mutex_init(&table->event_xa_lock);
		MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
		mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
	}

	return 0;
}

void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	struct devx_event_subscription *sub, *tmp;
	struct devx_event *event;
	void *entry;
	unsigned long id;

	if (dev->devx_whitelist_uid) {
		mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
		mutex_lock(&dev->devx_event_table.event_xa_lock);
		xa_for_each(&table->event_xa, id, entry) {
			event = entry;
			list_for_each_entry_safe(
				sub, tmp, &event->unaffiliated_list, xa_list)
				devx_cleanup_subscription(dev, sub);
			kfree(entry);
		}
		mutex_unlock(&dev->devx_event_table.event_xa_lock);
		xa_destroy(&table->event_xa);

		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
	}
}

static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *event;
	int ret = 0;
	size_t eventsz;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(
			    ev_queue->poll_wait,
			    (!list_empty(&ev_queue->event_list) ||
			     ev_queue->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_queue->lock);
		if (ev_queue->is_destroyed) {
			spin_unlock_irq(&ev_queue->lock);
			return -EIO;
		}
	}

	event = list_entry(ev_queue->event_list.next,
			   struct devx_async_data, list);
	eventsz = event->cmd_out_len +
			sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);

	if (eventsz > count) {
		spin_unlock_irq(&ev_queue->lock);
		return -ENOSPC;
	}

	list_del(ev_queue->event_list.next);
	spin_unlock_irq(&ev_queue->lock);

	if (copy_to_user(buf, &event->hdr, eventsz))
		ret = -EFAULT;
	else
		ret = eventsz;

	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
	kvfree(event);
	return ret;
}

static __poll_t devx_async_cmd_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (ev_queue->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

static const struct file_operations devx_async_cmd_event_fops = {
	.owner   = THIS_MODULE,
	.read    = devx_async_cmd_event_read,
	.poll    = devx_async_cmd_event_poll,
	.release = uverbs_uobject_fd_release,
	.llseek  = no_llseek,
};

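/*
 * read() on an event FD: block (unless O_NONBLOCK) until an event arrives
 * or the FD is destroyed.  In omit_data mode only the 64-bit cookie is
 * returned; otherwise the header plus a full EQE is copied out.
 */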
static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
				     size_t count, loff_t *pos)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub;
	struct devx_async_event_data *event;
	int ret = 0;
	size_t eventsz;
	bool omit_data;
	void *event_data;

	omit_data = ev_file->omit_data;

	spin_lock_irq(&ev_file->lock);

	if (ev_file->is_overflow_err) {
		ev_file->is_overflow_err = 0;
		spin_unlock_irq(&ev_file->lock);
		return -EOVERFLOW;
	}

	while (list_empty(&ev_file->event_list)) {
		spin_unlock_irq(&ev_file->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_file->poll_wait,
			    (!list_empty(&ev_file->event_list) ||
			     ev_file->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_file->lock);
		if (ev_file->is_destroyed) {
			spin_unlock_irq(&ev_file->lock);
			return -EIO;
		}
	}

	if (omit_data) {
		event_sub = list_first_entry(&ev_file->event_list,
					struct devx_event_subscription,
					event_list);
		eventsz = sizeof(event_sub->cookie);
		event_data = &event_sub->cookie;
	} else {
		event = list_first_entry(&ev_file->event_list,
				      struct devx_async_event_data, list);
		eventsz = sizeof(struct mlx5_eqe) +
			sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
		event_data = &event->hdr;
	}

	if (eventsz > count) {
		spin_unlock_irq(&ev_file->lock);
		return -EINVAL;
	}

	if (omit_data)
		list_del_init(&event_sub->event_list);
	else
		list_del(&event->list);

	spin_unlock_irq(&ev_file->lock);

	if (copy_to_user(buf, event_data, eventsz))
		/* This points to an application issue, not a kernel concern */
		ret = -EFAULT;
	else
		ret = eventsz;

	if (!omit_data)
		kfree(event);
	return ret;
}

static __poll_t devx_async_event_poll(struct file *filp,
				      struct poll_table_struct *wait)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_file->poll_wait, wait);

	spin_lock_irq(&ev_file->lock);
	if (ev_file->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_file->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_file->lock);

	return pollflags;
}

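/*
 * RCU callback releasing a subscription once readers can no longer
 * reference it: drop the optional eventfd reference and the hold on
 * the owning event FD uobject, then free the memory.
 */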
static void devx_free_subscription(struct rcu_head *rcu)
{
	struct devx_event_subscription *event_sub =
		container_of(rcu, struct devx_event_subscription, rcu);

	if (event_sub->eventfd)
		eventfd_ctx_put(event_sub->eventfd);
	uverbs_uobject_put(&event_sub->ev_file->uobj);
	kfree(event_sub);
}

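/*
 * File operations for the async event FD. Consumption mirrors the
 * async command FD above, except that an undersized buffer fails with
 * -EINVAL and, in omit_data mode, eight bytes are enough. A minimal
 * sketch ("fd" is illustrative only):
 *
 *	__u64 cookie;
 *	read(fd, &cookie, sizeof(cookie));
 */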
static const struct file_operations devx_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_event_read,
	.poll    = devx_async_event_poll,
	.release = uverbs_uobject_fd_release,
	.llseek	 = no_llseek,
};

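/*
 * Teardown of the async command FD: mark the queue destroyed and wake
 * pollers first, then have mlx5_cmd_cleanup_async_ctx() wait out any
 * commands still in flight so nothing new can be queued, and finally
 * drain whatever remains on the event list.
 */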
static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
					      enum rdma_remove_reason why)
{
	struct devx_async_cmd_event_file *comp_ev_file =
		container_of(uobj, struct devx_async_cmd_event_file,
			     uobj);
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *entry, *tmp;

	spin_lock_irq(&ev_queue->lock);
	ev_queue->is_destroyed = 1;
	spin_unlock_irq(&ev_queue->lock);
	wake_up_interruptible(&ev_queue->poll_wait);

	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);

	spin_lock_irq(&comp_ev_file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp,
				 &comp_ev_file->ev_queue.event_list, list) {
		list_del(&entry->list);
		kvfree(entry);
	}
	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
}

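/*
 * Teardown of the async event FD: mark the file destroyed and drop any
 * pending events under the lock, wake pollers, then detach every
 * subscription tied to this FD under event_xa_lock. The subscriptions
 * themselves are freed via call_rcu() since the dispatch path may
 * still be walking these lists.
 */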
static void devx_async_event_destroy_uobj(struct ib_uobject *uobj,
					  enum rdma_remove_reason why)
{
	struct devx_async_event_file *ev_file =
		container_of(uobj, struct devx_async_event_file,
			     uobj);
	struct devx_event_subscription *event_sub, *event_sub_tmp;
	struct mlx5_ib_dev *dev = ev_file->dev;

	spin_lock_irq(&ev_file->lock);
	ev_file->is_destroyed = 1;

	/* Free any events still pending on this FD */
	if (ev_file->omit_data) {
		struct devx_event_subscription *event_sub, *tmp;

		list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
					 event_list)
			list_del_init(&event_sub->event_list);
	} else {
		struct devx_async_event_data *entry, *tmp;

		list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
					 list) {
			list_del(&entry->list);
			kfree(entry);
		}
	}

	spin_unlock_irq(&ev_file->lock);
	wake_up_interruptible(&ev_file->poll_wait);

	mutex_lock(&dev->devx_event_table.event_xa_lock);
	/* delete the subscriptions which are related to this FD */
	list_for_each_entry_safe(event_sub, event_sub_tmp,
				 &ev_file->subscribed_events_list, file_list) {
		devx_cleanup_subscription(dev, event_sub);
		list_del_rcu(&event_sub->file_list);
		/* subscription may not be used by the read API any more */
		call_rcu(&event_sub->rcu, devx_free_subscription);
	}
	mutex_unlock(&dev->devx_event_table.event_xa_lock);

	put_device(&dev->ib_dev.dev);
}

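/*
 * UAPI surface of DEVX. UMEM_REG takes the user address, length and
 * access flags, plus an optional page-size bitmap, and returns the
 * umem ID that subsequent DEVX commands can reference.
 */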
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
			     u64),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

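/*
 * Asynchronous flavour of OBJ_QUERY: the caller supplies the command
 * input, the expected output length, a wr_id and an async command FD;
 * the completion is later read from that FD as a
 * struct mlx5_ib_uapi_devx_async_cmd_hdr (assumed here to carry the
 * wr_id back) followed by the command output.
 */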
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
		u16, UA_MANDATORY),
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UVERBS_ACCESS_READ,
		UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
		UVERBS_ATTR_TYPE(u64),
		UA_MANDATORY));

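/*
 * Event subscription: a mandatory event FD handle, an optional DEVX
 * object handle (when absent the subscription is unaffiliated), a
 * mandatory list of u16 event type numbers, an optional cookie
 * returned with each event, and an optional eventfd number to be
 * signalled on delivery instead of queueing event data.
 */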
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UVERBS_ACCESS_READ,
		UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
		MLX5_IB_OBJECT_DEVX_OBJ,
		UVERBS_ACCESS_READ,
		UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
		UVERBS_ATTR_TYPE(u64),
		UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
		UVERBS_ATTR_TYPE(u32),
		UA_OPTIONAL));

DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
			MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
			     devx_async_cmd_event_destroy_uobj,
			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
			MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
			enum mlx5_ib_uapi_devx_create_event_channel_flags,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
			     devx_async_event_destroy_uobj,
			     &devx_async_event_fops, "[devx_async_event]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));

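/*
 * DEVX is exposed only when the device reports a nonzero log_max_uctx
 * capability, i.e. the firmware supports user contexts.
 */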
static bool devx_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
}

const struct uapi_definition mlx5_ib_devx_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_OBJ,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_UMEM,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	{},
};