drivers/infiniband/hw/mlx5/devx.c
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
4  */
5
6 #include <rdma/ib_user_verbs.h>
7 #include <rdma/ib_verbs.h>
8 #include <rdma/uverbs_types.h>
9 #include <rdma/uverbs_ioctl.h>
10 #include <rdma/mlx5_user_ioctl_cmds.h>
11 #include <rdma/mlx5_user_ioctl_verbs.h>
12 #include <rdma/ib_umem.h>
13 #include <rdma/uverbs_std_types.h>
14 #include <linux/mlx5/driver.h>
15 #include <linux/mlx5/fs.h>
16 #include "mlx5_ib.h"
17 #include "devx.h"
18 #include "qp.h"
19 #include <linux/xarray.h>
20
21 #define UVERBS_MODULE_NAME mlx5_ib
22 #include <rdma/uverbs_named_ioctl.h>
23
24 static void dispatch_event_fd(struct list_head *fd_list, const void *data);
25
26 enum devx_obj_flags {
27         DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
28         DEVX_OBJ_FLAGS_DCT = 1 << 1,
29         DEVX_OBJ_FLAGS_CQ = 1 << 2,
30 };
31
32 struct devx_async_data {
33         struct mlx5_ib_dev *mdev;
34         struct list_head list;
35         struct devx_async_cmd_event_file *ev_file;
36         struct mlx5_async_work cb_work;
37         u16 cmd_out_len;
38         /* must be last field in this structure */
39         struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
40 };
41
42 struct devx_async_event_data {
43         struct list_head list; /* headed in ev_file->event_list */
44         struct mlx5_ib_uapi_devx_async_event_hdr hdr;
45 };
46
47 /* first level XA value data structure */
48 struct devx_event {
49         struct xarray object_ids; /* second XA level, Key = object id */
50         struct list_head unaffiliated_list;
51 };
52
53 /* second level XA value data structure */
54 struct devx_obj_event {
55         struct rcu_head rcu;
56         struct list_head obj_sub_list;
57 };
58
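/*
 * Rough picture of the event bookkeeping built from the structures above
 * (a descriptive sketch inferred from the fields, not a formal contract):
 * a first-level xarray holds struct devx_event entries; each devx_event
 * carries a list of unaffiliated subscriptions plus a second-level xarray,
 * keyed by object id, whose values are struct devx_obj_event holding the
 * subscriptions for that particular object. A subscription (below) links
 * itself into one of those lists and into its owning event file.
 */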
59 struct devx_event_subscription {
60         struct list_head file_list; /* headed in ev_file->
61                                      * subscribed_events_list
62                                      */
63         struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
64                                    * devx_obj_event->obj_sub_list
65                                    */
66         struct list_head obj_list; /* headed in devx_object */
67         struct list_head event_list; /* headed in ev_file->event_list or in
68                                       * temp list via subscription
69                                       */
70
71         u8 is_cleaned:1;
72         u32 xa_key_level1;
73         u32 xa_key_level2;
74         struct rcu_head rcu;
75         u64 cookie;
76         struct devx_async_event_file *ev_file;
77         struct eventfd_ctx *eventfd;
78 };
79
80 struct devx_async_event_file {
81         struct ib_uobject uobj;
82         /* Head of events that are subscribed to this FD */
83         struct list_head subscribed_events_list;
84         spinlock_t lock;
85         wait_queue_head_t poll_wait;
86         struct list_head event_list;
87         struct mlx5_ib_dev *dev;
88         u8 omit_data:1;
89         u8 is_overflow_err:1;
90         u8 is_destroyed:1;
91 };
92
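/*
 * A devx umem keeps, next to the ib_umem itself, a pre-built DESTROY_UMEM
 * command mailbox (dinbox/dinlen, sized by MLX5_ST_SZ_DW(destroy_umem_in))
 * so that teardown can later be issued without rebuilding the command.
 * This is a descriptive note inferred from the fields below.
 */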
93 struct devx_umem {
94         struct mlx5_core_dev            *mdev;
95         struct ib_umem                  *umem;
96         u32                             dinlen;
97         u32                             dinbox[MLX5_ST_SZ_DW(destroy_umem_in)];
98 };
99
100 struct devx_umem_reg_cmd {
101         void                            *in;
102         u32                             inlen;
103         u32                             out[MLX5_ST_SZ_DW(create_umem_out)];
104 };
105
106 static struct mlx5_ib_ucontext *
107 devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
108 {
109         return to_mucontext(ib_uverbs_get_ucontext(attrs));
110 }
111
112 int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
113 {
114         u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
115         u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
116         void *uctx;
117         int err;
118         u16 uid;
119         u32 cap = 0;
120
121         /* 0 means not supported */
122         if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
123                 return -EINVAL;
124
125         uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
126         if (is_user && capable(CAP_NET_RAW) &&
127             (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
128                 cap |= MLX5_UCTX_CAP_RAW_TX;
129         if (is_user && capable(CAP_SYS_RAWIO) &&
130             (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
131              MLX5_UCTX_CAP_INTERNAL_DEV_RES))
132                 cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;
133
134         MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
135         MLX5_SET(uctx, uctx, cap, cap);
136
137         err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
138         if (err)
139                 return err;
140
141         uid = MLX5_GET(create_uctx_out, out, uid);
142         return uid;
143 }
144
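/*
 * Usage sketch (not a quote of the call sites): a positive value returned
 * by mlx5_ib_devx_create() is the firmware uid to stamp into subsequent
 * devx command mailboxes, and it is released again with
 * mlx5_ib_devx_destroy(dev, uid).
 */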
145 void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
146 {
147         u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
148         u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
149
150         MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
151         MLX5_SET(destroy_uctx_in, in, uid, uid);
152
153         mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
154 }
155
156 static bool is_legacy_unaffiliated_event_num(u16 event_num)
157 {
158         switch (event_num) {
159         case MLX5_EVENT_TYPE_PORT_CHANGE:
160                 return true;
161         default:
162                 return false;
163         }
164 }
165
166 static bool is_legacy_obj_event_num(u16 event_num)
167 {
168         switch (event_num) {
169         case MLX5_EVENT_TYPE_PATH_MIG:
170         case MLX5_EVENT_TYPE_COMM_EST:
171         case MLX5_EVENT_TYPE_SQ_DRAINED:
172         case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
173         case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
174         case MLX5_EVENT_TYPE_CQ_ERROR:
175         case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
176         case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
177         case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
178         case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
179         case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
180         case MLX5_EVENT_TYPE_DCT_DRAINED:
181         case MLX5_EVENT_TYPE_COMP:
182         case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
183         case MLX5_EVENT_TYPE_XRQ_ERROR:
184                 return true;
185         default:
186                 return false;
187         }
188 }
189
190 static u16 get_legacy_obj_type(u16 opcode)
191 {
192         switch (opcode) {
193         case MLX5_CMD_OP_CREATE_RQ:
194                 return MLX5_EVENT_QUEUE_TYPE_RQ;
195         case MLX5_CMD_OP_CREATE_QP:
196                 return MLX5_EVENT_QUEUE_TYPE_QP;
197         case MLX5_CMD_OP_CREATE_SQ:
198                 return MLX5_EVENT_QUEUE_TYPE_SQ;
199         case MLX5_CMD_OP_CREATE_DCT:
200                 return MLX5_EVENT_QUEUE_TYPE_DCT;
201         default:
202                 return 0;
203         }
204 }
205
206 static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
207 {
208         u16 opcode;
209
210         opcode = (obj->obj_id >> 32) & 0xffff;
211
212         if (is_legacy_obj_event_num(event_num))
213                 return get_legacy_obj_type(opcode);
214
215         switch (opcode) {
216         case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
217                 return (obj->obj_id >> 48);
218         case MLX5_CMD_OP_CREATE_RQ:
219                 return MLX5_OBJ_TYPE_RQ;
220         case MLX5_CMD_OP_CREATE_QP:
221                 return MLX5_OBJ_TYPE_QP;
222         case MLX5_CMD_OP_CREATE_SQ:
223                 return MLX5_OBJ_TYPE_SQ;
224         case MLX5_CMD_OP_CREATE_DCT:
225                 return MLX5_OBJ_TYPE_DCT;
226         case MLX5_CMD_OP_CREATE_TIR:
227                 return MLX5_OBJ_TYPE_TIR;
228         case MLX5_CMD_OP_CREATE_TIS:
229                 return MLX5_OBJ_TYPE_TIS;
230         case MLX5_CMD_OP_CREATE_PSV:
231                 return MLX5_OBJ_TYPE_PSV;
232         case MLX5_OBJ_TYPE_MKEY:
233                 return MLX5_OBJ_TYPE_MKEY;
234         case MLX5_CMD_OP_CREATE_RMP:
235                 return MLX5_OBJ_TYPE_RMP;
236         case MLX5_CMD_OP_CREATE_XRC_SRQ:
237                 return MLX5_OBJ_TYPE_XRC_SRQ;
238         case MLX5_CMD_OP_CREATE_XRQ:
239                 return MLX5_OBJ_TYPE_XRQ;
240         case MLX5_CMD_OP_CREATE_RQT:
241                 return MLX5_OBJ_TYPE_RQT;
242         case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
243                 return MLX5_OBJ_TYPE_FLOW_COUNTER;
244         case MLX5_CMD_OP_CREATE_CQ:
245                 return MLX5_OBJ_TYPE_CQ;
246         default:
247                 return 0;
248         }
249 }
250
251 static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
252 {
253         switch (event_type) {
254         case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
255         case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
256         case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
257         case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
258         case MLX5_EVENT_TYPE_PATH_MIG:
259         case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
260         case MLX5_EVENT_TYPE_COMM_EST:
261         case MLX5_EVENT_TYPE_SQ_DRAINED:
262         case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
263         case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
264                 return eqe->data.qp_srq.type;
265         case MLX5_EVENT_TYPE_CQ_ERROR:
266         case MLX5_EVENT_TYPE_XRQ_ERROR:
267                 return 0;
268         case MLX5_EVENT_TYPE_DCT_DRAINED:
269         case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
270                 return MLX5_EVENT_QUEUE_TYPE_DCT;
271         default:
272                 return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
273         }
274 }
275
276 static u32 get_dec_obj_id(u64 obj_id)
277 {
278         return (obj_id & 0xffffffff);
279 }
280
/*
 * As the obj_id in the firmware is not globally unique, the object type
 * must also be considered when checking for a valid object id.
 * For that purpose, the opcode of the creator command is encoded as part
 * of the obj_id.
 */
286 static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
287 {
288         return ((u64)opcode << 32) | obj_id;
289 }
290
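/*
 * Resulting layout, with a worked example: bits 0..31 hold the firmware
 * object id, bits 32..47 the creator opcode and, for general objects (see
 * devx_get_obj_id() below), bits 48..63 the general object type. A CQ with
 * cqn 0x15 is therefore tracked as ((u64)MLX5_CMD_OP_CREATE_CQ << 32) | 0x15,
 * and get_dec_obj_id() recovers the low 32 bits again.
 */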
291 static u32 devx_get_created_obj_id(const void *in, const void *out, u16 opcode)
292 {
293         switch (opcode) {
294         case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
295                 return MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
296         case MLX5_CMD_OP_CREATE_UMEM:
297                 return MLX5_GET(create_umem_out, out, umem_id);
298         case MLX5_CMD_OP_CREATE_MKEY:
299                 return MLX5_GET(create_mkey_out, out, mkey_index);
300         case MLX5_CMD_OP_CREATE_CQ:
301                 return MLX5_GET(create_cq_out, out, cqn);
302         case MLX5_CMD_OP_ALLOC_PD:
303                 return MLX5_GET(alloc_pd_out, out, pd);
304         case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
305                 return MLX5_GET(alloc_transport_domain_out, out,
306                                 transport_domain);
307         case MLX5_CMD_OP_CREATE_RMP:
308                 return MLX5_GET(create_rmp_out, out, rmpn);
309         case MLX5_CMD_OP_CREATE_SQ:
310                 return MLX5_GET(create_sq_out, out, sqn);
311         case MLX5_CMD_OP_CREATE_RQ:
312                 return MLX5_GET(create_rq_out, out, rqn);
313         case MLX5_CMD_OP_CREATE_RQT:
314                 return MLX5_GET(create_rqt_out, out, rqtn);
315         case MLX5_CMD_OP_CREATE_TIR:
316                 return MLX5_GET(create_tir_out, out, tirn);
317         case MLX5_CMD_OP_CREATE_TIS:
318                 return MLX5_GET(create_tis_out, out, tisn);
319         case MLX5_CMD_OP_ALLOC_Q_COUNTER:
320                 return MLX5_GET(alloc_q_counter_out, out, counter_set_id);
321         case MLX5_CMD_OP_CREATE_FLOW_TABLE:
322                 return MLX5_GET(create_flow_table_out, out, table_id);
323         case MLX5_CMD_OP_CREATE_FLOW_GROUP:
324                 return MLX5_GET(create_flow_group_out, out, group_id);
325         case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
326                 return MLX5_GET(set_fte_in, in, flow_index);
327         case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
328                 return MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
329         case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
330                 return MLX5_GET(alloc_packet_reformat_context_out, out,
331                                 packet_reformat_id);
332         case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
333                 return MLX5_GET(alloc_modify_header_context_out, out,
334                                 modify_header_id);
335         case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
336                 return MLX5_GET(create_scheduling_element_out, out,
337                                 scheduling_element_id);
338         case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
339                 return MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
340         case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
341                 return MLX5_GET(set_l2_table_entry_in, in, table_index);
342         case MLX5_CMD_OP_CREATE_QP:
343                 return MLX5_GET(create_qp_out, out, qpn);
344         case MLX5_CMD_OP_CREATE_SRQ:
345                 return MLX5_GET(create_srq_out, out, srqn);
346         case MLX5_CMD_OP_CREATE_XRC_SRQ:
347                 return MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
348         case MLX5_CMD_OP_CREATE_DCT:
349                 return MLX5_GET(create_dct_out, out, dctn);
350         case MLX5_CMD_OP_CREATE_XRQ:
351                 return MLX5_GET(create_xrq_out, out, xrqn);
352         case MLX5_CMD_OP_ATTACH_TO_MCG:
353                 return MLX5_GET(attach_to_mcg_in, in, qpn);
354         case MLX5_CMD_OP_ALLOC_XRCD:
355                 return MLX5_GET(alloc_xrcd_out, out, xrcd);
356         case MLX5_CMD_OP_CREATE_PSV:
357                 return MLX5_GET(create_psv_out, out, psv0_index);
358         default:
		/* The entry must match one of the devx_is_obj_create_cmd cases */
360                 WARN_ON(true);
361                 return 0;
362         }
363 }
364
365 static u64 devx_get_obj_id(const void *in)
366 {
367         u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
368         u64 obj_id;
369
370         switch (opcode) {
371         case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
372         case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
373                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
374                                         MLX5_GET(general_obj_in_cmd_hdr, in,
375                                                  obj_type) << 16,
376                                         MLX5_GET(general_obj_in_cmd_hdr, in,
377                                                  obj_id));
378                 break;
379         case MLX5_CMD_OP_QUERY_MKEY:
380                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
381                                         MLX5_GET(query_mkey_in, in,
382                                                  mkey_index));
383                 break;
384         case MLX5_CMD_OP_QUERY_CQ:
385                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
386                                         MLX5_GET(query_cq_in, in, cqn));
387                 break;
388         case MLX5_CMD_OP_MODIFY_CQ:
389                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
390                                         MLX5_GET(modify_cq_in, in, cqn));
391                 break;
392         case MLX5_CMD_OP_QUERY_SQ:
393                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
394                                         MLX5_GET(query_sq_in, in, sqn));
395                 break;
396         case MLX5_CMD_OP_MODIFY_SQ:
397                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
398                                         MLX5_GET(modify_sq_in, in, sqn));
399                 break;
400         case MLX5_CMD_OP_QUERY_RQ:
401                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
402                                         MLX5_GET(query_rq_in, in, rqn));
403                 break;
404         case MLX5_CMD_OP_MODIFY_RQ:
405                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
406                                         MLX5_GET(modify_rq_in, in, rqn));
407                 break;
408         case MLX5_CMD_OP_QUERY_RMP:
409                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
410                                         MLX5_GET(query_rmp_in, in, rmpn));
411                 break;
412         case MLX5_CMD_OP_MODIFY_RMP:
413                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
414                                         MLX5_GET(modify_rmp_in, in, rmpn));
415                 break;
416         case MLX5_CMD_OP_QUERY_RQT:
417                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
418                                         MLX5_GET(query_rqt_in, in, rqtn));
419                 break;
420         case MLX5_CMD_OP_MODIFY_RQT:
421                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
422                                         MLX5_GET(modify_rqt_in, in, rqtn));
423                 break;
424         case MLX5_CMD_OP_QUERY_TIR:
425                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
426                                         MLX5_GET(query_tir_in, in, tirn));
427                 break;
428         case MLX5_CMD_OP_MODIFY_TIR:
429                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
430                                         MLX5_GET(modify_tir_in, in, tirn));
431                 break;
432         case MLX5_CMD_OP_QUERY_TIS:
433                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
434                                         MLX5_GET(query_tis_in, in, tisn));
435                 break;
436         case MLX5_CMD_OP_MODIFY_TIS:
437                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
438                                         MLX5_GET(modify_tis_in, in, tisn));
439                 break;
440         case MLX5_CMD_OP_QUERY_FLOW_TABLE:
441                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
442                                         MLX5_GET(query_flow_table_in, in,
443                                                  table_id));
444                 break;
445         case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
446                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
447                                         MLX5_GET(modify_flow_table_in, in,
448                                                  table_id));
449                 break;
450         case MLX5_CMD_OP_QUERY_FLOW_GROUP:
451                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
452                                         MLX5_GET(query_flow_group_in, in,
453                                                  group_id));
454                 break;
455         case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
456                 obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
457                                         MLX5_GET(query_fte_in, in,
458                                                  flow_index));
459                 break;
460         case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
461                 obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
462                                         MLX5_GET(set_fte_in, in, flow_index));
463                 break;
464         case MLX5_CMD_OP_QUERY_Q_COUNTER:
465                 obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
466                                         MLX5_GET(query_q_counter_in, in,
467                                                  counter_set_id));
468                 break;
469         case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
470                 obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
471                                         MLX5_GET(query_flow_counter_in, in,
472                                                  flow_counter_id));
473                 break;
474         case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
475                 obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
476                                         MLX5_GET(query_modify_header_context_in,
477                                                  in, modify_header_id));
478                 break;
479         case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
480                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
481                                         MLX5_GET(query_scheduling_element_in,
482                                                  in, scheduling_element_id));
483                 break;
484         case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
485                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
486                                         MLX5_GET(modify_scheduling_element_in,
487                                                  in, scheduling_element_id));
488                 break;
489         case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
490                 obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
491                                         MLX5_GET(add_vxlan_udp_dport_in, in,
492                                                  vxlan_udp_port));
493                 break;
494         case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
495                 obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
496                                         MLX5_GET(query_l2_table_entry_in, in,
497                                                  table_index));
498                 break;
499         case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
500                 obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
501                                         MLX5_GET(set_l2_table_entry_in, in,
502                                                  table_index));
503                 break;
504         case MLX5_CMD_OP_QUERY_QP:
505                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
506                                         MLX5_GET(query_qp_in, in, qpn));
507                 break;
508         case MLX5_CMD_OP_RST2INIT_QP:
509                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
510                                         MLX5_GET(rst2init_qp_in, in, qpn));
511                 break;
512         case MLX5_CMD_OP_INIT2INIT_QP:
513                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
514                                         MLX5_GET(init2init_qp_in, in, qpn));
515                 break;
516         case MLX5_CMD_OP_INIT2RTR_QP:
517                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
518                                         MLX5_GET(init2rtr_qp_in, in, qpn));
519                 break;
520         case MLX5_CMD_OP_RTR2RTS_QP:
521                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
522                                         MLX5_GET(rtr2rts_qp_in, in, qpn));
523                 break;
524         case MLX5_CMD_OP_RTS2RTS_QP:
525                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
526                                         MLX5_GET(rts2rts_qp_in, in, qpn));
527                 break;
528         case MLX5_CMD_OP_SQERR2RTS_QP:
529                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
530                                         MLX5_GET(sqerr2rts_qp_in, in, qpn));
531                 break;
532         case MLX5_CMD_OP_2ERR_QP:
533                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
534                                         MLX5_GET(qp_2err_in, in, qpn));
535                 break;
536         case MLX5_CMD_OP_2RST_QP:
537                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
538                                         MLX5_GET(qp_2rst_in, in, qpn));
539                 break;
540         case MLX5_CMD_OP_QUERY_DCT:
541                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
542                                         MLX5_GET(query_dct_in, in, dctn));
543                 break;
544         case MLX5_CMD_OP_QUERY_XRQ:
545         case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
546         case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
547                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
548                                         MLX5_GET(query_xrq_in, in, xrqn));
549                 break;
550         case MLX5_CMD_OP_QUERY_XRC_SRQ:
551                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
552                                         MLX5_GET(query_xrc_srq_in, in,
553                                                  xrc_srqn));
554                 break;
555         case MLX5_CMD_OP_ARM_XRC_SRQ:
556                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
557                                         MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
558                 break;
559         case MLX5_CMD_OP_QUERY_SRQ:
560                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
561                                         MLX5_GET(query_srq_in, in, srqn));
562                 break;
563         case MLX5_CMD_OP_ARM_RQ:
564                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
565                                         MLX5_GET(arm_rq_in, in, srq_number));
566                 break;
567         case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
568                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
569                                         MLX5_GET(drain_dct_in, in, dctn));
570                 break;
571         case MLX5_CMD_OP_ARM_XRQ:
572         case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
573         case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
574         case MLX5_CMD_OP_MODIFY_XRQ:
575                 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
576                                         MLX5_GET(arm_xrq_in, in, xrqn));
577                 break;
578         case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
579                 obj_id = get_enc_obj_id
580                                 (MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
581                                  MLX5_GET(query_packet_reformat_context_in,
582                                           in, packet_reformat_id));
583                 break;
584         default:
585                 obj_id = 0;
586         }
587
588         return obj_id;
589 }
590
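/*
 * Descriptive summary of the checks below: the object id embedded in a
 * user's command mailbox must re-encode to the same value that the uobject
 * the command is invoked on would produce, so a handle cannot be used to
 * touch a different object.
 */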
591 static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
592                                  struct ib_uobject *uobj, const void *in)
593 {
594         struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
595         u64 obj_id = devx_get_obj_id(in);
596
597         if (!obj_id)
598                 return false;
599
600         switch (uobj_get_object_id(uobj)) {
601         case UVERBS_OBJECT_CQ:
602                 return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
603                                       to_mcq(uobj->object)->mcq.cqn) ==
604                                       obj_id;
605
606         case UVERBS_OBJECT_SRQ:
607         {
608                 struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
609                 u16 opcode;
610
611                 switch (srq->common.res) {
612                 case MLX5_RES_XSRQ:
613                         opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
614                         break;
615                 case MLX5_RES_XRQ:
616                         opcode = MLX5_CMD_OP_CREATE_XRQ;
617                         break;
618                 default:
619                         if (!dev->mdev->issi)
620                                 opcode = MLX5_CMD_OP_CREATE_SRQ;
621                         else
622                                 opcode = MLX5_CMD_OP_CREATE_RMP;
623                 }
624
625                 return get_enc_obj_id(opcode,
626                                       to_msrq(uobj->object)->msrq.srqn) ==
627                                       obj_id;
628         }
629
630         case UVERBS_OBJECT_QP:
631         {
632                 struct mlx5_ib_qp *qp = to_mqp(uobj->object);
633                 enum ib_qp_type qp_type = qp->ibqp.qp_type;
634
635                 if (qp_type == IB_QPT_RAW_PACKET ||
636                     (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
637                         struct mlx5_ib_raw_packet_qp *raw_packet_qp =
638                                                          &qp->raw_packet_qp;
639                         struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
640                         struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
641
642                         return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
643                                                rq->base.mqp.qpn) == obj_id ||
644                                 get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
645                                                sq->base.mqp.qpn) == obj_id ||
646                                 get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
647                                                rq->tirn) == obj_id ||
648                                 get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
649                                                sq->tisn) == obj_id);
650                 }
651
652                 if (qp_type == MLX5_IB_QPT_DCT)
653                         return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
654                                               qp->dct.mdct.mqp.qpn) == obj_id;
655
656                 return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
657                                       qp->ibqp.qp_num) == obj_id;
658         }
659
660         case UVERBS_OBJECT_WQ:
661                 return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
662                                       to_mrwq(uobj->object)->core_qp.qpn) ==
663                                       obj_id;
664
665         case UVERBS_OBJECT_RWQ_IND_TBL:
666                 return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
667                                       to_mrwq_ind_table(uobj->object)->rqtn) ==
668                                       obj_id;
669
670         case MLX5_IB_OBJECT_DEVX_OBJ:
671                 return ((struct devx_obj *)uobj->object)->obj_id == obj_id;
672
673         default:
674                 return false;
675         }
676 }
677
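/*
 * For create/modify commands that reference user memory, set the relevant
 * *_umem_valid bits in the mailbox. Roughly speaking (the exact semantics
 * are per-object and defined by the device interface), these bits indicate
 * that the queue buffer and doorbell record are described by user-registered
 * umem objects rather than by raw addresses.
 */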
678 static void devx_set_umem_valid(const void *in)
679 {
680         u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
681
682         switch (opcode) {
683         case MLX5_CMD_OP_CREATE_MKEY:
684                 MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
685                 break;
686         case MLX5_CMD_OP_CREATE_CQ:
687         {
688                 void *cqc;
689
690                 MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
691                 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
692                 MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
693                 break;
694         }
695         case MLX5_CMD_OP_CREATE_QP:
696         {
697                 void *qpc;
698
699                 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
700                 MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
701                 MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
702                 break;
703         }
704
705         case MLX5_CMD_OP_CREATE_RQ:
706         {
707                 void *rqc, *wq;
708
709                 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
710                 wq  = MLX5_ADDR_OF(rqc, rqc, wq);
711                 MLX5_SET(wq, wq, dbr_umem_valid, 1);
712                 MLX5_SET(wq, wq, wq_umem_valid, 1);
713                 break;
714         }
715
716         case MLX5_CMD_OP_CREATE_SQ:
717         {
718                 void *sqc, *wq;
719
720                 sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
721                 wq = MLX5_ADDR_OF(sqc, sqc, wq);
722                 MLX5_SET(wq, wq, dbr_umem_valid, 1);
723                 MLX5_SET(wq, wq, wq_umem_valid, 1);
724                 break;
725         }
726
727         case MLX5_CMD_OP_MODIFY_CQ:
728                 MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
729                 break;
730
731         case MLX5_CMD_OP_CREATE_RMP:
732         {
733                 void *rmpc, *wq;
734
735                 rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
736                 wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
737                 MLX5_SET(wq, wq, dbr_umem_valid, 1);
738                 MLX5_SET(wq, wq, wq_umem_valid, 1);
739                 break;
740         }
741
742         case MLX5_CMD_OP_CREATE_XRQ:
743         {
744                 void *xrqc, *wq;
745
746                 xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
747                 wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
748                 MLX5_SET(wq, wq, dbr_umem_valid, 1);
749                 MLX5_SET(wq, wq, wq_umem_valid, 1);
750                 break;
751         }
752
753         case MLX5_CMD_OP_CREATE_XRC_SRQ:
754         {
755                 void *xrc_srqc;
756
757                 MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
758                 xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
759                                         xrc_srq_context_entry);
760                 MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
761                 break;
762         }
763
764         default:
765                 return;
766         }
767 }
768
769 static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
770 {
771         *opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
772
773         switch (*opcode) {
774         case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
775         case MLX5_CMD_OP_CREATE_MKEY:
776         case MLX5_CMD_OP_CREATE_CQ:
777         case MLX5_CMD_OP_ALLOC_PD:
778         case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
779         case MLX5_CMD_OP_CREATE_RMP:
780         case MLX5_CMD_OP_CREATE_SQ:
781         case MLX5_CMD_OP_CREATE_RQ:
782         case MLX5_CMD_OP_CREATE_RQT:
783         case MLX5_CMD_OP_CREATE_TIR:
784         case MLX5_CMD_OP_CREATE_TIS:
785         case MLX5_CMD_OP_ALLOC_Q_COUNTER:
786         case MLX5_CMD_OP_CREATE_FLOW_TABLE:
787         case MLX5_CMD_OP_CREATE_FLOW_GROUP:
788         case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
789         case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
790         case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
791         case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
792         case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
793         case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
794         case MLX5_CMD_OP_CREATE_QP:
795         case MLX5_CMD_OP_CREATE_SRQ:
796         case MLX5_CMD_OP_CREATE_XRC_SRQ:
797         case MLX5_CMD_OP_CREATE_DCT:
798         case MLX5_CMD_OP_CREATE_XRQ:
799         case MLX5_CMD_OP_ATTACH_TO_MCG:
800         case MLX5_CMD_OP_ALLOC_XRCD:
801                 return true;
802         case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
803         {
804                 u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
805                 if (op_mod == 0)
806                         return true;
807                 return false;
808         }
809         case MLX5_CMD_OP_CREATE_PSV:
810         {
811                 u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);
812
813                 if (num_psv == 1)
814                         return true;
815                 return false;
816         }
817         default:
818                 return false;
819         }
820 }
821
822 static bool devx_is_obj_modify_cmd(const void *in)
823 {
824         u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
825
826         switch (opcode) {
827         case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
828         case MLX5_CMD_OP_MODIFY_CQ:
829         case MLX5_CMD_OP_MODIFY_RMP:
830         case MLX5_CMD_OP_MODIFY_SQ:
831         case MLX5_CMD_OP_MODIFY_RQ:
832         case MLX5_CMD_OP_MODIFY_RQT:
833         case MLX5_CMD_OP_MODIFY_TIR:
834         case MLX5_CMD_OP_MODIFY_TIS:
835         case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
836         case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
837         case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
838         case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
839         case MLX5_CMD_OP_RST2INIT_QP:
840         case MLX5_CMD_OP_INIT2RTR_QP:
841         case MLX5_CMD_OP_INIT2INIT_QP:
842         case MLX5_CMD_OP_RTR2RTS_QP:
843         case MLX5_CMD_OP_RTS2RTS_QP:
844         case MLX5_CMD_OP_SQERR2RTS_QP:
845         case MLX5_CMD_OP_2ERR_QP:
846         case MLX5_CMD_OP_2RST_QP:
847         case MLX5_CMD_OP_ARM_XRC_SRQ:
848         case MLX5_CMD_OP_ARM_RQ:
849         case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
850         case MLX5_CMD_OP_ARM_XRQ:
851         case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
852         case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
853         case MLX5_CMD_OP_MODIFY_XRQ:
854                 return true;
855         case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
856         {
857                 u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
858
859                 if (op_mod == 1)
860                         return true;
861                 return false;
862         }
863         default:
864                 return false;
865         }
866 }
867
868 static bool devx_is_obj_query_cmd(const void *in)
869 {
870         u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
871
872         switch (opcode) {
873         case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
874         case MLX5_CMD_OP_QUERY_MKEY:
875         case MLX5_CMD_OP_QUERY_CQ:
876         case MLX5_CMD_OP_QUERY_RMP:
877         case MLX5_CMD_OP_QUERY_SQ:
878         case MLX5_CMD_OP_QUERY_RQ:
879         case MLX5_CMD_OP_QUERY_RQT:
880         case MLX5_CMD_OP_QUERY_TIR:
881         case MLX5_CMD_OP_QUERY_TIS:
882         case MLX5_CMD_OP_QUERY_Q_COUNTER:
883         case MLX5_CMD_OP_QUERY_FLOW_TABLE:
884         case MLX5_CMD_OP_QUERY_FLOW_GROUP:
885         case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
886         case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
887         case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
888         case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
889         case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
890         case MLX5_CMD_OP_QUERY_QP:
891         case MLX5_CMD_OP_QUERY_SRQ:
892         case MLX5_CMD_OP_QUERY_XRC_SRQ:
893         case MLX5_CMD_OP_QUERY_DCT:
894         case MLX5_CMD_OP_QUERY_XRQ:
895         case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
896         case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
897         case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
898                 return true;
899         default:
900                 return false;
901         }
902 }
903
904 static bool devx_is_whitelist_cmd(void *in)
905 {
906         u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
907
908         switch (opcode) {
909         case MLX5_CMD_OP_QUERY_HCA_CAP:
910         case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
911         case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
912                 return true;
913         default:
914                 return false;
915         }
916 }
917
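/*
 * Resolve the uid to stamp into a command: whitelisted query commands may
 * fall back to the device-wide devx_whitelist_uid when the ucontext has no
 * devx uid of its own; any other command requires the ucontext's devx uid.
 */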
918 static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
919 {
920         if (devx_is_whitelist_cmd(cmd_in)) {
921                 struct mlx5_ib_dev *dev;
922
923                 if (c->devx_uid)
924                         return c->devx_uid;
925
926                 dev = to_mdev(c->ibucontext.device);
927                 if (dev->devx_whitelist_uid)
928                         return dev->devx_whitelist_uid;
929
930                 return -EOPNOTSUPP;
931         }
932
933         if (!c->devx_uid)
934                 return -EINVAL;
935
936         return c->devx_uid;
937 }
938
939 static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
940 {
941         u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
942
943         /* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
944         if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
945              MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
946             (opcode >= MLX5_CMD_OP_GENERAL_START &&
947              opcode < MLX5_CMD_OP_GENERAL_END))
948                 return true;
949
950         switch (opcode) {
951         case MLX5_CMD_OP_QUERY_HCA_CAP:
952         case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
953         case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
954         case MLX5_CMD_OP_QUERY_VPORT_STATE:
955         case MLX5_CMD_OP_QUERY_ADAPTER:
956         case MLX5_CMD_OP_QUERY_ISSI:
957         case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
958         case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
959         case MLX5_CMD_OP_QUERY_VNIC_ENV:
960         case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
961         case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
962         case MLX5_CMD_OP_NOP:
963         case MLX5_CMD_OP_QUERY_CONG_STATUS:
964         case MLX5_CMD_OP_QUERY_CONG_PARAMS:
965         case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
966         case MLX5_CMD_OP_QUERY_LAG:
967                 return true;
968         default:
969                 return false;
970         }
971 }
972
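/*
 * Map a user-supplied completion vector index to the device EQ number,
 * presumably so userspace can reference that EQ when building devx objects
 * such as CQs (a short description of the handler below).
 */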
973 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
974         struct uverbs_attr_bundle *attrs)
975 {
976         struct mlx5_ib_ucontext *c;
977         struct mlx5_ib_dev *dev;
978         int user_vector;
979         int dev_eqn;
980         unsigned int irqn;
981         int err;
982
983         if (uverbs_copy_from(&user_vector, attrs,
984                              MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
985                 return -EFAULT;
986
987         c = devx_ufile2uctx(attrs);
988         if (IS_ERR(c))
989                 return PTR_ERR(c);
990         dev = to_mdev(c->ibucontext.device);
991
992         err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
993         if (err < 0)
994                 return err;
995
996         if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
997                            &dev_eqn, sizeof(dev_eqn)))
998                 return -EFAULT;
999
1000         return 0;
1001 }
1002
/*
 * Security note:
 * The hardware protection mechanism works like this: each device object
 * that is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called
 * uar_page in the device specification manual) upon its creation. Then,
 * upon a doorbell, the hardware fetches the context of the object for which
 * the doorbell was rung and validates that the UAR through which the
 * doorbell was rung matches the UAR ID of the object.
 * If they do not match, the doorbell is silently ignored by the hardware.
 * Of course, the user cannot ring a doorbell on a UAR that was not mapped
 * to it.
 * Now, since the devx kernel code does not manipulate the QP/SQ/CQ command
 * mailboxes (except for tagging them with the UID), we expose the user's
 * UAR ID to it, so it can embed the ID in these objects in the format the
 * device specification expects. The only thing a user can therefore do is
 * hurt itself by creating a QP/SQ/CQ with a UAR ID other than its own; in
 * that case other users may ring a doorbell on its objects.
 * The consequence is that another user could schedule a QP/SQ of the buggy
 * user for execution (just insert it into the hardware schedule queue or
 * arm its CQ for event generation); no further harm is expected.
 */
1023 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
1024         struct uverbs_attr_bundle *attrs)
1025 {
1026         struct mlx5_ib_ucontext *c;
1027         struct mlx5_ib_dev *dev;
1028         u32 user_idx;
1029         s32 dev_idx;
1030
1031         c = devx_ufile2uctx(attrs);
1032         if (IS_ERR(c))
1033                 return PTR_ERR(c);
1034         dev = to_mdev(c->ibucontext.device);
1035
1036         if (uverbs_copy_from(&user_idx, attrs,
1037                              MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
1038                 return -EFAULT;
1039
1040         dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
1041         if (dev_idx < 0)
1042                 return dev_idx;
1043
1044         if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
1045                            &dev_idx, sizeof(dev_idx)))
1046                 return -EFAULT;
1047
1048         return 0;
1049 }
1050
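/*
 * DEVX_OTHER is a pass-through for general firmware commands: the mailbox
 * is stamped with the caller's uid and the opcode is restricted to the
 * whitelist enforced by devx_is_general_cmd() below.
 */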
1051 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
1052         struct uverbs_attr_bundle *attrs)
1053 {
1054         struct mlx5_ib_ucontext *c;
1055         struct mlx5_ib_dev *dev;
1056         void *cmd_in = uverbs_attr_get_alloced_ptr(
1057                 attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
1058         int cmd_out_len = uverbs_attr_get_len(attrs,
1059                                         MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
1060         void *cmd_out;
1061         int err;
1062         int uid;
1063
1064         c = devx_ufile2uctx(attrs);
1065         if (IS_ERR(c))
1066                 return PTR_ERR(c);
1067         dev = to_mdev(c->ibucontext.device);
1068
1069         uid = devx_get_uid(c, cmd_in);
1070         if (uid < 0)
1071                 return uid;
1072
	/* Only a whitelist of general HCA commands is allowed for this method. */
1074         if (!devx_is_general_cmd(cmd_in, dev))
1075                 return -EINVAL;
1076
1077         cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1078         if (IS_ERR(cmd_out))
1079                 return PTR_ERR(cmd_out);
1080
1081         MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1082         err = mlx5_cmd_exec(dev->mdev, cmd_in,
1083                             uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
1084                             cmd_out, cmd_out_len);
1085         if (err)
1086                 return err;
1087
1088         return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
1089                               cmd_out_len);
1090 }
1091
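/*
 * Derive, at creation time, the destroy/dealloc command that undoes a given
 * create command, using the create's in/out mailboxes. The resulting din
 * mailbox and dinlen are kept by the caller so the object can later be
 * destroyed without any knowledge of the original request.
 */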
1092 static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
1093                                        u32 *dinlen,
1094                                        u32 *obj_id)
1095 {
1096         u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
1097         u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
1098
1099         *obj_id = devx_get_created_obj_id(in, out, opcode);
1100         *dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
1101         MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
1102
1103         switch (opcode) {
1104         case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
1105                 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
1106                 MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
1107                 MLX5_SET(general_obj_in_cmd_hdr, din, obj_type,
1108                          MLX5_GET(general_obj_in_cmd_hdr, in, obj_type));
1109                 break;
1110
1111         case MLX5_CMD_OP_CREATE_UMEM:
1112                 MLX5_SET(destroy_umem_in, din, opcode,
1113                          MLX5_CMD_OP_DESTROY_UMEM);
1114                 MLX5_SET(destroy_umem_in, din, umem_id, *obj_id);
1115                 break;
1116         case MLX5_CMD_OP_CREATE_MKEY:
1117                 MLX5_SET(destroy_mkey_in, din, opcode,
1118                          MLX5_CMD_OP_DESTROY_MKEY);
1119                 MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
1120                 break;
1121         case MLX5_CMD_OP_CREATE_CQ:
1122                 MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
1123                 MLX5_SET(destroy_cq_in, din, cqn, *obj_id);
1124                 break;
1125         case MLX5_CMD_OP_ALLOC_PD:
1126                 MLX5_SET(dealloc_pd_in, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
1127                 MLX5_SET(dealloc_pd_in, din, pd, *obj_id);
1128                 break;
1129         case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
1130                 MLX5_SET(dealloc_transport_domain_in, din, opcode,
1131                          MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
1132                 MLX5_SET(dealloc_transport_domain_in, din, transport_domain,
1133                          *obj_id);
1134                 break;
1135         case MLX5_CMD_OP_CREATE_RMP:
1136                 MLX5_SET(destroy_rmp_in, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
1137                 MLX5_SET(destroy_rmp_in, din, rmpn, *obj_id);
1138                 break;
1139         case MLX5_CMD_OP_CREATE_SQ:
1140                 MLX5_SET(destroy_sq_in, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
1141                 MLX5_SET(destroy_sq_in, din, sqn, *obj_id);
1142                 break;
1143         case MLX5_CMD_OP_CREATE_RQ:
1144                 MLX5_SET(destroy_rq_in, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
1145                 MLX5_SET(destroy_rq_in, din, rqn, *obj_id);
1146                 break;
1147         case MLX5_CMD_OP_CREATE_RQT:
1148                 MLX5_SET(destroy_rqt_in, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
1149                 MLX5_SET(destroy_rqt_in, din, rqtn, *obj_id);
1150                 break;
1151         case MLX5_CMD_OP_CREATE_TIR:
1152                 MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
1153                 MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
1154                 break;
1155         case MLX5_CMD_OP_CREATE_TIS:
1156                 MLX5_SET(destroy_tis_in, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
1157                 MLX5_SET(destroy_tis_in, din, tisn, *obj_id);
1158                 break;
1159         case MLX5_CMD_OP_ALLOC_Q_COUNTER:
1160                 MLX5_SET(dealloc_q_counter_in, din, opcode,
1161                          MLX5_CMD_OP_DEALLOC_Q_COUNTER);
1162                 MLX5_SET(dealloc_q_counter_in, din, counter_set_id, *obj_id);
1163                 break;
1164         case MLX5_CMD_OP_CREATE_FLOW_TABLE:
1165                 *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
1166                 MLX5_SET(destroy_flow_table_in, din, other_vport,
1167                          MLX5_GET(create_flow_table_in,  in, other_vport));
1168                 MLX5_SET(destroy_flow_table_in, din, vport_number,
1169                          MLX5_GET(create_flow_table_in,  in, vport_number));
1170                 MLX5_SET(destroy_flow_table_in, din, table_type,
1171                          MLX5_GET(create_flow_table_in,  in, table_type));
1172                 MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
1173                 MLX5_SET(destroy_flow_table_in, din, opcode,
1174                          MLX5_CMD_OP_DESTROY_FLOW_TABLE);
1175                 break;
1176         case MLX5_CMD_OP_CREATE_FLOW_GROUP:
1177                 *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
1178                 MLX5_SET(destroy_flow_group_in, din, other_vport,
1179                          MLX5_GET(create_flow_group_in, in, other_vport));
1180                 MLX5_SET(destroy_flow_group_in, din, vport_number,
1181                          MLX5_GET(create_flow_group_in, in, vport_number));
1182                 MLX5_SET(destroy_flow_group_in, din, table_type,
1183                          MLX5_GET(create_flow_group_in, in, table_type));
1184                 MLX5_SET(destroy_flow_group_in, din, table_id,
1185                          MLX5_GET(create_flow_group_in, in, table_id));
1186                 MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
1187                 MLX5_SET(destroy_flow_group_in, din, opcode,
1188                          MLX5_CMD_OP_DESTROY_FLOW_GROUP);
1189                 break;
1190         case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
1191                 *dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
1192                 MLX5_SET(delete_fte_in, din, other_vport,
1193                          MLX5_GET(set_fte_in,  in, other_vport));
1194                 MLX5_SET(delete_fte_in, din, vport_number,
1195                          MLX5_GET(set_fte_in, in, vport_number));
1196                 MLX5_SET(delete_fte_in, din, table_type,
1197                          MLX5_GET(set_fte_in, in, table_type));
1198                 MLX5_SET(delete_fte_in, din, table_id,
1199                          MLX5_GET(set_fte_in, in, table_id));
1200                 MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
1201                 MLX5_SET(delete_fte_in, din, opcode,
1202                          MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
1203                 break;
1204         case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
1205                 MLX5_SET(dealloc_flow_counter_in, din, opcode,
1206                          MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
1207                 MLX5_SET(dealloc_flow_counter_in, din, flow_counter_id,
1208                          *obj_id);
1209                 break;
1210         case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
1211                 MLX5_SET(dealloc_packet_reformat_context_in, din, opcode,
1212                          MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
1213                 MLX5_SET(dealloc_packet_reformat_context_in, din,
1214                          packet_reformat_id, *obj_id);
1215                 break;
1216         case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
1217                 MLX5_SET(dealloc_modify_header_context_in, din, opcode,
1218                          MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
1219                 MLX5_SET(dealloc_modify_header_context_in, din,
1220                          modify_header_id, *obj_id);
1221                 break;
1222         case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
1223                 *dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
1224                 MLX5_SET(destroy_scheduling_element_in, din,
1225                          scheduling_hierarchy,
1226                          MLX5_GET(create_scheduling_element_in, in,
1227                                   scheduling_hierarchy));
1228                 MLX5_SET(destroy_scheduling_element_in, din,
1229                          scheduling_element_id, *obj_id);
1230                 MLX5_SET(destroy_scheduling_element_in, din, opcode,
1231                          MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
1232                 break;
1233         case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
1234                 *dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
1235                 MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
1236                 MLX5_SET(delete_vxlan_udp_dport_in, din, opcode,
1237                          MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
1238                 break;
1239         case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
1240                 *dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
1241                 MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
1242                 MLX5_SET(delete_l2_table_entry_in, din, opcode,
1243                          MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
1244                 break;
1245         case MLX5_CMD_OP_CREATE_QP:
1246                 MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
1247                 MLX5_SET(destroy_qp_in, din, qpn, *obj_id);
1248                 break;
1249         case MLX5_CMD_OP_CREATE_SRQ:
1250                 MLX5_SET(destroy_srq_in, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
1251                 MLX5_SET(destroy_srq_in, din, srqn, *obj_id);
1252                 break;
1253         case MLX5_CMD_OP_CREATE_XRC_SRQ:
1254                 MLX5_SET(destroy_xrc_srq_in, din, opcode,
1255                          MLX5_CMD_OP_DESTROY_XRC_SRQ);
1256                 MLX5_SET(destroy_xrc_srq_in, din, xrc_srqn, *obj_id);
1257                 break;
1258         case MLX5_CMD_OP_CREATE_DCT:
1259                 MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
1260                 MLX5_SET(destroy_dct_in, din, dctn, *obj_id);
1261                 break;
1262         case MLX5_CMD_OP_CREATE_XRQ:
1263                 MLX5_SET(destroy_xrq_in, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
1264                 MLX5_SET(destroy_xrq_in, din, xrqn, *obj_id);
1265                 break;
1266         case MLX5_CMD_OP_ATTACH_TO_MCG:
1267                 *dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
1268                 MLX5_SET(detach_from_mcg_in, din, qpn,
1269                          MLX5_GET(attach_to_mcg_in, in, qpn));
1270                 memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
1271                        MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
1272                        MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
1273                 MLX5_SET(detach_from_mcg_in, din, opcode,
1274                          MLX5_CMD_OP_DETACH_FROM_MCG);
1275                 MLX5_SET(detach_from_mcg_in, din, qpn, *obj_id);
1276                 break;
1277         case MLX5_CMD_OP_ALLOC_XRCD:
1278                 MLX5_SET(dealloc_xrcd_in, din, opcode,
1279                          MLX5_CMD_OP_DEALLOC_XRCD);
1280                 MLX5_SET(dealloc_xrcd_in, din, xrcd, *obj_id);
1281                 break;
1282         case MLX5_CMD_OP_CREATE_PSV:
1283                 MLX5_SET(destroy_psv_in, din, opcode,
1284                          MLX5_CMD_OP_DESTROY_PSV);
1285                 MLX5_SET(destroy_psv_in, din, psvn, *obj_id);
1286                 break;
1287         default:
1288                 /* The entry must match one of the devx_is_obj_create_cmd() cases */
1289                 WARN_ON(true);
1290                 break;
1291         }
1292 }
1293
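     /*
      * Fill the driver's bookkeeping for an indirect (KLM/KSM) mkey created
      * through DEVX and store it in the ODP mkey xarray so page faults can
      * resolve it later.
      */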
1294 static int devx_handle_mkey_indirect(struct devx_obj *obj,
1295                                      struct mlx5_ib_dev *dev,
1296                                      void *in, void *out)
1297 {
1298         struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
1299         struct mlx5_core_mkey *mkey;
1300         void *mkc;
1301         u8 key;
1302
1303         mkey = &devx_mr->mmkey;
1304         mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1305         key = MLX5_GET(mkc, mkc, mkey_7_0);
1306         mkey->key = mlx5_idx_to_mkey(
1307                         MLX5_GET(create_mkey_out, out, mkey_index)) | key;
1308         mkey->type = MLX5_MKEY_INDIRECT_DEVX;
1309         mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
1310         mkey->size = MLX5_GET64(mkc, mkc, len);
1311         mkey->pd = MLX5_GET(mkc, mkc, pd);
1312         devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
1313         init_waitqueue_head(&mkey->wait);
1314
1315         return mlx5r_store_odp_mkey(dev, mkey);
1316 }
1317
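     /*
      * Sanity-check a user CREATE_MKEY command: indirect (KLM/KSM) mkeys are
      * flagged for ODP tracking when CONFIG_INFINIBAND_ON_DEMAND_PAGING is
      * enabled, any other mkey gets mkey_umem_valid set.
      */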
1318 static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
1319                                    struct devx_obj *obj,
1320                                    void *in, int in_len)
1321 {
1322         int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
1323                         MLX5_FLD_SZ_BYTES(create_mkey_in,
1324                         memory_key_mkey_entry);
1325         void *mkc;
1326         u8 access_mode;
1327
1328         if (in_len < min_len)
1329                 return -EINVAL;
1330
1331         mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1332
1333         access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
1334         access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;
1335
1336         if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
1337             access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
1338                 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1339                         obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
1340                 return 0;
1341         }
1342
1343         MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
1344         return 0;
1345 }
1346
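     /*
      * Unlink a subscription from the event tables; the level 2 XA entry is
      * erased and freed once its subscriber list becomes empty. Called with
      * event_xa_lock held.
      */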
1347 static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
1348                                       struct devx_event_subscription *sub)
1349 {
1350         struct devx_event *event;
1351         struct devx_obj_event *xa_val_level2;
1352
1353         if (sub->is_cleaned)
1354                 return;
1355
1356         sub->is_cleaned = 1;
1357         list_del_rcu(&sub->xa_list);
1358
1359         if (list_empty(&sub->obj_list))
1360                 return;
1361
1362         list_del_rcu(&sub->obj_list);
1363         /* look up level 1, then free the level 2 entry if its obj_sub_list is empty */
1364         event = xa_load(&dev->devx_event_table.event_xa,
1365                         sub->xa_key_level1);
1366         WARN_ON(!event);
1367
1368         xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
1369         if (list_empty(&xa_val_level2->obj_sub_list)) {
1370                 xa_erase(&event->object_ids,
1371                          sub->xa_key_level2);
1372                 kfree_rcu(xa_val_level2, rcu);
1373         }
1374 }
1375
1376 static int devx_obj_cleanup(struct ib_uobject *uobject,
1377                             enum rdma_remove_reason why,
1378                             struct uverbs_attr_bundle *attrs)
1379 {
1380         u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1381         struct mlx5_devx_event_table *devx_event_table;
1382         struct devx_obj *obj = uobject->object;
1383         struct devx_event_subscription *sub_entry, *tmp;
1384         struct mlx5_ib_dev *dev;
1385         int ret;
1386
1387         dev = mlx5_udata_to_mdev(&attrs->driver_udata);
1388         if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
1389             xa_erase(&obj->ib_dev->odp_mkeys,
1390                      mlx5_base_mkey(obj->devx_mr.mmkey.key)))
1391                 /*
1392                  * pagefault_single_data_segment() issues commands against the
1393                  * mmkey; we must wait for it to stop before freeing the mkey,
1394                  * as another allocation could get the same mkey number.
1395                  */
1396                 mlx5r_deref_wait_odp_mkey(&obj->devx_mr.mmkey);
1397
1398         if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1399                 ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
1400         else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1401                 ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1402         else
1403                 ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
1404                                     obj->dinlen, out, sizeof(out));
1405         if (ret)
1406                 return ret;
1407
1408         devx_event_table = &dev->devx_event_table;
1409
1410         mutex_lock(&devx_event_table->event_xa_lock);
1411         list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
1412                 devx_cleanup_subscription(dev, sub_entry);
1413         mutex_unlock(&devx_event_table->event_xa_lock);
1414
1415         kfree(obj);
1416         return ret;
1417 }
1418
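     /*
      * Completion handler for DEVX-created CQs: forward the completion EQE to
      * all subscribers registered for this CQ's object id.
      */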
1419 static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
1420 {
1421         struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
1422         struct mlx5_devx_event_table *table;
1423         struct devx_event *event;
1424         struct devx_obj_event *obj_event;
1425         u32 obj_id = mcq->cqn;
1426
1427         table = &obj->ib_dev->devx_event_table;
1428         rcu_read_lock();
1429         event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
1430         if (!event)
1431                 goto out;
1432
1433         obj_event = xa_load(&event->object_ids, obj_id);
1434         if (!obj_event)
1435                 goto out;
1436
1437         dispatch_event_fd(&obj_event->obj_sub_list, eqe);
1438 out:
1439         rcu_read_unlock();
1440 }
1441
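     /* Check whether the CQ being created is marked as an APU thread CQ */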
1442 static bool is_apu_thread_cq(struct mlx5_ib_dev *dev, const void *in)
1443 {
1444         if (!MLX5_CAP_GEN(dev->mdev, apu) ||
1445             !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
1446                       apu_thread_cq))
1447                 return false;
1448
1449         return true;
1450 }
1451
1452 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
1453         struct uverbs_attr_bundle *attrs)
1454 {
1455         void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1456         int cmd_out_len = uverbs_attr_get_len(attrs,
1457                                         MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
1458         int cmd_in_len = uverbs_attr_get_len(attrs,
1459                                         MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1460         void *cmd_out;
1461         struct ib_uobject *uobj = uverbs_attr_get_uobject(
1462                 attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
1463         struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1464                 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1465         struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1466         u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1467         struct devx_obj *obj;
1468         u16 obj_type = 0;
1469         int err;
1470         int uid;
1471         u32 obj_id;
1472         u16 opcode;
1473
1474         if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1475                 return -EINVAL;
1476
1477         uid = devx_get_uid(c, cmd_in);
1478         if (uid < 0)
1479                 return uid;
1480
1481         if (!devx_is_obj_create_cmd(cmd_in, &opcode))
1482                 return -EINVAL;
1483
1484         cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1485         if (IS_ERR(cmd_out))
1486                 return PTR_ERR(cmd_out);
1487
1488         obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
1489         if (!obj)
1490                 return -ENOMEM;
1491
1492         MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1493         if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
1494                 err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
1495                 if (err)
1496                         goto obj_free;
1497         } else {
1498                 devx_set_umem_valid(cmd_in);
1499         }
1500
1501         if (opcode == MLX5_CMD_OP_CREATE_DCT) {
1502                 obj->flags |= DEVX_OBJ_FLAGS_DCT;
1503                 err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
1504                                            cmd_in_len, cmd_out, cmd_out_len);
1505         } else if (opcode == MLX5_CMD_OP_CREATE_CQ &&
1506                    !is_apu_thread_cq(dev, cmd_in)) {
1507                 obj->flags |= DEVX_OBJ_FLAGS_CQ;
1508                 obj->core_cq.comp = devx_cq_comp;
1509                 err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
1510                                           cmd_in, cmd_in_len, cmd_out,
1511                                           cmd_out_len);
1512         } else {
1513                 err = mlx5_cmd_exec(dev->mdev, cmd_in,
1514                                     cmd_in_len,
1515                                     cmd_out, cmd_out_len);
1516         }
1517
1518         if (err)
1519                 goto obj_free;
1520
1521         if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
1522                 u8 bulk = MLX5_GET(alloc_flow_counter_in,
1523                                    cmd_in,
1524                                    flow_counter_bulk);
1525                 obj->flow_counter_bulk_size = 128UL * bulk;
1526         }
1527
1528         uobj->object = obj;
1529         INIT_LIST_HEAD(&obj->event_sub);
1530         obj->ib_dev = dev;
1531         devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
1532                                    &obj_id);
1533         WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
1534
1535         err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
1536         if (err)
1537                 goto obj_destroy;
1538
1539         if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
1540                 obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
1541         obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
1542
1543         if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
1544                 err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
1545                 if (err)
1546                         goto obj_destroy;
1547         }
1548         return 0;
1549
1550 obj_destroy:
1551         if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1552                 mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
1553         else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1554                 mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1555         else
1556                 mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
1557                               sizeof(out));
1558 obj_free:
1559         kfree(obj);
1560         return err;
1561 }
1562
1563 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
1564         struct uverbs_attr_bundle *attrs)
1565 {
1566         void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
1567         int cmd_out_len = uverbs_attr_get_len(attrs,
1568                                         MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
1569         struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1570                                                           MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
1571         struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1572                 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1573         struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1574         void *cmd_out;
1575         int err;
1576         int uid;
1577
1578         if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1579                 return -EINVAL;
1580
1581         uid = devx_get_uid(c, cmd_in);
1582         if (uid < 0)
1583                 return uid;
1584
1585         if (!devx_is_obj_modify_cmd(cmd_in))
1586                 return -EINVAL;
1587
1588         if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1589                 return -EINVAL;
1590
1591         cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1592         if (IS_ERR(cmd_out))
1593                 return PTR_ERR(cmd_out);
1594
1595         MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1596         devx_set_umem_valid(cmd_in);
1597
1598         err = mlx5_cmd_exec(mdev->mdev, cmd_in,
1599                             uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
1600                             cmd_out, cmd_out_len);
1601         if (err)
1602                 return err;
1603
1604         return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
1605                               cmd_out, cmd_out_len);
1606 }
1607
1608 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
1609         struct uverbs_attr_bundle *attrs)
1610 {
1611         void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
1612         int cmd_out_len = uverbs_attr_get_len(attrs,
1613                                               MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
1614         struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1615                                                           MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
1616         struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1617                 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1618         void *cmd_out;
1619         int err;
1620         int uid;
1621         struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1622
1623         if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1624                 return -EINVAL;
1625
1626         uid = devx_get_uid(c, cmd_in);
1627         if (uid < 0)
1628                 return uid;
1629
1630         if (!devx_is_obj_query_cmd(cmd_in))
1631                 return -EINVAL;
1632
1633         if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1634                 return -EINVAL;
1635
1636         cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1637         if (IS_ERR(cmd_out))
1638                 return PTR_ERR(cmd_out);
1639
1640         MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1641         err = mlx5_cmd_exec(mdev->mdev, cmd_in,
1642                             uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
1643                             cmd_out, cmd_out_len);
1644         if (err)
1645                 return err;
1646
1647         return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
1648                               cmd_out, cmd_out_len);
1649 }
1650
1651 struct devx_async_event_queue {
1652         spinlock_t              lock;
1653         wait_queue_head_t       poll_wait;
1654         struct list_head        event_list;
1655         atomic_t                bytes_in_use;
1656         u8                      is_destroyed:1;
1657 };
1658
1659 struct devx_async_cmd_event_file {
1660         struct ib_uobject               uobj;
1661         struct devx_async_event_queue   ev_queue;
1662         struct mlx5_async_ctx           async_ctx;
1663 };
1664
1665 static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
1666 {
1667         spin_lock_init(&ev_queue->lock);
1668         INIT_LIST_HEAD(&ev_queue->event_list);
1669         init_waitqueue_head(&ev_queue->poll_wait);
1670         atomic_set(&ev_queue->bytes_in_use, 0);
1671         ev_queue->is_destroyed = 0;
1672 }
1673
1674 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
1675         struct uverbs_attr_bundle *attrs)
1676 {
1677         struct devx_async_cmd_event_file *ev_file;
1678
1679         struct ib_uobject *uobj = uverbs_attr_get_uobject(
1680                 attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
1681         struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
1682
1683         ev_file = container_of(uobj, struct devx_async_cmd_event_file,
1684                                uobj);
1685         devx_init_event_queue(&ev_file->ev_queue);
1686         mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
1687         return 0;
1688 }
1689
1690 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
1691         struct uverbs_attr_bundle *attrs)
1692 {
1693         struct ib_uobject *uobj = uverbs_attr_get_uobject(
1694                 attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
1695         struct devx_async_event_file *ev_file;
1696         struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1697                 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1698         struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1699         u32 flags;
1700         int err;
1701
1702         err = uverbs_get_flags32(&flags, attrs,
1703                 MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
1704                 MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);
1705
1706         if (err)
1707                 return err;
1708
1709         ev_file = container_of(uobj, struct devx_async_event_file,
1710                                uobj);
1711         spin_lock_init(&ev_file->lock);
1712         INIT_LIST_HEAD(&ev_file->event_list);
1713         init_waitqueue_head(&ev_file->poll_wait);
1714         if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
1715                 ev_file->omit_data = 1;
1716         INIT_LIST_HEAD(&ev_file->subscribed_events_list);
1717         ev_file->dev = dev;
1718         get_device(&dev->ib_dev.dev);
1719         return 0;
1720 }
1721
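     /*
      * Async command completion callback: queue the finished query output on
      * the command event file and wake up readers.
      */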
1722 static void devx_query_callback(int status, struct mlx5_async_work *context)
1723 {
1724         struct devx_async_data *async_data =
1725                 container_of(context, struct devx_async_data, cb_work);
1726         struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
1727         struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
1728         unsigned long flags;
1729
1730         /*
1731          * Note that if the struct devx_async_cmd_event_file uobj begins to be
1732          * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
1733          * routine returns, ensuring that it always remains valid here.
1734          */
1735         spin_lock_irqsave(&ev_queue->lock, flags);
1736         list_add_tail(&async_data->list, &ev_queue->event_list);
1737         spin_unlock_irqrestore(&ev_queue->lock, flags);
1738
1739         wake_up_interruptible(&ev_queue->poll_wait);
1740 }
1741
1742 #define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
1743
1744 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
1745         struct uverbs_attr_bundle *attrs)
1746 {
1747         void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
1748                                 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
1749         struct ib_uobject *uobj = uverbs_attr_get_uobject(
1750                                 attrs,
1751                                 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
1752         u16 cmd_out_len;
1753         struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1754                 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1755         struct ib_uobject *fd_uobj;
1756         int err;
1757         int uid;
1758         struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1759         struct devx_async_cmd_event_file *ev_file;
1760         struct devx_async_data *async_data;
1761
1762         if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1763                 return -EINVAL;
1764
1765         uid = devx_get_uid(c, cmd_in);
1766         if (uid < 0)
1767                 return uid;
1768
1769         if (!devx_is_obj_query_cmd(cmd_in))
1770                 return -EINVAL;
1771
1772         err = uverbs_get_const(&cmd_out_len, attrs,
1773                                MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
1774         if (err)
1775                 return err;
1776
1777         if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1778                 return -EINVAL;
1779
1780         fd_uobj = uverbs_attr_get_uobject(attrs,
1781                                 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
1782         if (IS_ERR(fd_uobj))
1783                 return PTR_ERR(fd_uobj);
1784
1785         ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
1786                                uobj);
1787
1788         if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
1789                         MAX_ASYNC_BYTES_IN_USE) {
1790                 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1791                 return -EAGAIN;
1792         }
1793
1794         async_data = kvzalloc(struct_size(async_data, hdr.out_data,
1795                                           cmd_out_len), GFP_KERNEL);
1796         if (!async_data) {
1797                 err = -ENOMEM;
1798                 goto sub_bytes;
1799         }
1800
1801         err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
1802                                MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
1803         if (err)
1804                 goto free_async;
1805
1806         async_data->cmd_out_len = cmd_out_len;
1807         async_data->mdev = mdev;
1808         async_data->ev_file = ev_file;
1809
1810         MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1811         err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
1812                     uverbs_attr_get_len(attrs,
1813                                 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
1814                     async_data->hdr.out_data,
1815                     async_data->cmd_out_len,
1816                     devx_query_callback, &async_data->cb_work);
1817
1818         if (err)
1819                 goto free_async;
1820
1821         return 0;
1822
1823 free_async:
1824         kvfree(async_data);
1825 sub_bytes:
1826         atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1827         return err;
1828 }
1829
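     /*
      * Undo subscribe_event_xa_alloc(): drop the level 2 XA entry if it ended
      * up with no subscribers. Level 1 entries are kept for future use.
      */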
1830 static void
1831 subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
1832                            u32 key_level1,
1833                            bool is_level2,
1834                            u32 key_level2)
1835 {
1836         struct devx_event *event;
1837         struct devx_obj_event *xa_val_level2;
1838
1839         /* Level 1 is valid for future use, no need to free */
1840         if (!is_level2)
1841                 return;
1842
1843         event = xa_load(&devx_event_table->event_xa, key_level1);
1844         WARN_ON(!event);
1845
1846         xa_val_level2 = xa_load(&event->object_ids,
1847                                 key_level2);
1848         if (list_empty(&xa_val_level2->obj_sub_list)) {
1849                 xa_erase(&event->object_ids,
1850                          key_level2);
1851                 kfree_rcu(xa_val_level2, rcu);
1852         }
1853 }
1854
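     /*
      * Make sure the two-level event XA holds an entry for this event type
      * (level 1) and, for affiliated events, for this object id (level 2).
      */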
1855 static int
1856 subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
1857                          u32 key_level1,
1858                          bool is_level2,
1859                          u32 key_level2)
1860 {
1861         struct devx_obj_event *obj_event;
1862         struct devx_event *event;
1863         int err;
1864
1865         event = xa_load(&devx_event_table->event_xa, key_level1);
1866         if (!event) {
1867                 event = kzalloc(sizeof(*event), GFP_KERNEL);
1868                 if (!event)
1869                         return -ENOMEM;
1870
1871                 INIT_LIST_HEAD(&event->unaffiliated_list);
1872                 xa_init(&event->object_ids);
1873
1874                 err = xa_insert(&devx_event_table->event_xa,
1875                                 key_level1,
1876                                 event,
1877                                 GFP_KERNEL);
1878                 if (err) {
1879                         kfree(event);
1880                         return err;
1881                 }
1882         }
1883
1884         if (!is_level2)
1885                 return 0;
1886
1887         obj_event = xa_load(&event->object_ids, key_level2);
1888         if (!obj_event) {
1889                 obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
1890                 if (!obj_event)
1891                         /* Level 1 is valid for future use, no need to free */
1892                         return -ENOMEM;
1893
1894                 err = xa_insert(&event->object_ids,
1895                                 key_level2,
1896                                 obj_event,
1897                                 GFP_KERNEL);
1898                 if (err) {
                             kfree(obj_event);
                             return err;
                     }
1900                 INIT_LIST_HEAD(&obj_event->obj_sub_list);
1901         }
1902
1903         return 0;
1904 }
1905
1906 static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
1907                                    struct devx_obj *obj)
1908 {
1909         int i;
1910
1911         for (i = 0; i < num_events; i++) {
1912                 if (obj) {
1913                         if (!is_legacy_obj_event_num(event_type_num_list[i]))
1914                                 return false;
1915                 } else if (!is_legacy_unaffiliated_event_num(
1916                                 event_type_num_list[i])) {
1917                         return false;
1918                 }
1919         }
1920
1921         return true;
1922 }
1923
1924 #define MAX_SUPP_EVENT_NUM 255
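     /*
      * Validate the requested event numbers against the device's advertised
      * user event masks, falling back to the legacy fixed lists when the
      * event_cap capability is not present.
      */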
1925 static bool is_valid_events(struct mlx5_core_dev *dev,
1926                             int num_events, u16 *event_type_num_list,
1927                             struct devx_obj *obj)
1928 {
1929         __be64 *aff_events;
1930         __be64 *unaff_events;
1931         int mask_entry;
1932         int mask_bit;
1933         int i;
1934
1935         if (MLX5_CAP_GEN(dev, event_cap)) {
1936                 aff_events = MLX5_CAP_DEV_EVENT(dev,
1937                                                 user_affiliated_events);
1938                 unaff_events = MLX5_CAP_DEV_EVENT(dev,
1939                                                   user_unaffiliated_events);
1940         } else {
1941                 return is_valid_events_legacy(num_events, event_type_num_list,
1942                                               obj);
1943         }
1944
1945         for (i = 0; i < num_events; i++) {
1946                 if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
1947                         return false;
1948
1949                 mask_entry = event_type_num_list[i] / 64;
1950                 mask_bit = event_type_num_list[i] % 64;
1951
1952                 if (obj) {
1953                         /* CQ completion */
1954                         if (event_type_num_list[i] == 0)
1955                                 continue;
1956
1957                         if (!(be64_to_cpu(aff_events[mask_entry]) &
1958                                         (1ull << mask_bit)))
1959                                 return false;
1960
1961                         continue;
1962                 }
1963
1964                 if (!(be64_to_cpu(unaff_events[mask_entry]) &
1965                                 (1ull << mask_bit)))
1966                         return false;
1967         }
1968
1969         return true;
1970 }
1971
1972 #define MAX_NUM_EVENTS 16
1973 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
1974         struct uverbs_attr_bundle *attrs)
1975 {
1976         struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
1977                                 attrs,
1978                                 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
1979         struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1980                 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1981         struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1982         struct ib_uobject *fd_uobj;
1983         struct devx_obj *obj = NULL;
1984         struct devx_async_event_file *ev_file;
1985         struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
1986         u16 *event_type_num_list;
1987         struct devx_event_subscription *event_sub, *tmp_sub;
1988         struct list_head sub_list;
1989         int redirect_fd;
1990         bool use_eventfd = false;
1991         int num_events;
1992         int num_alloc_xa_entries = 0;
1993         u16 obj_type = 0;
1994         u64 cookie = 0;
1995         u32 obj_id = 0;
1996         int err;
1997         int i;
1998
1999         if (!c->devx_uid)
2000                 return -EINVAL;
2001
2002         if (!IS_ERR(devx_uobj)) {
2003                 obj = (struct devx_obj *)devx_uobj->object;
2004                 if (obj)
2005                         obj_id = get_dec_obj_id(obj->obj_id);
2006         }
2007
2008         fd_uobj = uverbs_attr_get_uobject(attrs,
2009                                 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
2010         if (IS_ERR(fd_uobj))
2011                 return PTR_ERR(fd_uobj);
2012
2013         ev_file = container_of(fd_uobj, struct devx_async_event_file,
2014                                uobj);
2015
2016         if (uverbs_attr_is_valid(attrs,
2017                                  MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
2018                 err = uverbs_copy_from(&redirect_fd, attrs,
2019                                MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
2020                 if (err)
2021                         return err;
2022
2023                 use_eventfd = true;
2024         }
2025
2026         if (uverbs_attr_is_valid(attrs,
2027                                  MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
2028                 if (use_eventfd)
2029                         return -EINVAL;
2030
2031                 err = uverbs_copy_from(&cookie, attrs,
2032                                 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
2033                 if (err)
2034                         return err;
2035         }
2036
2037         num_events = uverbs_attr_ptr_get_array_size(
2038                 attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
2039                 sizeof(u16));
2040
2041         if (num_events < 0)
2042                 return num_events;
2043
2044         if (num_events > MAX_NUM_EVENTS)
2045                 return -EINVAL;
2046
2047         event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
2048                         MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);
2049
2050         if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
2051                 return -EINVAL;
2052
2053         INIT_LIST_HEAD(&sub_list);
2054
2055         /* Serialize concurrent subscriptions to the same XA entries so that
2056          * all of them can succeed
2057          */
2058         mutex_lock(&devx_event_table->event_xa_lock);
2059         for (i = 0; i < num_events; i++) {
2060                 u32 key_level1;
2061
2062                 if (obj)
2063                         obj_type = get_dec_obj_type(obj,
2064                                                     event_type_num_list[i]);
2065                 key_level1 = event_type_num_list[i] | obj_type << 16;
2066
2067                 err = subscribe_event_xa_alloc(devx_event_table,
2068                                                key_level1,
2069                                                obj,
2070                                                obj_id);
2071                 if (err)
2072                         goto err;
2073
2074                 num_alloc_xa_entries++;
2075                 event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
2076                 if (!event_sub) {
2077                         err = -ENOMEM;
2078                         goto err;
2079                 }
2080
2081                 list_add_tail(&event_sub->event_list, &sub_list);
2082                 uverbs_uobject_get(&ev_file->uobj);
2083                 if (use_eventfd) {
2084                         event_sub->eventfd =
2085                                 eventfd_ctx_fdget(redirect_fd);
2086
2087                         if (IS_ERR(event_sub->eventfd)) {
2088                                 err = PTR_ERR(event_sub->eventfd);
2089                                 event_sub->eventfd = NULL;
2090                                 goto err;
2091                         }
2092                 }
2093
2094                 event_sub->cookie = cookie;
2095                 event_sub->ev_file = ev_file;
2096                 /* May be needed upon cleanup of the devx object/subscription */
2097                 event_sub->xa_key_level1 = key_level1;
2098                 event_sub->xa_key_level2 = obj_id;
2099                 INIT_LIST_HEAD(&event_sub->obj_list);
2100         }
2101
2102         /* Once all the allocations and XA data insertions are done we can go
2103          * ahead and add all the subscriptions to the relevant lists without
2104          * concern about a failure.
2105          */
2106         list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2107                 struct devx_event *event;
2108                 struct devx_obj_event *obj_event;
2109
2110                 list_del_init(&event_sub->event_list);
2111
2112                 spin_lock_irq(&ev_file->lock);
2113                 list_add_tail_rcu(&event_sub->file_list,
2114                                   &ev_file->subscribed_events_list);
2115                 spin_unlock_irq(&ev_file->lock);
2116
2117                 event = xa_load(&devx_event_table->event_xa,
2118                                 event_sub->xa_key_level1);
2119                 WARN_ON(!event);
2120
2121                 if (!obj) {
2122                         list_add_tail_rcu(&event_sub->xa_list,
2123                                           &event->unaffiliated_list);
2124                         continue;
2125                 }
2126
2127                 obj_event = xa_load(&event->object_ids, obj_id);
2128                 WARN_ON(!obj_event);
2129                 list_add_tail_rcu(&event_sub->xa_list,
2130                                   &obj_event->obj_sub_list);
2131                 list_add_tail_rcu(&event_sub->obj_list,
2132                                   &obj->event_sub);
2133         }
2134
2135         mutex_unlock(&devx_event_table->event_xa_lock);
2136         return 0;
2137
2138 err:
2139         list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2140                 list_del(&event_sub->event_list);
2141
2142                 subscribe_event_xa_dealloc(devx_event_table,
2143                                            event_sub->xa_key_level1,
2144                                            obj,
2145                                            obj_id);
2146
2147                 if (event_sub->eventfd)
2148                         eventfd_ctx_put(event_sub->eventfd);
2149                 uverbs_uobject_put(&event_sub->ev_file->uobj);
2150                 kfree(event_sub);
2151         }
2152
2153         mutex_unlock(&devx_event_table->event_xa_lock);
2154         return err;
2155 }
2156
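     /*
      * Pin the user memory described by the UMEM_REG attributes after
      * validating the requested access flags.
      */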
2157 static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
2158                          struct uverbs_attr_bundle *attrs,
2159                          struct devx_umem *obj)
2160 {
2161         u64 addr;
2162         size_t size;
2163         u32 access;
2164         int err;
2165
2166         if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
2167             uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
2168                 return -EFAULT;
2169
2170         err = uverbs_get_flags32(&access, attrs,
2171                                  MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2172                                  IB_ACCESS_LOCAL_WRITE |
2173                                  IB_ACCESS_REMOTE_WRITE |
2174                                  IB_ACCESS_REMOTE_READ);
2175         if (err)
2176                 return err;
2177
2178         err = ib_check_mr_access(&dev->ib_dev, access);
2179         if (err)
2180                 return err;
2181
2182         obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
2183         if (IS_ERR(obj->umem))
2184                 return PTR_ERR(obj->umem);
2185         return 0;
2186 }
2187
2188 static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
2189                                                unsigned long pgsz_bitmap)
2190 {
2191         unsigned long page_size;
2192
2193         /* Don't bother checking larger page sizes as offset must be zero and
2194          * total DEVX umem length must be equal to total umem length.
2195          */
2196         pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length),
2197                                          PAGE_SHIFT),
2198                                    MLX5_ADAPTER_PAGE_SHIFT);
2199         if (!pgsz_bitmap)
2200                 return 0;
2201
2202         page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, U64_MAX);
2203         if (!page_size)
2204                 return 0;
2205
2206         /* If the page_size is less than the CPU page size then we can use the
2207          * offset and create a umem which is a subset of the page list.
2208          * For larger page sizes we can't be sure the DMA list reflects the
2209          * VA so we must ensure that the umem extent is exactly equal to the
2210          * page list. Reduce the page size until one of these cases is true.
2211          */
2212         while ((ib_umem_dma_offset(umem, page_size) != 0 ||
2213                 (umem->length % page_size) != 0) &&
2214                 page_size > PAGE_SIZE)
2215                 page_size /= 2;
2216
2217         return page_size;
2218 }
2219
2220 static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
2221                                    struct uverbs_attr_bundle *attrs,
2222                                    struct devx_umem *obj,
2223                                    struct devx_umem_reg_cmd *cmd)
2224 {
2225         unsigned long pgsz_bitmap;
2226         unsigned int page_size;
2227         __be64 *mtt;
2228         void *umem;
2229         int ret;
2230
2231         /*
2232          * If the user does not pass in pgsz_bitmap then the user promises not
2233          * to use umem_offset!=0 in any commands that allocate on top of the
2234          * umem.
2235          *
2236          * If the user wants to use a umem_offset then it must pass in
2237          * pgsz_bitmap which guides the maximum page size and thus maximum
2238          * object alignment inside the umem. See the PRM.
2239          *
2240          * Users are not allowed to use IOVA here; mkeys are not supported on
2241          * umem.
2242          */
2243         ret = uverbs_get_const_default(&pgsz_bitmap, attrs,
2244                         MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
2245                         GENMASK_ULL(63,
2246                                     min(PAGE_SHIFT, MLX5_ADAPTER_PAGE_SHIFT)));
2247         if (ret)
2248                 return ret;
2249
2250         page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap);
2251         if (!page_size)
2252                 return -EINVAL;
2253
2254         cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
2255                      (MLX5_ST_SZ_BYTES(mtt) *
2256                       ib_umem_num_dma_blocks(obj->umem, page_size));
2257         cmd->in = uverbs_zalloc(attrs, cmd->inlen);
2258         if (IS_ERR(cmd->in))
2259                 return PTR_ERR(cmd->in);
2260
2261         umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
2262         mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
2263
2264         MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
2265         MLX5_SET64(umem, umem, num_of_mtt,
2266                    ib_umem_num_dma_blocks(obj->umem, page_size));
2267         MLX5_SET(umem, umem, log_page_size,
2268                  order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
2269         MLX5_SET(umem, umem, page_offset,
2270                  ib_umem_dma_offset(obj->umem, page_size));
2271
2272         mlx5_ib_populate_pas(obj->umem, page_size, mtt,
2273                              (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
2274                                      MLX5_IB_MTT_READ);
2275         return 0;
2276 }
2277
2278 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
2279         struct uverbs_attr_bundle *attrs)
2280 {
2281         struct devx_umem_reg_cmd cmd;
2282         struct devx_umem *obj;
2283         struct ib_uobject *uobj = uverbs_attr_get_uobject(
2284                 attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
2285         u32 obj_id;
2286         struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
2287                 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
2288         struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
2289         int err;
2290
2291         if (!c->devx_uid)
2292                 return -EINVAL;
2293
2294         obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
2295         if (!obj)
2296                 return -ENOMEM;
2297
2298         err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
2299         if (err)
2300                 goto err_obj_free;
2301
2302         err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd);
2303         if (err)
2304                 goto err_umem_release;
2305
2306         MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
2307         err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
2308                             sizeof(cmd.out));
2309         if (err)
2310                 goto err_umem_release;
2311
2312         obj->mdev = dev->mdev;
2313         uobj->object = obj;
2314         devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
2315         uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
2316
2317         err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
2318                              sizeof(obj_id));
2319         return err;
2320
2321 err_umem_release:
2322         ib_umem_release(obj->umem);
2323 err_obj_free:
2324         kfree(obj);
2325         return err;
2326 }
2327
2328 static int devx_umem_cleanup(struct ib_uobject *uobject,
2329                              enum rdma_remove_reason why,
2330                              struct uverbs_attr_bundle *attrs)
2331 {
2332         struct devx_umem *obj = uobject->object;
2333         u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
2334         int err;
2335
2336         err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
2337         if (err)
2338                 return err;
2339
2340         ib_umem_release(obj->umem);
2341         kfree(obj);
2342         return 0;
2343 }
2344
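     /*
      * An unaffiliated event is not tied to a specific object; check the
      * device's user_unaffiliated_events mask (or the legacy list).
      */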
2345 static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
2346                                   unsigned long event_type)
2347 {
2348         __be64 *unaff_events;
2349         int mask_entry;
2350         int mask_bit;
2351
2352         if (!MLX5_CAP_GEN(dev, event_cap))
2353                 return is_legacy_unaffiliated_event_num(event_type);
2354
2355         unaff_events = MLX5_CAP_DEV_EVENT(dev,
2356                                           user_unaffiliated_events);
2357         WARN_ON(event_type > MAX_SUPP_EVENT_NUM);
2358
2359         mask_entry = event_type / 64;
2360         mask_bit = event_type % 64;
2361
2362         if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
2363                 return false;
2364
2365         return true;
2366 }
2367
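     /* Extract the object id (QPN/SRQN/XRQN/DCTN/CQN/...) carried by an EQE */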
2368 static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
2369 {
2370         struct mlx5_eqe *eqe = data;
2371         u32 obj_id = 0;
2372
2373         switch (event_type) {
2374         case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
2375         case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
2376         case MLX5_EVENT_TYPE_PATH_MIG:
2377         case MLX5_EVENT_TYPE_COMM_EST:
2378         case MLX5_EVENT_TYPE_SQ_DRAINED:
2379         case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
2380         case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
2381         case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
2382         case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
2383         case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
2384                 obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
2385                 break;
2386         case MLX5_EVENT_TYPE_XRQ_ERROR:
2387                 obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
2388                 break;
2389         case MLX5_EVENT_TYPE_DCT_DRAINED:
2390         case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
2391                 obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
2392                 break;
2393         case MLX5_EVENT_TYPE_CQ_ERROR:
2394                 obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
2395                 break;
2396         default:
2397                 obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
2398                 break;
2399         }
2400
2401         return obj_id;
2402 }
2403
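     /*
      * Queue an event on the subscriber's event file. In omit_data mode only
      * the subscription cookie is queued (at most once); otherwise the full
      * EQE is copied into a freshly allocated entry.
      */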
2404 static int deliver_event(struct devx_event_subscription *event_sub,
2405                          const void *data)
2406 {
2407         struct devx_async_event_file *ev_file;
2408         struct devx_async_event_data *event_data;
2409         unsigned long flags;
2410
2411         ev_file = event_sub->ev_file;
2412
2413         if (ev_file->omit_data) {
2414                 spin_lock_irqsave(&ev_file->lock, flags);
2415                 if (!list_empty(&event_sub->event_list) ||
2416                     ev_file->is_destroyed) {
2417                         spin_unlock_irqrestore(&ev_file->lock, flags);
2418                         return 0;
2419                 }
2420
2421                 list_add_tail(&event_sub->event_list, &ev_file->event_list);
2422                 spin_unlock_irqrestore(&ev_file->lock, flags);
2423                 wake_up_interruptible(&ev_file->poll_wait);
2424                 return 0;
2425         }
2426
2427         event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
2428                              GFP_ATOMIC);
2429         if (!event_data) {
2430                 spin_lock_irqsave(&ev_file->lock, flags);
2431                 ev_file->is_overflow_err = 1;
2432                 spin_unlock_irqrestore(&ev_file->lock, flags);
2433                 return -ENOMEM;
2434         }
2435
2436         event_data->hdr.cookie = event_sub->cookie;
2437         memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));
2438
2439         spin_lock_irqsave(&ev_file->lock, flags);
2440         if (!ev_file->is_destroyed)
2441                 list_add_tail(&event_data->list, &ev_file->event_list);
2442         else
2443                 kfree(event_data);
2444         spin_unlock_irqrestore(&ev_file->lock, flags);
2445         wake_up_interruptible(&ev_file->poll_wait);
2446
2447         return 0;
2448 }
2449
2450 static void dispatch_event_fd(struct list_head *fd_list,
2451                               const void *data)
2452 {
2453         struct devx_event_subscription *item;
2454
2455         list_for_each_entry_rcu(item, fd_list, xa_list) {
2456                 if (item->eventfd)
2457                         eventfd_signal(item->eventfd, 1);
2458                 else
2459                         deliver_event(item, data);
2460         }
2461 }
2462
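     /*
      * EQ notifier: route a firmware event either to the unaffiliated
      * subscriber list or, for affiliated events, to the subscribers of the
      * specific object id.
      */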
2463 static int devx_event_notifier(struct notifier_block *nb,
2464                                unsigned long event_type, void *data)
2465 {
2466         struct mlx5_devx_event_table *table;
2467         struct mlx5_ib_dev *dev;
2468         struct devx_event *event;
2469         struct devx_obj_event *obj_event;
2470         u16 obj_type = 0;
2471         bool is_unaffiliated;
2472         u32 obj_id;
2473
2474         /* Explicitly filter out kernel events which may occur frequently */
2475         if (event_type == MLX5_EVENT_TYPE_CMD ||
2476             event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
2477                 return NOTIFY_OK;
2478
2479         table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
2480         dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
2481         is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);
2482
2483         if (!is_unaffiliated)
2484                 obj_type = get_event_obj_type(event_type, data);
2485
2486         rcu_read_lock();
2487         event = xa_load(&table->event_xa, event_type | (obj_type << 16));
2488         if (!event) {
2489                 rcu_read_unlock();
2490                 return NOTIFY_DONE;
2491         }
2492
2493         if (is_unaffiliated) {
2494                 dispatch_event_fd(&event->unaffiliated_list, data);
2495                 rcu_read_unlock();
2496                 return NOTIFY_OK;
2497         }
2498
2499         obj_id = devx_get_obj_id_from_event(event_type, data);
2500         obj_event = xa_load(&event->object_ids, obj_id);
2501         if (!obj_event) {
2502                 rcu_read_unlock();
2503                 return NOTIFY_DONE;
2504         }
2505
2506         dispatch_event_fd(&obj_event->obj_sub_list, data);
2507
2508         rcu_read_unlock();
2509         return NOTIFY_OK;
2510 }
2511
2512 int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
2513 {
2514         struct mlx5_devx_event_table *table = &dev->devx_event_table;
2515         int uid;
2516
2517         uid = mlx5_ib_devx_create(dev, false);
2518         if (uid > 0) {
2519                 dev->devx_whitelist_uid = uid;
2520                 xa_init(&table->event_xa);
2521                 mutex_init(&table->event_xa_lock);
2522                 MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
2523                 mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
2524         }
2525
2526         return 0;
2527 }
2528
2529 void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
2530 {
2531         struct mlx5_devx_event_table *table = &dev->devx_event_table;
2532         struct devx_event_subscription *sub, *tmp;
2533         struct devx_event *event;
2534         void *entry;
2535         unsigned long id;
2536
2537         if (dev->devx_whitelist_uid) {
2538                 mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
2539                 mutex_lock(&dev->devx_event_table.event_xa_lock);
2540                 xa_for_each(&table->event_xa, id, entry) {
2541                         event = entry;
2542                         list_for_each_entry_safe(
2543                                 sub, tmp, &event->unaffiliated_list, xa_list)
2544                                 devx_cleanup_subscription(dev, sub);
2545                         kfree(entry);
2546                 }
2547                 mutex_unlock(&dev->devx_event_table.event_xa_lock);
2548                 xa_destroy(&table->event_xa);
2549
2550                 mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
2551         }
2552 }
2553
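     /*
      * read() for the async command FD: wait (unless O_NONBLOCK) for a
      * completed command, then copy its header and output to user space.
      */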
2554 static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
2555                                          size_t count, loff_t *pos)
2556 {
2557         struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2558         struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2559         struct devx_async_data *event;
2560         int ret = 0;
2561         size_t eventsz;
2562
2563         spin_lock_irq(&ev_queue->lock);
2564
2565         while (list_empty(&ev_queue->event_list)) {
2566                 spin_unlock_irq(&ev_queue->lock);
2567
2568                 if (filp->f_flags & O_NONBLOCK)
2569                         return -EAGAIN;
2570
2571                 if (wait_event_interruptible(
2572                             ev_queue->poll_wait,
2573                             (!list_empty(&ev_queue->event_list) ||
2574                              ev_queue->is_destroyed))) {
2575                         return -ERESTARTSYS;
2576                 }
2577
2578                 spin_lock_irq(&ev_queue->lock);
2579                 if (ev_queue->is_destroyed) {
2580                         spin_unlock_irq(&ev_queue->lock);
2581                         return -EIO;
2582                 }
2583         }
2584
2585         event = list_entry(ev_queue->event_list.next,
2586                            struct devx_async_data, list);
2587         eventsz = event->cmd_out_len +
2588                         sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);
2589
2590         if (eventsz > count) {
2591                 spin_unlock_irq(&ev_queue->lock);
2592                 return -ENOSPC;
2593         }
2594
2595         list_del(ev_queue->event_list.next);
2596         spin_unlock_irq(&ev_queue->lock);
2597
2598         if (copy_to_user(buf, &event->hdr, eventsz))
2599                 ret = -EFAULT;
2600         else
2601                 ret = eventsz;
2602
2603         atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
2604         kvfree(event);
2605         return ret;
2606 }
2607
2608 static __poll_t devx_async_cmd_event_poll(struct file *filp,
2609                                               struct poll_table_struct *wait)
2610 {
2611         struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2612         struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2613         __poll_t pollflags = 0;
2614
2615         poll_wait(filp, &ev_queue->poll_wait, wait);
2616
2617         spin_lock_irq(&ev_queue->lock);
2618         if (ev_queue->is_destroyed)
2619                 pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2620         else if (!list_empty(&ev_queue->event_list))
2621                 pollflags = EPOLLIN | EPOLLRDNORM;
2622         spin_unlock_irq(&ev_queue->lock);
2623
2624         return pollflags;
2625 }
2626
2627 static const struct file_operations devx_async_cmd_event_fops = {
2628         .owner   = THIS_MODULE,
2629         .read    = devx_async_cmd_event_read,
2630         .poll    = devx_async_cmd_event_poll,
2631         .release = uverbs_uobject_fd_release,
2632         .llseek  = no_llseek,
2633 };
2634
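     /*
      * read() for the async event FD: wait for a queued event and copy either
      * the bare cookie (omit_data mode) or the full event header and EQE.
      */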
2635 static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
2636                                      size_t count, loff_t *pos)
2637 {
2638         struct devx_async_event_file *ev_file = filp->private_data;
2639         struct devx_event_subscription *event_sub;
2640         struct devx_async_event_data *event;
2641         int ret = 0;
2642         size_t eventsz;
2643         bool omit_data;
2644         void *event_data;
2645
2646         omit_data = ev_file->omit_data;
2647
2648         spin_lock_irq(&ev_file->lock);
2649
2650         if (ev_file->is_overflow_err) {
2651                 ev_file->is_overflow_err = 0;
2652                 spin_unlock_irq(&ev_file->lock);
2653                 return -EOVERFLOW;
2654         }
2655
2656
2657         while (list_empty(&ev_file->event_list)) {
2658                 spin_unlock_irq(&ev_file->lock);
2659
2660                 if (filp->f_flags & O_NONBLOCK)
2661                         return -EAGAIN;
2662
2663                 if (wait_event_interruptible(ev_file->poll_wait,
2664                             (!list_empty(&ev_file->event_list) ||
2665                              ev_file->is_destroyed))) {
2666                         return -ERESTARTSYS;
2667                 }
2668
2669                 spin_lock_irq(&ev_file->lock);
2670                 if (ev_file->is_destroyed) {
2671                         spin_unlock_irq(&ev_file->lock);
2672                         return -EIO;
2673                 }
2674         }
2675
2676         if (omit_data) {
2677                 event_sub = list_first_entry(&ev_file->event_list,
2678                                         struct devx_event_subscription,
2679                                         event_list);
2680                 eventsz = sizeof(event_sub->cookie);
2681                 event_data = &event_sub->cookie;
2682         } else {
2683                 event = list_first_entry(&ev_file->event_list,
2684                                       struct devx_async_event_data, list);
2685                 eventsz = sizeof(struct mlx5_eqe) +
2686                         sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
2687                 event_data = &event->hdr;
2688         }
2689
2690         if (eventsz > count) {
2691                 spin_unlock_irq(&ev_file->lock);
2692                 return -EINVAL;
2693         }
2694
2695         if (omit_data)
2696                 list_del_init(&event_sub->event_list);
2697         else
2698                 list_del(&event->list);
2699
2700         spin_unlock_irq(&ev_file->lock);
2701
2702         if (copy_to_user(buf, event_data, eventsz))
2703                 /* This points to an application issue, not a kernel concern */
2704                 ret = -EFAULT;
2705         else
2706                 ret = eventsz;
2707
2708         if (!omit_data)
2709                 kfree(event);
2710         return ret;
2711 }
2712
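/*
 * Poll on the async event FD: readable when a subscribed event is queued;
 * EPOLLRDHUP is reported as well once the FD has been destroyed.
 */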
2713 static __poll_t devx_async_event_poll(struct file *filp,
2714                                       struct poll_table_struct *wait)
2715 {
2716         struct devx_async_event_file *ev_file = filp->private_data;
2717         __poll_t pollflags = 0;
2718
2719         poll_wait(filp, &ev_file->poll_wait, wait);
2720
2721         spin_lock_irq(&ev_file->lock);
2722         if (ev_file->is_destroyed)
2723                 pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2724         else if (!list_empty(&ev_file->event_list))
2725                 pollflags = EPOLLIN | EPOLLRDNORM;
2726         spin_unlock_irq(&ev_file->lock);
2727
2728         return pollflags;
2729 }
2730
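/*
 * RCU callback freeing a subscription once no reader can still reference
 * it: drop the optional eventfd context and the reference held on the
 * owning event FD uobject.
 */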
2731 static void devx_free_subscription(struct rcu_head *rcu)
2732 {
2733         struct devx_event_subscription *event_sub =
2734                 container_of(rcu, struct devx_event_subscription, rcu);
2735
2736         if (event_sub->eventfd)
2737                 eventfd_ctx_put(event_sub->eventfd);
2738         uverbs_uobject_put(&event_sub->ev_file->uobj);
2739         kfree(event_sub);
2740 }
2741
2742 static const struct file_operations devx_async_event_fops = {
2743         .owner   = THIS_MODULE,
2744         .read    = devx_async_event_read,
2745         .poll    = devx_async_event_poll,
2746         .release = uverbs_uobject_fd_release,
2747         .llseek  = no_llseek,
2748 };
2749
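/*
 * FD uobject teardown for the async command channel: mark the queue as
 * destroyed and wake up waiters, let mlx5_cmd_cleanup_async_ctx() drain any
 * firmware commands still in flight, then free completions that were never
 * read.
 */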
2750 static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
2751                                               enum rdma_remove_reason why)
2752 {
2753         struct devx_async_cmd_event_file *comp_ev_file =
2754                 container_of(uobj, struct devx_async_cmd_event_file,
2755                              uobj);
2756         struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2757         struct devx_async_data *entry, *tmp;
2758
2759         spin_lock_irq(&ev_queue->lock);
2760         ev_queue->is_destroyed = 1;
2761         spin_unlock_irq(&ev_queue->lock);
2762         wake_up_interruptible(&ev_queue->poll_wait);
2763
2764         mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
2765
2766         spin_lock_irq(&comp_ev_file->ev_queue.lock);
2767         list_for_each_entry_safe(entry, tmp,
2768                                  &comp_ev_file->ev_queue.event_list, list) {
2769                 list_del(&entry->list);
2770                 kvfree(entry);
2771         }
2772         spin_unlock_irq(&comp_ev_file->ev_queue.lock);
2773 }
2774
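/*
 * FD uobject teardown for the async event channel: mark the file as
 * destroyed, discard events that are still queued, wake up waiters, then
 * unlink and RCU-free every subscription owned by this FD before dropping
 * the device reference taken at allocation time.
 */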
2775 static void devx_async_event_destroy_uobj(struct ib_uobject *uobj,
2776                                           enum rdma_remove_reason why)
2777 {
2778         struct devx_async_event_file *ev_file =
2779                 container_of(uobj, struct devx_async_event_file,
2780                              uobj);
2781         struct devx_event_subscription *event_sub, *event_sub_tmp;
2782         struct mlx5_ib_dev *dev = ev_file->dev;
2783
2784         spin_lock_irq(&ev_file->lock);
2785         ev_file->is_destroyed = 1;
2786
2787         /* free the pending events allocation */
2788         if (ev_file->omit_data) {
2789                 struct devx_event_subscription *event_sub, *tmp;
2790
2791                 list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
2792                                          event_list)
2793                         list_del_init(&event_sub->event_list);
2794
2795         } else {
2796                 struct devx_async_event_data *entry, *tmp;
2797
2798                 list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
2799                                          list) {
2800                         list_del(&entry->list);
2801                         kfree(entry);
2802                 }
2803         }
2804
2805         spin_unlock_irq(&ev_file->lock);
2806         wake_up_interruptible(&ev_file->poll_wait);
2807
2808         mutex_lock(&dev->devx_event_table.event_xa_lock);
2809         /* delete the subscriptions which are related to this FD */
2810         list_for_each_entry_safe(event_sub, event_sub_tmp,
2811                                  &ev_file->subscribed_events_list, file_list) {
2812                 devx_cleanup_subscription(dev, event_sub);
2813                 list_del_rcu(&event_sub->file_list);
2814                 /* subscription may not be used by the read API any more */
2815                 call_rcu(&event_sub->rcu, devx_free_subscription);
2816         }
2817         mutex_unlock(&dev->devx_event_table.event_xa_lock);
2818
2819         put_device(&dev->ib_dev.dev);
2820 }
2821
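/*
 * uverbs ioctl() interface description for DEVX: the method and object
 * declarations below define the attribute layout, access mode and
 * mandatory/optional flags for each command, and are chained into
 * mlx5_ib_devx_defs[] at the end of the file.
 */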
2822 DECLARE_UVERBS_NAMED_METHOD(
2823         MLX5_IB_METHOD_DEVX_UMEM_REG,
2824         UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
2825                         MLX5_IB_OBJECT_DEVX_UMEM,
2826                         UVERBS_ACCESS_NEW,
2827                         UA_MANDATORY),
2828         UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
2829                            UVERBS_ATTR_TYPE(u64),
2830                            UA_MANDATORY),
2831         UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
2832                            UVERBS_ATTR_TYPE(u64),
2833                            UA_MANDATORY),
2834         UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2835                              enum ib_access_flags),
2836         UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
2837                              u64),
2838         UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
2839                             UVERBS_ATTR_TYPE(u32),
2840                             UA_MANDATORY));
2841
2842 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2843         MLX5_IB_METHOD_DEVX_UMEM_DEREG,
2844         UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
2845                         MLX5_IB_OBJECT_DEVX_UMEM,
2846                         UVERBS_ACCESS_DESTROY,
2847                         UA_MANDATORY));
2848
2849 DECLARE_UVERBS_NAMED_METHOD(
2850         MLX5_IB_METHOD_DEVX_QUERY_EQN,
2851         UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
2852                            UVERBS_ATTR_TYPE(u32),
2853                            UA_MANDATORY),
2854         UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
2855                             UVERBS_ATTR_TYPE(u32),
2856                             UA_MANDATORY));
2857
2858 DECLARE_UVERBS_NAMED_METHOD(
2859         MLX5_IB_METHOD_DEVX_QUERY_UAR,
2860         UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
2861                            UVERBS_ATTR_TYPE(u32),
2862                            UA_MANDATORY),
2863         UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
2864                             UVERBS_ATTR_TYPE(u32),
2865                             UA_MANDATORY));
2866
2867 DECLARE_UVERBS_NAMED_METHOD(
2868         MLX5_IB_METHOD_DEVX_OTHER,
2869         UVERBS_ATTR_PTR_IN(
2870                 MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
2871                 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2872                 UA_MANDATORY,
2873                 UA_ALLOC_AND_COPY),
2874         UVERBS_ATTR_PTR_OUT(
2875                 MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
2876                 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2877                 UA_MANDATORY));
2878
2879 DECLARE_UVERBS_NAMED_METHOD(
2880         MLX5_IB_METHOD_DEVX_OBJ_CREATE,
2881         UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
2882                         MLX5_IB_OBJECT_DEVX_OBJ,
2883                         UVERBS_ACCESS_NEW,
2884                         UA_MANDATORY),
2885         UVERBS_ATTR_PTR_IN(
2886                 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
2887                 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2888                 UA_MANDATORY,
2889                 UA_ALLOC_AND_COPY),
2890         UVERBS_ATTR_PTR_OUT(
2891                 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
2892                 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2893                 UA_MANDATORY));
2894
2895 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2896         MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
2897         UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
2898                         MLX5_IB_OBJECT_DEVX_OBJ,
2899                         UVERBS_ACCESS_DESTROY,
2900                         UA_MANDATORY));
2901
2902 DECLARE_UVERBS_NAMED_METHOD(
2903         MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
2904         UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
2905                         UVERBS_IDR_ANY_OBJECT,
2906                         UVERBS_ACCESS_WRITE,
2907                         UA_MANDATORY),
2908         UVERBS_ATTR_PTR_IN(
2909                 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
2910                 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2911                 UA_MANDATORY,
2912                 UA_ALLOC_AND_COPY),
2913         UVERBS_ATTR_PTR_OUT(
2914                 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
2915                 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2916                 UA_MANDATORY));
2917
2918 DECLARE_UVERBS_NAMED_METHOD(
2919         MLX5_IB_METHOD_DEVX_OBJ_QUERY,
2920         UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2921                         UVERBS_IDR_ANY_OBJECT,
2922                         UVERBS_ACCESS_READ,
2923                         UA_MANDATORY),
2924         UVERBS_ATTR_PTR_IN(
2925                 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2926                 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2927                 UA_MANDATORY,
2928                 UA_ALLOC_AND_COPY),
2929         UVERBS_ATTR_PTR_OUT(
2930                 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
2931                 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2932                 UA_MANDATORY));
2933
2934 DECLARE_UVERBS_NAMED_METHOD(
2935         MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
2936         UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2937                         UVERBS_IDR_ANY_OBJECT,
2938                         UVERBS_ACCESS_READ,
2939                         UA_MANDATORY),
2940         UVERBS_ATTR_PTR_IN(
2941                 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2942                 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2943                 UA_MANDATORY,
2944                 UA_ALLOC_AND_COPY),
2945         UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
2946                 u16, UA_MANDATORY),
2947         UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
2948                 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2949                 UVERBS_ACCESS_READ,
2950                 UA_MANDATORY),
2951         UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
2952                 UVERBS_ATTR_TYPE(u64),
2953                 UA_MANDATORY));
2954
2955 DECLARE_UVERBS_NAMED_METHOD(
2956         MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
2957         UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
2958                 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2959                 UVERBS_ACCESS_READ,
2960                 UA_MANDATORY),
2961         UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
2962                 MLX5_IB_OBJECT_DEVX_OBJ,
2963                 UVERBS_ACCESS_READ,
2964                 UA_OPTIONAL),
2965         UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
2966                 UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
2967                 UA_MANDATORY,
2968                 UA_ALLOC_AND_COPY),
2969         UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
2970                 UVERBS_ATTR_TYPE(u64),
2971                 UA_OPTIONAL),
2972         UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
2973                 UVERBS_ATTR_TYPE(u32),
2974                 UA_OPTIONAL));
2975
2976 DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
2977                               &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
2978                               &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
2979                               &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
2980                               &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));
2981
2982 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
2983                             UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
2984                             &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
2985                             &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
2986                             &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
2987                             &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
2988                             &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
2989
2990 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
2991                             UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
2992                             &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
2993                             &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
2994
2995
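/*
 * FD based uobjects: the async command FD delivers completions of
 * MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY and the async event FD delivers
 * subscribed device events; both are allocated as read-only descriptors
 * backed by the file_operations defined above.
 */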
2996 DECLARE_UVERBS_NAMED_METHOD(
2997         MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
2998         UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
2999                         MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3000                         UVERBS_ACCESS_NEW,
3001                         UA_MANDATORY));
3002
3003 DECLARE_UVERBS_NAMED_OBJECT(
3004         MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3005         UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
3006                              devx_async_cmd_event_destroy_uobj,
3007                              &devx_async_cmd_event_fops, "[devx_async_cmd]",
3008                              O_RDONLY),
3009         &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
3010
3011 DECLARE_UVERBS_NAMED_METHOD(
3012         MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
3013         UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
3014                         MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3015                         UVERBS_ACCESS_NEW,
3016                         UA_MANDATORY),
3017         UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
3018                         enum mlx5_ib_uapi_devx_create_event_channel_flags,
3019                         UA_MANDATORY));
3020
3021 DECLARE_UVERBS_NAMED_OBJECT(
3022         MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3023         UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
3024                              devx_async_event_destroy_uobj,
3025                              &devx_async_event_fops, "[devx_async_event]",
3026                              O_RDONLY),
3027         &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
3028
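/*
 * DEVX is exposed only when the device can create user contexts, i.e. the
 * log_max_uctx general capability is non-zero.
 */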
3029 static bool devx_is_supported(struct ib_device *device)
3030 {
3031         struct mlx5_ib_dev *dev = to_mdev(device);
3032
3033         return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
3034 }
3035
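/*
 * Definition chain consumed by the uverbs core; every DEVX object tree is
 * gated on devx_is_supported() so none of these methods are exposed on
 * devices without DEVX support.
 */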
3036 const struct uapi_definition mlx5_ib_devx_defs[] = {
3037         UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3038                 MLX5_IB_OBJECT_DEVX,
3039                 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3040         UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3041                 MLX5_IB_OBJECT_DEVX_OBJ,
3042                 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3043         UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3044                 MLX5_IB_OBJECT_DEVX_UMEM,
3045                 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3046         UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3047                 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3048                 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3049         UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3050                 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3051                 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3052         {},
3053 };