/* drivers/net/ethernet/mellanox/mlx5/core/qp.c */
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/transobj.h>

#include "mlx5_core.h"
#include "lib/eq.h"

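/*
 * Look up a tracked resource (QP/RQ/SQ/DCT) by its type-tagged resource
 * number and take a reference on it.  Callers must release the reference
 * with mlx5_core_put_rsc().
 */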
static struct mlx5_core_rsc_common *
mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
{
	struct mlx5_core_rsc_common *common;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);

	common = radix_tree_lookup(&table->tree, rsn);
	if (common)
		atomic_inc(&common->refcount);

	spin_unlock_irqrestore(&table->lock, flags);

	return common;
}

void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
	if (atomic_dec_and_test(&common->refcount))
		complete(&common->free);
}

static u64 qp_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
	       BIT(MLX5_EVENT_TYPE_COMM_EST) |
	       BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
	       BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
	       BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
	       BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
	       BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);

	return mask;
}

static u64 rq_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);

	return mask;
}

static u64 sq_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}

static u64 dct_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
}

static bool is_event_type_allowed(int rsc_type, int event_type)
{
	switch (rsc_type) {
	case MLX5_EVENT_QUEUE_TYPE_QP:
		return BIT(event_type) & qp_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_RQ:
		return BIT(event_type) & rq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_SQ:
		return BIT(event_type) & sq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_DCT:
		return BIT(event_type) & dct_allowed_event_types();
	default:
		WARN(1, "Event arrived for unknown resource type");
		return false;
	}
}

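/*
 * Async event notifier for QP-table resources.  Decode the resource number
 * from the EQE, check that the event type is legal for the resource type,
 * then dispatch to the owner's event callback (or complete the drain
 * completion for a DCT).
 */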
static int rsc_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	struct mlx5_core_rsc_common *common;
	struct mlx5_qp_table *table;
	struct mlx5_core_dev *dev;
	struct mlx5_core_dct *dct;
	u8 event_type = (u8)type;
	struct mlx5_core_qp *qp;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	u32 rsn;

	switch (event_type) {
	case MLX5_EVENT_TYPE_DCT_DRAINED:
		eqe = data;
		rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
		break;
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		eqe = data;
		rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
		break;
	default:
		return NOTIFY_DONE;
	}

	table = container_of(nb, struct mlx5_qp_table, nb);
	priv  = container_of(table, struct mlx5_priv, qp_table);
	dev   = container_of(priv, struct mlx5_core_dev, priv);

	mlx5_core_dbg(dev, "event (%d) arrived on resource 0x%x\n", eqe->type, rsn);

	common = mlx5_get_rsc(table, rsn);
	if (!common) {
		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn);
		return NOTIFY_OK;
	}

	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
		mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
			       event_type, rsn);
		goto out;
	}

	switch (common->res) {
	case MLX5_RES_QP:
	case MLX5_RES_RQ:
	case MLX5_RES_SQ:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		break;
	case MLX5_RES_DCT:
		dct = (struct mlx5_core_dct *)common;
		if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
			complete(&dct->drained);
		break;
	default:
		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
	}
out:
	mlx5_core_put_rsc(common);

	return NOTIFY_OK;
}

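/*
 * Register a newly created resource in the radix tree, keyed by its
 * type-tagged number, and initialize its reference count and free
 * completion.
 */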
static int create_resource_common(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *qp,
				  int rsc_type)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	int err;

	qp->common.res = rsc_type;
	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree,
				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
				qp);
	spin_unlock_irq(&table->lock);
	if (err)
		return err;

	atomic_set(&qp->common.refcount, 1);
	init_completion(&qp->common.free);
	qp->pid = current->pid;

	return 0;
}

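/*
 * Unregister a resource and wait until all outstanding references (for
 * example from in-flight event handlers) have been dropped.
 */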
static void destroy_resource_common(struct mlx5_core_dev *dev,
				    struct mlx5_core_qp *qp)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree,
			  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
	spin_unlock_irqrestore(&table->lock, flags);
	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
	wait_for_completion(&qp->common.free);
}

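/*
 * Create a DCT with the CREATE_DCT command and register it in the QP
 * table; if registration fails, the DCT is destroyed again so that no
 * firmware object is leaked.
 */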
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
			 struct mlx5_core_dct *dct,
			 u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(create_dct_out)]   = {0};
	u32 din[MLX5_ST_SZ_DW(destroy_dct_in)]   = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
	struct mlx5_core_qp *qp = &dct->mqp;
	int err;

	init_completion(&dct->drained);
	MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);

	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
		return err;
	}

	qp->qpn = MLX5_GET(create_dct_out, out, dctn);
	qp->uid = MLX5_GET(create_dct_in, in, uid);
	err = create_resource_common(dev, qp, MLX5_RES_DCT);
	if (err)
		goto err_cmd;

	return 0;
err_cmd:
	MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
	MLX5_SET(destroy_dct_in, din, uid, qp->uid);
	mlx5_cmd_exec(dev, (void *)&din, sizeof(din),
		      (void *)&dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_dct);

int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
	int err;

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	qp->uid = MLX5_GET(create_qp_in, in, uid);
	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

	err = create_resource_common(dev, qp, MLX5_RES_QP);
	if (err)
		goto err_cmd;

	err = mlx5_debug_qp_add(dev, qp);
	if (err)
		mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
			      qp->qpn);

	atomic_inc(&dev->num_qps);

	return 0;

err_cmd:
	memset(din, 0, sizeof(din));
	memset(dout, 0, sizeof(dout));
	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, din, uid, qp->uid);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

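/*
 * Issue DRAIN_DCT; the firmware signals completion asynchronously via the
 * DCT_DRAINED event handled in rsc_event_notifier().
 */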
static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
			       struct mlx5_core_dct *dct)
{
	u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(drain_dct_in)]   = {0};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
	MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
	MLX5_SET(drain_dct_in, in, uid, qp->uid);
	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
			     (void *)&out, sizeof(out));
}

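/*
 * Destroy a DCT.  The DCT is drained first and the drained completion is
 * awaited, unless the device is in internal error state, in which case
 * draining is skipped and the object is torn down directly.
 */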
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
			  struct mlx5_core_dct *dct)
{
	u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)]   = {0};
	struct mlx5_core_qp *qp = &dct->mqp;
	int err;

	err = mlx5_core_drain_dct(dev, dct);
	if (err) {
		if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
			goto destroy;
		} else {
			mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
			return err;
		}
	}
	wait_for_completion(&dct->drained);
destroy:
	destroy_resource_common(dev, &dct->mqp);
	MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
	MLX5_SET(destroy_dct_in, in, uid, qp->uid);
	err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
			    (void *)&out, sizeof(out));
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);

int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp)
{
	u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)]   = {0};
	int err;

	mlx5_debug_qp_remove(dev, qp);

	destroy_resource_common(dev, qp);

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, in, uid, qp->uid);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	atomic_dec(&dev->num_qps);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);

int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
			     u32 timeout_usec)
{
	u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)]   = {0};

	MLX5_SET(set_delay_drop_params_in, in, opcode,
		 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
	MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
		 timeout_usec / 100);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);

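/* Scratch in/out command buffers for the variable-layout MODIFY_QP commands. */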
struct mbox_info {
	u32 *in;
	u32 *out;
	int inlen;
	int outlen;
};

static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
{
	mbox->inlen  = inlen;
	mbox->outlen = outlen;
	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
	if (!mbox->in || !mbox->out) {
		kfree(mbox->in);
		kfree(mbox->out);
		return -ENOMEM;
	}

	return 0;
}

static void mbox_free(struct mbox_info *mbox)
{
	kfree(mbox->in);
	kfree(mbox->out);
}

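/*
 * Allocate and fill the command mailbox for the requested QP state
 * transition.  Transitions to RST/ERR carry no QP context; all other
 * transitions copy the caller's QPC and optional-parameter mask into the
 * command input.
 */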
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox, u16 uid)
{
	mbox->out = NULL;
	mbox->in = NULL;

#define MBOX_ALLOC(mbox, typ)  \
	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid)                            \
	do {                                                                   \
		MLX5_SET(typ##_in, in, opcode, _opcode);                       \
		MLX5_SET(typ##_in, in, qpn, _qpn);                             \
		MLX5_SET(typ##_in, in, uid, _uid);                             \
	} while (0)

#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid)          \
	do {                                                                   \
		MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid);                   \
		MLX5_SET(typ##_in, in, opt_param_mask, _opt_p);                \
		memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc,                  \
		       MLX5_ST_SZ_BYTES(qpc));                                 \
	} while (0)

	switch (opcode) {
	/* 2RST & 2ERR */
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
		break;

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	default:
		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
			      opcode, qpn);
		return -EINVAL;
	}
	return 0;
}

int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
			u32 opt_param_mask, void *qpc,
			struct mlx5_core_qp *qp)
{
	struct mbox_info mbox;
	int err;

	err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
				   opt_param_mask, qpc, &mbox, qp->uid);
	if (err)
		return err;

	err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
	mbox_free(&mbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);

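/* Set up the QP table, its debugfs entries and the async event notifier. */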
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	mlx5_qp_debugfs_init(dev);

	table->nb.notifier_call = rsc_event_notifier;
	mlx5_notifier_register(dev, &table->nb);
}

void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	mlx5_notifier_unregister(dev, &table->nb);
	mlx5_qp_debugfs_cleanup(dev);
}

int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
			u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
	MLX5_SET(query_dct_in, in, dctn, qp->qpn);

	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
			     (void *)out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_dct_query);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)]   = {0};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)]   = {0};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)]   = {};
	u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {};

	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
	MLX5_SET(destroy_rq_in, in, rqn, rqn);
	MLX5_SET(destroy_rq_in, in, uid, uid);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

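/*
 * Create an RQ and track it in the QP table (keyed by the RQ number) so
 * that async events for it can be dispatched through rsc_event_notifier().
 */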
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq)
{
	int err;
	u32 rqn;

	err = mlx5_core_create_rq(dev, in, inlen, &rqn);
	if (err)
		return err;

	rq->uid = MLX5_GET(create_rq_in, in, uid);
	rq->qpn = rqn;
	err = create_resource_common(dev, rq, MLX5_RES_RQ);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	destroy_rq_tracked(dev, rq->qpn, rq->uid);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);

void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *rq)
{
	destroy_resource_common(dev, rq);
	destroy_rq_tracked(dev, rq->qpn, rq->uid);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);

static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)]   = {};
	u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {};

	MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
	MLX5_SET(destroy_sq_in, in, sqn, sqn);
	MLX5_SET(destroy_sq_in, in, uid, uid);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq)
{
	int err;
	u32 sqn;

	err = mlx5_core_create_sq(dev, in, inlen, &sqn);
	if (err)
		return err;

	sq->uid = MLX5_GET(create_sq_in, in, uid);
	sq->qpn = sqn;
	err = create_resource_common(dev, sq, MLX5_RES_SQ);
	if (err)
		goto err_destroy_sq;

	return 0;

err_destroy_sq:
	destroy_sq_tracked(dev, sq->qpn, sq->uid);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);

void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *sq)
{
	destroy_resource_common(dev, sq);
	destroy_sq_tracked(dev, sq->qpn, sq->uid);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);

int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
	int err;

	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*counter_id = MLX5_GET(alloc_q_counter_out, out,
				       counter_set_id);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);

int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
	MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);

int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
			      int reset, void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
	MLX5_SET(query_q_counter_in, in, clear, reset);
	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);

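/*
 * Take a reference on a tracked resource by number and type; returns NULL
 * if no such resource exists.  Release with mlx5_core_res_put().
 */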
struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
						int res_num,
						enum mlx5_res_type res_type)
{
	u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	return mlx5_get_rsc(table, rsn);
}
EXPORT_SYMBOL_GPL(mlx5_core_res_hold);

void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
{
	mlx5_core_put_rsc(res);
}
EXPORT_SYMBOL_GPL(mlx5_core_res_put);