2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/gfp.h>
34 #include <linux/export.h>
35 #include <linux/mlx5/cmd.h>
36 #include <linux/mlx5/qp.h>
37 #include <linux/mlx5/driver.h>
38 #include <linux/mlx5/transobj.h>
40 #include "mlx5_core.h"
43 static struct mlx5_core_rsc_common *
44 mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
46 struct mlx5_core_rsc_common *common;
49 spin_lock_irqsave(&table->lock, flags);
51 common = radix_tree_lookup(&table->tree, rsn);
53 atomic_inc(&common->refcount);
55 spin_unlock_irqrestore(&table->lock, flags);
60 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
62 if (atomic_dec_and_test(&common->refcount))
63 complete(&common->free);
66 static u64 qp_allowed_event_types(void)
70 mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
71 BIT(MLX5_EVENT_TYPE_COMM_EST) |
72 BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
73 BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
74 BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
75 BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
76 BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
77 BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);
82 static u64 rq_allowed_event_types(void)
86 mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
87 BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
92 static u64 sq_allowed_event_types(void)
94 return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
97 static u64 dct_allowed_event_types(void)
99 return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
102 static bool is_event_type_allowed(int rsc_type, int event_type)
105 case MLX5_EVENT_QUEUE_TYPE_QP:
106 return BIT(event_type) & qp_allowed_event_types();
107 case MLX5_EVENT_QUEUE_TYPE_RQ:
108 return BIT(event_type) & rq_allowed_event_types();
109 case MLX5_EVENT_QUEUE_TYPE_SQ:
110 return BIT(event_type) & sq_allowed_event_types();
111 case MLX5_EVENT_QUEUE_TYPE_DCT:
112 return BIT(event_type) & dct_allowed_event_types();
114 WARN(1, "Event arrived for unknown resource type");
/*
 * Async event dispatcher for QP-table resources (QP/RQ/SQ/DCT): decode
 * a resource serial number (rsn) from the event queue entry, take a
 * reference on the matching resource, verify the event type is legal
 * for that resource type, then forward the event.
 *
 * NOTE(review): several lines of this function are not visible in this
 * view ('u32 rsn' declaration, 'eqe = data' assignments, break/return
 * statements, the default arm, the NULL-check on 'common', 'goto out',
 * the case labels of the dispatch switch and the final return value) -
 * confirm against the complete file before modifying.
 */
static int rsc_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
	struct mlx5_core_rsc_common *common;
	struct mlx5_qp_table *table;
	struct mlx5_core_dev *dev;
	struct mlx5_core_dct *dct;
	u8 event_type = (u8)type;	/* notifier 'type' carries the EQE event type */
	struct mlx5_core_qp *qp;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;

	switch (event_type) {
	case MLX5_EVENT_TYPE_DCT_DRAINED:
		/* DCT event: low 24 bits are the dctn, tagged MLX5_RES_DCT. */
		rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		/* QP/SRQ event: low 24 bits are the queue number, tagged with
		 * the queue type reported by firmware in the EQE.
		 */
		rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);

	/* Recover the owning device from the embedded notifier block. */
	table = container_of(nb, struct mlx5_qp_table, nb);
	priv = container_of(table, struct mlx5_priv, qp_table);
	dev = container_of(priv, struct mlx5_core_dev, priv);

	mlx5_core_dbg(dev, "event (%d) arrived on resource 0x%x\n", eqe->type, rsn);

	/* Takes a reference; released via mlx5_core_put_rsc() below. */
	common = mlx5_get_rsc(table, rsn);
	/* Event for an rsn that is no longer (or never was) registered. */
	mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn);

	/* High bits of rsn encode the resource/queue type. */
	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
		mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",

	switch (common->res) {
		/* QP/RQ/SQ all embed mlx5_core_qp; invoke its handler. */
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		/* DCT: a DRAINED event wakes the waiter in destroy path. */
		dct = (struct mlx5_core_dct *)common;
		if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
			complete(&dct->drained);
		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
	/* Drop the reference taken by mlx5_get_rsc() above. */
	mlx5_core_put_rsc(common);
/* Register a QP-like resource (QP/RQ/SQ/DCT) in the device's resource
 * table so async firmware events can be routed to it.  The refcount
 * starts at 1 (the table's own reference), dropped again by
 * destroy_resource_common().
 * NOTE(review): the rsc_type parameter line, 'int err;', the value
 * argument of radix_tree_insert() and the error/return lines are not
 * visible in this view - confirm against the complete file.
 */
static int create_resource_common(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *qp,
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	qp->common.res = rsc_type;
	spin_lock_irq(&table->lock);
	/* Table key = qpn with the resource type tagged in the high bits. */
	err = radix_tree_insert(&table->tree,
				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
	spin_unlock_irq(&table->lock);

	atomic_set(&qp->common.refcount, 1);
	init_completion(&qp->common.free);
	qp->pid = current->pid;
216 static void destroy_resource_common(struct mlx5_core_dev *dev,
217 struct mlx5_core_qp *qp)
219 struct mlx5_qp_table *table = &dev->priv.qp_table;
222 spin_lock_irqsave(&table->lock, flags);
223 radix_tree_delete(&table->tree,
224 qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
225 spin_unlock_irqrestore(&table->lock, flags);
226 mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
227 wait_for_completion(&qp->common.free);
230 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
231 struct mlx5_core_dct *dct,
234 u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
235 u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
236 u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
237 struct mlx5_core_qp *qp = &dct->mqp;
240 init_completion(&dct->drained);
241 MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
243 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
245 mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
249 qp->qpn = MLX5_GET(create_dct_out, out, dctn);
250 qp->uid = MLX5_GET(create_dct_in, in, uid);
251 err = create_resource_common(dev, qp, MLX5_RES_DCT);
257 MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
258 MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
259 MLX5_SET(destroy_dct_in, din, uid, qp->uid);
260 mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
261 (void *)&out, sizeof(dout));
264 EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
/* Create a QP from the caller-built create_qp_in mailbox and register
 * it in the resource table and debugfs.  On registration failure the
 * firmware QP is destroyed again (din/dout mailboxes at the bottom).
 * NOTE(review): the 'u32 *in, int inlen' parameter line, 'int err;',
 * the error checks/labels after each step and the return statements
 * are not visible in this view - confirm against the complete file.
 */
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	/* Record the uid the QP was created with and the assigned qpn. */
	qp->uid = MLX5_GET(create_qp_in, in, uid);
	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

	err = create_resource_common(dev, qp, MLX5_RES_QP);

	/* Debugfs registration is best-effort: failure is only logged. */
	err = mlx5_debug_qp_add(dev, qp);
	mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",

	atomic_inc(&dev->num_qps);

	/* Error path: destroy the firmware QP created above. */
	memset(din, 0, sizeof(din));
	memset(dout, 0, sizeof(dout));
	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, din, uid, qp->uid);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
309 static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
310 struct mlx5_core_dct *dct)
312 u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
313 u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
314 struct mlx5_core_qp *qp = &dct->mqp;
316 MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
317 MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
318 MLX5_SET(drain_dct_in, in, uid, qp->uid);
319 return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
320 (void *)&out, sizeof(out));
/* Destroy a DCT: drain it, wait for the DRAINED event, unregister it
 * from the resource table, then issue DESTROY_DCT to firmware.  When
 * the device is in internal-error state the drain/wait is skipped and
 * teardown proceeds directly.
 * NOTE(review): 'int err;', the branch bodies of the drain-failure
 * check, the skip label and the final return are not visible in this
 * view - confirm against the complete file.
 */
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
			  struct mlx5_core_dct *dct)
	u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
	struct mlx5_core_qp *qp = &dct->mqp;

	err = mlx5_core_drain_dct(dev, dct);
	/* In internal-error state firmware won't answer; skip the wait. */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
	mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);

	/* Completed by the DCT_DRAINED handler in rsc_event_notifier(). */
	wait_for_completion(&dct->drained);

	destroy_resource_common(dev, &dct->mqp);
	MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
	MLX5_SET(destroy_dct_in, in, uid, qp->uid);
	err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
			    (void *)&out, sizeof(out));
EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
/* Destroy a QP: remove debugfs entry, unregister from the resource
 * table (waiting for in-flight event handlers), then issue DESTROY_QP.
 * NOTE(review): 'int err;', the error check after mlx5_cmd_exec() and
 * the final return are not visible in this view - confirm against the
 * complete file.
 */
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp)
	u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};

	mlx5_debug_qp_remove(dev, qp);

	/* Quiesces event delivery for this QP before firmware destroy. */
	destroy_resource_common(dev, qp);

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, in, uid, qp->uid);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));

	atomic_dec(&dev->num_qps);
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
/* Program the device-wide delay-drop timeout via the
 * SET_DELAY_DROP_PARAMS command.
 * NOTE(review): the timeout parameter line of the signature and the
 * value argument of the second MLX5_SET are not visible in this view -
 * confirm against the complete file.
 */
int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
	u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0};

	MLX5_SET(set_delay_drop_params_in, in, opcode,
		 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
	MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);
/* Allocate the in/out command mailboxes for a modify-QP transition.
 * Pairs with mbox_free().
 * NOTE(review): the 'mbox->inlen = inlen;' assignment (mbox->inlen is
 * read below), the failure cleanup inside the if-block and the return
 * statements are not visible in this view - confirm against the
 * complete file.
 */
static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
	mbox->outlen = outlen;
	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
	/* Either allocation failing means the pair is unusable. */
	if (!mbox->in || !mbox->out) {
/* Release the command mailboxes allocated by mbox_alloc().
 * NOTE(review): the function body is not visible in this view.
 */
static void mbox_free(struct mbox_info *mbox)
/* Allocate and fill the command mailbox pair for a modify-QP state
 * transition identified by @opcode.  Transitions without a QP context
 * (2RST/2ERR) use MOD_QP_IN_SET; transitions carrying a QPC also copy
 * @qpc and @opt_param_mask via MOD_QP_IN_SET_QPC.
 * NOTE(review): the mbox->in/out NULL-initialization, the -ENOMEM
 * returns after each MBOX_ALLOC, the 'break' statements per case and
 * the -EINVAL/0 returns are not visible in this view - confirm against
 * the complete file.
 */
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox, u16 uid)

/* Allocate in/out mailboxes sized for the given command layout. */
#define MBOX_ALLOC(mbox, typ)  \
	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

/* Fill the common header fields (opcode, qpn, uid) of a modify command. */
#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid) \
	MLX5_SET(typ##_in, in, opcode, _opcode); \
	MLX5_SET(typ##_in, in, qpn, _qpn); \
	MLX5_SET(typ##_in, in, uid, _uid); \

/* As MOD_QP_IN_SET, plus the optional-parameter mask and QP context. */
#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid) \
	MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid); \
	MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
	memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, \
	       MLX5_ST_SZ_BYTES(qpc)); \

	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		/* Unknown transition: reject the request. */
		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
/* Execute a modify-QP state transition: build the transition-specific
 * mailbox via modify_qp_mbox_alloc(), run the command, then release the
 * mailboxes.
 * NOTE(review): 'int err;', the error check after the alloc, the
 * mbox_free() call and the final return are not visible in this view -
 * confirm against the complete file.
 */
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
			u32 opt_param_mask, void *qpc,
			struct mlx5_core_qp *qp)
	struct mbox_info mbox;

	err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
				   opt_param_mask, qpc, &mbox, qp->uid);

	err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
/* Initialize the device's QP resource table (lock, radix tree),
 * create the QP debugfs entries, and register the async event
 * notifier that routes firmware events to tracked resources.
 */
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	/* GFP_ATOMIC: tree nodes may be allocated under the spinlock. */
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	mlx5_qp_debugfs_init(dev);

	table->nb.notifier_call = rsc_event_notifier;
	mlx5_notifier_register(dev, &table->nb);
/* Tear down what mlx5_init_qp_table() set up: unregister the event
 * notifier first so no events race with debugfs removal.
 */
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	mlx5_notifier_unregister(dev, &table->nb);
	mlx5_qp_debugfs_cleanup(dev);
539 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
540 u32 *out, int outlen)
542 u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
544 MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
545 MLX5_SET(query_qp_in, in, qpn, qp->qpn);
546 return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
548 EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
550 int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
551 u32 *out, int outlen)
553 u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
554 struct mlx5_core_qp *qp = &dct->mqp;
556 MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
557 MLX5_SET(query_dct_in, in, dctn, qp->qpn);
559 return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
560 (void *)out, outlen);
562 EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
564 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
566 u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
567 u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
570 MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
571 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
573 *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
576 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
578 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
580 u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
581 u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
583 MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
584 MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
585 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
587 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
589 static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid)
591 u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
592 u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {};
594 MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
595 MLX5_SET(destroy_rq_in, in, rqn, rqn);
596 MLX5_SET(destroy_rq_in, in, uid, uid);
597 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
/* Create an RQ and register it in the QP resource table so async
 * events are routed to it; on registration failure the firmware RQ is
 * destroyed again via destroy_rq_tracked().
 * NOTE(review): the rqn/err local declarations, the error checks after
 * each step, the qpn assignment from rqn, the labels and return
 * statements are not visible in this view - confirm against the
 * complete file.
 */
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq)
	err = mlx5_core_create_rq(dev, in, inlen, &rqn);

	/* Remember the uid used at creation for the destroy command. */
	rq->uid = MLX5_GET(create_rq_in, in, uid);

	err = create_resource_common(dev, rq, MLX5_RES_RQ);

	/* Error path: tear the firmware RQ back down. */
	destroy_rq_tracked(dev, rq->qpn, rq->uid);
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
625 void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
626 struct mlx5_core_qp *rq)
628 destroy_resource_common(dev, rq);
629 destroy_rq_tracked(dev, rq->qpn, rq->uid);
631 EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
633 static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid)
635 u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
636 u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {};
638 MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
639 MLX5_SET(destroy_sq_in, in, sqn, sqn);
640 MLX5_SET(destroy_sq_in, in, uid, uid);
641 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
/* Create an SQ and register it in the QP resource table so async
 * events are routed to it; on registration failure the firmware SQ is
 * destroyed again via destroy_sq_tracked().
 * NOTE(review): the sqn/err local declarations, the error checks after
 * each step, the qpn assignment from sqn, the labels and return
 * statements are not visible in this view - confirm against the
 * complete file.
 */
int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq)
	err = mlx5_core_create_sq(dev, in, inlen, &sqn);

	/* Remember the uid used at creation for the destroy command. */
	sq->uid = MLX5_GET(create_sq_in, in, uid);

	err = create_resource_common(dev, sq, MLX5_RES_SQ);

	/* Error path: tear the firmware SQ back down. */
	destroy_sq_tracked(dev, sq->qpn, sq->uid);
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
669 void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
670 struct mlx5_core_qp *sq)
672 destroy_resource_common(dev, sq);
673 destroy_sq_tracked(dev, sq->qpn, sq->uid);
675 EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
677 int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
679 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
680 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
683 MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
684 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
686 *counter_id = MLX5_GET(alloc_q_counter_out, out,
690 EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
692 int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
694 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
695 u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
697 MLX5_SET(dealloc_q_counter_in, in, opcode,
698 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
699 MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
700 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
702 EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
704 int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
705 int reset, void *out, int out_size)
707 u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
709 MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
710 MLX5_SET(query_q_counter_in, in, clear, reset);
711 MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
712 return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
714 EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
716 struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
718 enum mlx5_res_type res_type)
720 u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
721 struct mlx5_qp_table *table = &dev->priv.qp_table;
723 return mlx5_get_rsc(table, rsn);
725 EXPORT_SYMBOL_GPL(mlx5_core_res_hold);
/* Release a reference taken by mlx5_core_res_hold(). */
void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
	mlx5_core_put_rsc(res);
EXPORT_SYMBOL_GPL(mlx5_core_res_put);