2 * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#include <rdma/uverbs_std_types.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <linux/bug.h>
#include <linux/file.h>
#include <rdma/restrack.h>
#include "rdma_core.h"
#include "uverbs.h"
42 static int uverbs_free_ah(struct ib_uobject *uobject,
43 enum rdma_remove_reason why,
44 struct uverbs_attr_bundle *attrs)
46 return rdma_destroy_ah_user((struct ib_ah *)uobject->object,
47 RDMA_DESTROY_AH_SLEEPABLE,
48 &attrs->driver_udata);
51 static int uverbs_free_flow(struct ib_uobject *uobject,
52 enum rdma_remove_reason why,
53 struct uverbs_attr_bundle *attrs)
55 struct ib_flow *flow = (struct ib_flow *)uobject->object;
56 struct ib_uflow_object *uflow =
57 container_of(uobject, struct ib_uflow_object, uobject);
58 struct ib_qp *qp = flow->qp;
61 ret = flow->device->ops.destroy_flow(flow);
64 atomic_dec(&qp->usecnt);
65 ib_uverbs_flow_resources_free(uflow->resources);
71 static int uverbs_free_mw(struct ib_uobject *uobject,
72 enum rdma_remove_reason why,
73 struct uverbs_attr_bundle *attrs)
75 return uverbs_dealloc_mw((struct ib_mw *)uobject->object);
78 static int uverbs_free_qp(struct ib_uobject *uobject,
79 enum rdma_remove_reason why,
80 struct uverbs_attr_bundle *attrs)
82 struct ib_qp *qp = uobject->object;
83 struct ib_uqp_object *uqp =
84 container_of(uobject, struct ib_uqp_object, uevent.uobject);
88 * If this is a user triggered destroy then do not allow destruction
89 * until the user cleans up all the mcast bindings. Unlike in other
90 * places we forcibly clean up the mcast attachments for !DESTROY
91 * because the mcast attaches are not ubojects and will not be
92 * destroyed by anything else during cleanup processing.
94 if (why == RDMA_REMOVE_DESTROY) {
95 if (!list_empty(&uqp->mcast_list))
97 } else if (qp == qp->real_qp) {
98 ib_uverbs_detach_umcast(qp, uqp);
101 ret = ib_destroy_qp_user(qp, &attrs->driver_udata);
102 if (ib_is_destroy_retryable(ret, why, uobject))
106 atomic_dec(&uqp->uxrcd->refcnt);
108 ib_uverbs_release_uevent(&uqp->uevent);
112 static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
113 enum rdma_remove_reason why,
114 struct uverbs_attr_bundle *attrs)
116 struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object;
117 struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
120 ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
121 if (ib_is_destroy_retryable(ret, why, uobject))
128 static int uverbs_free_wq(struct ib_uobject *uobject,
129 enum rdma_remove_reason why,
130 struct uverbs_attr_bundle *attrs)
132 struct ib_wq *wq = uobject->object;
133 struct ib_uwq_object *uwq =
134 container_of(uobject, struct ib_uwq_object, uevent.uobject);
137 ret = ib_destroy_wq(wq, &attrs->driver_udata);
138 if (ib_is_destroy_retryable(ret, why, uobject))
141 ib_uverbs_release_uevent(&uwq->uevent);
145 static int uverbs_free_srq(struct ib_uobject *uobject,
146 enum rdma_remove_reason why,
147 struct uverbs_attr_bundle *attrs)
149 struct ib_srq *srq = uobject->object;
150 struct ib_uevent_object *uevent =
151 container_of(uobject, struct ib_uevent_object, uobject);
152 enum ib_srq_type srq_type = srq->srq_type;
155 ret = ib_destroy_srq_user(srq, &attrs->driver_udata);
156 if (ib_is_destroy_retryable(ret, why, uobject))
159 if (srq_type == IB_SRQT_XRC) {
160 struct ib_usrq_object *us =
161 container_of(uevent, struct ib_usrq_object, uevent);
163 atomic_dec(&us->uxrcd->refcnt);
166 ib_uverbs_release_uevent(uevent);
170 static int uverbs_free_xrcd(struct ib_uobject *uobject,
171 enum rdma_remove_reason why,
172 struct uverbs_attr_bundle *attrs)
174 struct ib_xrcd *xrcd = uobject->object;
175 struct ib_uxrcd_object *uxrcd =
176 container_of(uobject, struct ib_uxrcd_object, uobject);
179 ret = ib_destroy_usecnt(&uxrcd->refcnt, why, uobject);
183 mutex_lock(&attrs->ufile->device->xrcd_tree_mutex);
184 ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why, attrs);
185 mutex_unlock(&attrs->ufile->device->xrcd_tree_mutex);
190 static int uverbs_free_pd(struct ib_uobject *uobject,
191 enum rdma_remove_reason why,
192 struct uverbs_attr_bundle *attrs)
194 struct ib_pd *pd = uobject->object;
197 ret = ib_destroy_usecnt(&pd->usecnt, why, uobject);
201 ib_dealloc_pd_user(pd, &attrs->driver_udata);
205 void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue)
207 struct ib_uverbs_event *entry, *tmp;
209 spin_lock_irq(&event_queue->lock);
211 * The user must ensure that no new items are added to the event_list
212 * once is_closed is set.
214 event_queue->is_closed = 1;
215 spin_unlock_irq(&event_queue->lock);
216 wake_up_interruptible(&event_queue->poll_wait);
217 kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
219 spin_lock_irq(&event_queue->lock);
220 list_for_each_entry_safe(entry, tmp, &event_queue->event_list, list) {
222 list_del(&entry->obj_list);
225 spin_unlock_irq(&event_queue->lock);
229 uverbs_completion_event_file_destroy_uobj(struct ib_uobject *uobj,
230 enum rdma_remove_reason why)
232 struct ib_uverbs_completion_event_file *file =
233 container_of(uobj, struct ib_uverbs_completion_event_file,
236 ib_uverbs_free_event_queue(&file->ev_queue);
/*
 * Default no-op handler for destroy methods whose work is entirely done
 * by the uobject cleanup callback; exported for driver use.
 */
int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
{
	return 0;
}
EXPORT_SYMBOL(uverbs_destroy_def_handler);
246 DECLARE_UVERBS_NAMED_OBJECT(
247 UVERBS_OBJECT_COMP_CHANNEL,
248 UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_completion_event_file),
249 uverbs_completion_event_file_destroy_uobj,
254 DECLARE_UVERBS_NAMED_OBJECT(
256 UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp));
258 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
259 UVERBS_METHOD_MW_DESTROY,
260 UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MW_HANDLE,
262 UVERBS_ACCESS_DESTROY,
265 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MW,
266 UVERBS_TYPE_ALLOC_IDR(uverbs_free_mw),
267 &UVERBS_METHOD(UVERBS_METHOD_MW_DESTROY));
269 DECLARE_UVERBS_NAMED_OBJECT(
271 UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object),
274 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
275 UVERBS_METHOD_AH_DESTROY,
276 UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_AH_HANDLE,
278 UVERBS_ACCESS_DESTROY,
281 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_AH,
282 UVERBS_TYPE_ALLOC_IDR(uverbs_free_ah),
283 &UVERBS_METHOD(UVERBS_METHOD_AH_DESTROY));
285 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
286 UVERBS_METHOD_FLOW_DESTROY,
287 UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_HANDLE,
289 UVERBS_ACCESS_DESTROY,
292 DECLARE_UVERBS_NAMED_OBJECT(
294 UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uflow_object),
296 &UVERBS_METHOD(UVERBS_METHOD_FLOW_DESTROY));
298 DECLARE_UVERBS_NAMED_OBJECT(
300 UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), uverbs_free_wq));
302 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
303 UVERBS_METHOD_RWQ_IND_TBL_DESTROY,
304 UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_RWQ_IND_TBL_HANDLE,
305 UVERBS_OBJECT_RWQ_IND_TBL,
306 UVERBS_ACCESS_DESTROY,
309 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL,
310 UVERBS_TYPE_ALLOC_IDR(uverbs_free_rwq_ind_tbl),
311 &UVERBS_METHOD(UVERBS_METHOD_RWQ_IND_TBL_DESTROY));
313 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
314 UVERBS_METHOD_XRCD_DESTROY,
315 UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_XRCD_HANDLE,
317 UVERBS_ACCESS_DESTROY,
320 DECLARE_UVERBS_NAMED_OBJECT(
322 UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object),
324 &UVERBS_METHOD(UVERBS_METHOD_XRCD_DESTROY));
326 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
327 UVERBS_METHOD_PD_DESTROY,
328 UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_PD_HANDLE,
330 UVERBS_ACCESS_DESTROY,
333 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_PD,
334 UVERBS_TYPE_ALLOC_IDR(uverbs_free_pd),
335 &UVERBS_METHOD(UVERBS_METHOD_PD_DESTROY));
337 const struct uapi_definition uverbs_def_obj_intf[] = {
338 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_PD,
339 UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)),
340 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_COMP_CHANNEL,
341 UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)),
342 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_QP,
343 UAPI_DEF_OBJ_NEEDS_FN(destroy_qp)),
344 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_AH,
345 UAPI_DEF_OBJ_NEEDS_FN(destroy_ah)),
346 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MW,
347 UAPI_DEF_OBJ_NEEDS_FN(dealloc_mw)),
348 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_SRQ,
349 UAPI_DEF_OBJ_NEEDS_FN(destroy_srq)),
350 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_FLOW,
351 UAPI_DEF_OBJ_NEEDS_FN(destroy_flow)),
352 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_WQ,
353 UAPI_DEF_OBJ_NEEDS_FN(destroy_wq)),
354 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
355 UVERBS_OBJECT_RWQ_IND_TBL,
356 UAPI_DEF_OBJ_NEEDS_FN(destroy_rwq_ind_table)),
357 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_XRCD,
358 UAPI_DEF_OBJ_NEEDS_FN(dealloc_xrcd)),