/*
 * Copyright (c) 2017, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <rdma/uverbs_std_types.h>
34 #include "rdma_core.h"
/*
 * uverbs_free_cq() - cleanup callback run when a CQ uobject is removed,
 * either via the explicit destroy method or during ufile teardown.
 * Destroys the driver CQ, then releases any events still queued for it
 * on the attached completion channel.
 *
 * NOTE(review): this view of the file has lines elided (the function's
 * opening brace, the declaration of 'ret', the destroy-failure check,
 * and the tail of the ib_uverbs_release_ucq() call are not visible).
 * Only comments were added here; no code was altered.
 */
static int uverbs_free_cq(struct ib_uobject *uobject,
			  enum rdma_remove_reason why,
			  struct uverbs_attr_bundle *attrs)
struct ib_cq *cq = uobject->object;
/*
 * cq_context was set at create time to the completion channel's event
 * queue, or NULL when no channel was attached (see the CREATE handler).
 */
struct ib_uverbs_event_queue *ev_queue = cq->cq_context;
struct ib_ucq_object *ucq =
	container_of(uobject, struct ib_ucq_object, uevent.uobject);

/* Tear down the driver CQ first, passing through the caller's udata. */
ret = ib_destroy_cq_user(cq, &attrs->driver_udata);

/*
 * Drop the events queued for this CQ.  ev_queue is embedded in the
 * completion-event file, so map it back to its containing file (or
 * pass NULL when no channel was attached).
 */
ib_uverbs_release_ucq(
	ev_queue ? container_of(ev_queue,
				struct ib_uverbs_completion_event_file,
/*
 * UVERBS_METHOD_CQ_CREATE handler: create a completion queue on behalf
 * of userspace.  Copies the creation attributes from the bundle,
 * optionally attaches a completion channel and an async-event fd,
 * allocates and initializes the driver CQ object, registers it with
 * restrack, and returns the actual CQE count to userspace.
 *
 * NOTE(review): lines are elided in this view (variable declarations,
 * 'if (ret)' checks after each copy, the -EOPNOTSUPP / -EINVAL /
 * -ENOMEM returns, 'goto' error labels, kfree on the error path, and
 * closing braces are not visible).  Only comments were added.
 */
static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
	struct uverbs_attr_bundle *attrs)
/* The pre-allocated uobject that will own the new CQ. */
struct ib_ucq_object *obj = container_of(
	uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE),
	typeof(*obj), uevent.uobject);
struct ib_device *ib_dev = attrs->context->device;
struct ib_cq_init_attr attr = {};
/* Optional completion channel; stays NULL if the attr was not given. */
struct ib_uverbs_completion_event_file *ev_file = NULL;
struct ib_uobject *ev_file_uobj;

/*
 * Both create and destroy must be provided by the driver, since a
 * created CQ must also be destroyable through uverbs_free_cq().
 */
if (!ib_dev->ops.create_cq || !ib_dev->ops.destroy_cq)

/* Pull the mandatory scalar attributes out of the bundle. */
ret = uverbs_copy_from(&attr.comp_vector, attrs,
		       UVERBS_ATTR_CREATE_CQ_COMP_VECTOR);
ret = uverbs_copy_from(&attr.cqe, attrs,
		       UVERBS_ATTR_CREATE_CQ_CQE);
ret = uverbs_copy_from(&user_handle, attrs,
		       UVERBS_ATTR_CREATE_CQ_USER_HANDLE);

/* Only these two creation flags are accepted; others are rejected. */
ret = uverbs_get_flags32(&attr.flags, attrs,
			 UVERBS_ATTR_CREATE_CQ_FLAGS,
			 IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION |
			 IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN);

/*
 * The completion channel attr is optional: IS_ERR() means it was not
 * supplied, which is not an error here.
 */
ev_file_uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL);
if (!IS_ERR(ev_file_uobj)) {
	ev_file = container_of(ev_file_uobj,
			       struct ib_uverbs_completion_event_file,
	/* Hold a reference on the channel for the CQ's lifetime. */
	uverbs_uobject_get(ev_file_uobj);

/* Optional per-CQ async event fd (may legitimately be absent). */
obj->uevent.event_file = ib_uverbs_get_async_event(
	attrs, UVERBS_ATTR_CREATE_CQ_EVENT_FD);

/* comp_vector is user-controlled; bound it against the device. */
if (attr.comp_vector >= attrs->ufile->device->num_comp_vectors) {

INIT_LIST_HEAD(&obj->comp_list);
INIT_LIST_HEAD(&obj->uevent.event_list);

/* Zeroed driver-sized allocation of the CQ object. */
cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);

cq->comp_handler = ib_uverbs_comp_handler;
cq->event_handler = ib_uverbs_cq_event_handler;
/* Route completion events to the channel's queue, if one is attached. */
cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
atomic_set(&cq->usecnt, 0);

/* Register with resource tracking before the driver callback. */
rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
rdma_restrack_set_name(&cq->res, NULL);

ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata);

obj->uevent.uobject.object = cq;
obj->uevent.uobject.user_handle = user_handle;
rdma_restrack_add(&cq->res);
/* Point of no return: the uobject is now live and visible. */
uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE);

/* Report the CQE count actually allocated back to userspace. */
ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,

/* --- error path: undo restrack and drop the held references --- */
rdma_restrack_put(&cq->res);

if (obj->uevent.event_file)
	uverbs_uobject_put(&obj->uevent.event_file->uobj);
uverbs_uobject_put(ev_file_uobj);
/*
 * Attribute schema for UVERBS_METHOD_CQ_CREATE.  Each entry declares
 * one attribute the handler above may read or write.
 *
 * NOTE(review): several macro arguments (object types, access modes,
 * UA_MANDATORY/UA_OPTIONAL markers, and the closing parenthesis) are
 * elided in this view; only comments were added.
 */
DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_CQ_CREATE,
	/* IDR slot that will receive the newly created CQ uobject. */
	UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_CQ_HANDLE,
	/* Requested number of CQ entries (copied into attr.cqe). */
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_CQE,
			   UVERBS_ATTR_TYPE(u32),
	/* Opaque userspace handle, stored on the uobject at create. */
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_USER_HANDLE,
			   UVERBS_ATTR_TYPE(u64),
	/* Optional completion channel fd to deliver completions to. */
	UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL,
		       UVERBS_OBJECT_COMP_CHANNEL,
	/* Completion vector; bounds-checked against num_comp_vectors. */
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_COMP_VECTOR,
			   UVERBS_ATTR_TYPE(u32),
	/* Creation flags (timestamp / ignore-overrun only). */
	UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_CQ_FLAGS,
			     enum ib_uverbs_ex_create_cq_flags),
	/* Response: the CQE count actually allocated (cq->cqe). */
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_CQ_RESP_CQE,
			    UVERBS_ATTR_TYPE(u32),
	/* Optional fd for receiving async events for this CQ. */
	UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_CQ_EVENT_FD,
		       UVERBS_OBJECT_ASYNC_EVENT,
/*
 * UVERBS_METHOD_CQ_DESTROY handler: report event counters for a CQ
 * being destroyed.  The actual teardown of the driver CQ happens in
 * uverbs_free_cq() via the uobject destroy machinery; this handler only
 * copies the response back to userspace.
 *
 * NOTE(review): the opening brace, the closing of the 'resp'
 * initializer, the sizeof argument to uverbs_copy_to(), and the
 * closing brace are elided in this view; only comments were added.
 */
static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(
	struct uverbs_attr_bundle *attrs)
struct ib_uobject *uobj =
	uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_CQ_HANDLE);
struct ib_ucq_object *obj =
	container_of(uobj, struct ib_ucq_object, uevent.uobject);
/*
 * Tell userspace how many completion and async events were delivered
 * for this CQ, so it can drain its queues accurately.
 */
struct ib_uverbs_destroy_cq_resp resp = {
	.comp_events_reported = obj->comp_events_reported,
	.async_events_reported = obj->uevent.events_reported

return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_CQ_RESP, &resp,
/*
 * Attribute schema for UVERBS_METHOD_CQ_DESTROY: the CQ handle to
 * destroy (with DESTROY access, which removes it from the IDR) and the
 * response buffer for the event counters.
 *
 * NOTE(review): object-type and UA_MANDATORY arguments plus the closing
 * parenthesis are elided in this view; only comments were added.
 */
DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_CQ_DESTROY,
	UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_CQ_HANDLE,
			UVERBS_ACCESS_DESTROY,
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_CQ_RESP,
			    UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_cq_resp),
/*
 * The CQ uverbs object: IDR-allocated with room for an ib_ucq_object,
 * cleaned up through uverbs_free_cq(), and exposing the CREATE and
 * DESTROY methods declared above.
 *
 * NOTE(review): the object name argument (presumably UVERBS_OBJECT_CQ,
 * per the chain definition below) and the closing parenthesis are
 * elided in this view; only comments were added.
 */
DECLARE_UVERBS_NAMED_OBJECT(
	UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object), uverbs_free_cq),
	&UVERBS_METHOD(UVERBS_METHOD_CQ_CREATE),
	&UVERBS_METHOD(UVERBS_METHOD_CQ_DESTROY)
218 const struct uapi_definition uverbs_def_obj_cq[] = {
219 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_CQ,
220 UAPI_DEF_OBJ_NEEDS_FN(destroy_cq)),