/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/mutex.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"
#include "cma_priv.h"
#include "restrack.h"

/*
 * Sort array elements by the netlink attribute name
 */
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_CHARDEV]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_CHARDEV_ABI]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_CHARDEV_NAME]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_CHARDEV_TYPE]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE },
	[RDMA_NLDEV_ATTR_DEV_DIM]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_INDEX]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME]		= { .type = NLA_NUL_STRING,
					.len = IB_DEVICE_NAME_MAX },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_PROTOCOL]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_DRIVER]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DRIVER_STRING]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_DRIVER_S32]		= { .type = NLA_S32 },
	[RDMA_NLDEV_ATTR_DRIVER_S64]		= { .type = NLA_S64 },
	[RDMA_NLDEV_ATTR_DRIVER_U32]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DRIVER_U64]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_FW_VERSION]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_LID]			= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LINK_TYPE]		= { .type = NLA_NUL_STRING,
					.len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_LMC]			= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_NDEV_INDEX]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_NAME]		= { .type = NLA_NUL_STRING,
					.len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_NODE_GUID]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_PORT_INDEX]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_CM_ID]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CM_IDN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQ]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQE]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CQN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CTXN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_DST_ADDR]		= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_IOVA]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_KERN_NAME]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_RES_LKEY]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LQPN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_MR]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_MRLEN]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_MRN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_MR_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PD]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PDN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_PD_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_POLL_CTX]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PS]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_QP]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_QP_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_RKEY]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQPN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQ_PSN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SQ_PSN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRC_ADDR]		= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_STATE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_RES_TYPE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_USECNT]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SM_LID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_MODE]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_RES]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_COUNTER]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_NET_NS_FD]			= { .type = NLA_U32 },
	[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]	= { .type = NLA_U8 },
};

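/*
 * The _rdma_nl_put_driver_*() helpers below emit one driver-specific
 * attribute as a (name, value) pair, optionally tagged with a print type
 * (e.g. hex) that hints to userspace how the value should be rendered.
 */
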
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
				      enum rdma_nldev_print_type print_type)
{
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
		return -EMSGSIZE;
	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u32 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u64 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32);

int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
			       u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);

int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64);

int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);

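/*
 * Example (hypothetical driver hook): a provider may emit vendor-specific
 * attributes from its ib_device_ops.fill_res_entry callback using the
 * exported helpers above; the attribute names and values here are
 * illustrative only:
 *
 *	static int mydrv_fill_res_entry(struct sk_buff *msg,
 *					struct rdma_restrack_entry *res)
 *	{
 *		if (rdma_nl_put_driver_u32(msg, "sq_depth", 128))
 *			return -EMSGSIZE;
 *		return rdma_nl_put_driver_u32_hex(msg, "db_offset", 0x800);
 *	}
 */
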
static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
			   dev_name(&device->dev)))
		return -EMSGSIZE;

	return 0;
}

static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];
	int ret = 0;
	u8 port;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* Device without FW has strlen(fw) = 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim))
		return -EMSGSIZE;

	/*
	 * Link type is determined from the first port; mlx4 devices, which
	 * can have two different link types on the same IB device, are
	 * better avoided in the future.
	 */
	port = rdma_start_port(device);
	if (rdma_cap_opa_mad(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
	else if (rdma_protocol_ib(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
	else if (rdma_protocol_iwarp(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
	else if (rdma_protocol_roce(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
	else if (rdma_protocol_usnic(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
				     "usnic");
	return ret;
}

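/*
 * Port attributes that only make sense for IB links (LID, SM LID, LMC,
 * subnet prefix, capability flags) are emitted conditionally below; the
 * associated netdev is reported only when it belongs to the requesting
 * net namespace.
 */
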
static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;
	u64 cap_flags = 0;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	if (rdma_protocol_ib(device, port)) {
		BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
				sizeof(attr.port_cap_flags2)) > sizeof(u64));
		cap_flags = attr.port_cap_flags |
			((u64)attr.port_cap_flags2 << 32);
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
				      cap_flags, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	netdev = ib_device_get_netdev(device, port);
	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

out:
	if (netdev)
		dev_put(netdev);
	return ret;
}

static int fill_res_info_entry(struct sk_buff *msg,
			       const char *name, u64 curr)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start_noflag(msg,
					   RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}

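/*
 * The resource summary is a nested table with one entry per tracked
 * resource type, each carrying the type name and its current count.
 */
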
static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
	static const char * const names[RDMA_RESTRACK_MAX] = {
		[RDMA_RESTRACK_PD] = "pd",
		[RDMA_RESTRACK_CQ] = "cq",
		[RDMA_RESTRACK_QP] = "qp",
		[RDMA_RESTRACK_CM_ID] = "cm_id",
		[RDMA_RESTRACK_MR] = "mr",
		[RDMA_RESTRACK_CTX] = "ctx",
	};

	struct nlattr *table_attr;
	int ret, i, curr;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
		if (!names[i])
			continue;
		curr = rdma_restrack_count(device, i);
		ret = fill_res_info_entry(msg, names[i], curr);
		if (ret)
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return ret;
}

static int fill_res_name_pid(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	/*
	 * For user resources, userspace should read /proc/PID/comm to get
	 * the name of the owning task.
	 */
	if (rdma_is_kernel_res(res)) {
		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
				   res->kern_name))
			return -EMSGSIZE;
	} else {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
				task_pid_vnr(res->task)))
			return -EMSGSIZE;
	}

	return 0;
}

static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
			   struct rdma_restrack_entry *res)
{
	if (!dev->ops.fill_res_entry)
		return false;
	return dev->ops.fill_res_entry(msg, res);
}

static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct ib_device *dev = qp->device;
	struct ib_qp_init_attr qp_init_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	if (port && port != qp_attr.port_num)
		return -EAGAIN;

	/* In create_qp() port is not set yet */
	if (qp_attr.port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
		goto err;
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;

	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:	return -EMSGSIZE;
}

static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct ib_device *dev = id_priv->id.device;
	struct rdma_cm_id *cm_id = &id_priv->id;

	if (port && port != cm_id->port_num)
		return 0;

	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;

	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;

	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:	return -EMSGSIZE;
}

static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct ib_device *dev = cq->device;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	/* Poll context is only valid for kernel CQs */
	if (rdma_is_kernel_res(res) &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL)))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
		goto err;
	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
			cq->uobject->context->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:	return -EMSGSIZE;
}

static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_mr *mr = container_of(res, struct ib_mr, res);
	struct ib_device *dev = mr->pd->device;

	if (has_cap_net_admin) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
			goto err;
	}

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
		goto err;

	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:	return -EMSGSIZE;
}

static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_pd *pd = container_of(res, struct ib_pd, res);
	struct ib_device *dev = pd->device;

	if (has_cap_net_admin) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
				pd->local_dma_lkey))
			goto err;
		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
				pd->unsafe_global_rkey))
			goto err;
	}
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
		goto err;

	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
			pd->uobject->context->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:	return -EMSGSIZE;
}

static int fill_stat_counter_mode(struct sk_buff *msg,
				  struct rdma_counter *counter)
{
	struct rdma_counter_mode *m = &counter->mode;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode))
		return -EMSGSIZE;

	if (m->mode == RDMA_COUNTER_MODE_AUTO)
		if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) &&
		    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type))
			return -EMSGSIZE;

	return 0;
}

static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}

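/*
 * Walk the device's QP restrack table and emit one nested entry for
 * every QP currently bound to the given counter.
 */
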
static int fill_stat_counter_qps(struct sk_buff *msg,
				 struct rdma_counter *counter)
{
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	struct nlattr *table_attr;
	struct ib_qp *qp = NULL;
	unsigned long id = 0;
	int ret = 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);

	rt = &counter->device->res[RDMA_RESTRACK_QP];
	xa_lock(&rt->xa);
	xa_for_each(&rt->xa, id, res) {
		if (!rdma_is_visible_in_pid_ns(res))
			continue;

		qp = container_of(res, struct ib_qp, res);
		if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
			continue;

		if (!qp->counter || (qp->counter->id != counter->id))
			continue;

		ret = fill_stat_counter_qp_entry(msg, qp->qp_num);
		if (ret)
			goto err;
	}

	xa_unlock(&rt->xa);
	nla_nest_end(msg, table_attr);
	return 0;

err:
	xa_unlock(&rt->xa);
	nla_nest_cancel(msg, table_attr);
	return ret;
}

static int fill_stat_hwcounter_entry(struct sk_buff *msg,
				     const char *name, u64 value)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
			   name))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE,
			      value, RDMA_NLDEV_ATTR_PAD))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}

static int fill_stat_counter_hwcounters(struct sk_buff *msg,
					struct rdma_counter *counter)
{
	struct rdma_hw_stats *st = counter->stats;
	struct nlattr *table_attr;
	int i;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < st->num_counters; i++)
		if (fill_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
			goto err;

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}

static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
				  struct rdma_restrack_entry *res,
				  uint32_t port)
{
	struct rdma_counter *counter =
		container_of(res, struct rdma_counter, res);

	if (port && port != counter->port)
		return -EAGAIN;

	/* Dump it even if the query failed */
	rdma_counter_query_stats(counter);

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) ||
	    fill_res_name_pid(msg, &counter->res) ||
	    fill_stat_counter_mode(msg, counter) ||
	    fill_stat_counter_qps(msg, counter) ||
	    fill_stat_counter_hwcounters(msg, counter))
		return -EMSGSIZE;

	return 0;
}

static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_dev_info(msg, device);
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);

	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return err;
}

static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
		char name[IB_DEVICE_NAME_MAX] = {};

		nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
			    IB_DEVICE_NAME_MAX);
		err = ib_device_rename(device, name);
		goto done;
	}

	if (tb[RDMA_NLDEV_NET_NS_FD]) {
		u32 ns_fd;

		ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
		err = ib_device_set_netns_put(skb, device, ns_fd);
		goto put_done;
	}

	if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) {
		u8 use_dim;

		use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]);
		err = ib_device_set_dim(device, use_dim);
		goto done;
	}

done:
	ib_device_put(device);
put_done:
	return err;
}

static int _nldev_get_dumpit(struct ib_device *device,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, NLM_F_MULTI);

	if (fill_dev_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:	cb->args[0] = idx;
	return skb->len;
}

static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take lock, because
	 * we are relying on ib_core's locking.
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}

static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	u32 port;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		err = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_port_info(msg, device, port, sock_net(skb->sk));
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);
	ib_device_put(device);

	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return err;
}

static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	unsigned int p;

	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
	if (!device)
		return -EINVAL;

	rdma_for_each_port (device, p) {
		/*
		 * The dumpit function resumes from a specific index, which
		 * is taken from the netlink request sent by the user and is
		 * available in cb->args[0].
		 *
		 * Usually the user doesn't fill this field, so everything
		 * is returned.
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	ib_device_put(device);
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int ret;

	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, 0);

	ret = fill_res_info(msg, device);
	if (ret)
		goto err_free;

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}

static int _nldev_res_get_dumpit(struct ib_device *device,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, NLM_F_MULTI);

	if (fill_res_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}
	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}

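/*
 * Per-resource-type dispatch used by the common RES_GET doit/dumpit
 * handlers below: which fill function to call, which netlink command
 * and nesting attributes to use, and which attribute carries the
 * object ID.
 */
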
struct nldev_fill_res_entry {
	int (*fill_res_func)(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, u32 port);
	enum rdma_nldev_attr nldev_attr;
	enum rdma_nldev_command nldev_cmd;
	u8 flags;
	u32 entry;
	u32 id;
};

enum nldev_res_flags {
	NLDEV_PER_DEV = 1 << 0,
};

static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.fill_res_func = fill_res_qp_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
		.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_LQPN,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.fill_res_func = fill_res_cm_id_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
		.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
	},
	[RDMA_RESTRACK_CQ] = {
		.fill_res_func = fill_res_cq_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CQN,
	},
	[RDMA_RESTRACK_MR] = {
		.fill_res_func = fill_res_mr_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_MRN,
	},
	[RDMA_RESTRACK_PD] = {
		.fill_res_func = fill_res_pd_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_PDN,
	},
	[RDMA_RESTRACK_COUNTER] = {
		.fill_res_func = fill_res_counter_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_STAT_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER,
		.entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
		.id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID,
	},
};

static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack,
			       enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	struct ib_device *device;
	u32 index, id, port = 0;
	bool has_cap_net_admin;
	struct sk_buff *msg;
	int ret;

	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err;
		}
	}

	if ((port && fe->flags & NLDEV_PER_DEV) ||
	    (!port && ~fe->flags & NLDEV_PER_DEV)) {
		ret = -EINVAL;
		goto err;
	}

	id = nla_get_u32(tb[fe->id]);
	res = rdma_restrack_get_byid(device, res_type, id);
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto err;
	}

	if (!rdma_is_visible_in_pid_ns(res)) {
		ret = -ENOENT;
		goto err_get;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err_get;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, 0);

	if (fill_nldev_handle(msg, device)) {
		ret = -EMSGSIZE;
		goto err_free;
	}

	has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
	ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
	rdma_restrack_put(res);
	if (ret) {
		/* The reference was already dropped above */
		nlmsg_free(msg);
		goto err;
	}

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err_get:
	rdma_restrack_put(res);
err:
	ib_device_put(device);
	return ret;
}

static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct nlattr *entry_attr;
	struct ib_device *device;
	int start = cb->args[0];
	bool has_cap_net_admin;
	struct nlmsghdr *nlh;
	unsigned long id;
	u32 index, port = 0;
	bool filled = false;

	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, NULL);
	/*
	 * Right now, we are expecting the device index to get res information,
	 * but it is possible to extend this code to return all devices in
	 * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX.
	 * If it doesn't exist, we will iterate over all devices.
	 *
	 * But it is not needed for now.
	 */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	/*
	 * If no PORT_INDEX is supplied, we will return all QPs from that device
	 */
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, NLM_F_MULTI);

	if (fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);

	rt = &device->res[res_type];
	xa_lock(&rt->xa);
	/*
	 * FIXME: if the skip ahead is something common this loop should
	 * use xas_for_each & xas_pause to optimize, we can have a lot of
	 * objects.
	 */
	xa_for_each(&rt->xa, id, res) {
		if (!rdma_is_visible_in_pid_ns(res))
			continue;

		if (idx < start || !rdma_restrack_get(res))
			goto next;

		xa_unlock(&rt->xa);

		filled = true;

		entry_attr = nla_nest_start_noflag(skb, fe->entry);
		if (!entry_attr) {
			ret = -EMSGSIZE;
			rdma_restrack_put(res);
			goto msg_full;
		}

		ret = fe->fill_res_func(skb, has_cap_net_admin, res, port);
		rdma_restrack_put(res);

		if (ret) {
			nla_nest_cancel(skb, entry_attr);
			if (ret == -EMSGSIZE)
				goto msg_full;
			if (ret == -EAGAIN)
				goto again;
			goto res_err;
		}
		nla_nest_end(skb, entry_attr);
again:		xa_lock(&rt->xa);
next:		idx++;
	}
	xa_unlock(&rt->xa);

msg_full:
	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;

	/*
	 * No more entries to fill, cancel the message and
	 * return 0 to mark end of dumpit.
	 */
	if (!filled)
		goto err;

	ib_device_put(device);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);

err:
	nlmsg_cancel(skb, nlh);

err_index:
	ib_device_put(device);
	return ret;
}

#define RES_GET_FUNCS(name, type)					       \
	static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,	       \
						 struct netlink_callback *cb)  \
	{								       \
		return res_get_common_dumpit(skb, cb, type);		       \
	}								       \
	static int nldev_res_get_##name##_doit(struct sk_buff *skb,	       \
					       struct nlmsghdr *nlh,	       \
					       struct netlink_ext_ack *extack)\
	{								       \
		return res_get_common_doit(skb, nlh, extack, type);	       \
	}

RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);

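/*
 * Each RES_GET_FUNCS() invocation above expands to a doit/dumpit pair,
 * e.g. nldev_res_get_qp_doit() and nldev_res_get_qp_dumpit(), which are
 * wired into nldev_cb_table at the bottom of this file.
 */
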
static LIST_HEAD(link_ops);
static DECLARE_RWSEM(link_ops_rwsem);

static const struct rdma_link_ops *link_ops_get(const char *type)
{
	const struct rdma_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->type, type))
			goto out;
	}
	ops = NULL;
out:
	return ops;
}

void rdma_link_register(struct rdma_link_ops *ops)
{
	down_write(&link_ops_rwsem);
	if (WARN_ON_ONCE(link_ops_get(ops->type)))
		goto out;
	list_add(&ops->list, &link_ops);
out:
	up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_register);

void rdma_link_unregister(struct rdma_link_ops *ops)
{
	down_write(&link_ops_rwsem);
	list_del(&ops->list);
	up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_unregister);

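/*
 * Example (hypothetical provider, modeled on the soft-RDMA drivers): a
 * module registers its link ops at init time so that NEWLINK requests
 * naming its type can be dispatched to it:
 *
 *	static struct rdma_link_ops mydrv_link_ops = {
 *		.type = "mydrv",
 *		.newlink = mydrv_newlink,
 *	};
 *
 *	rdma_link_register(&mydrv_link_ops);
 *
 * With a matching MODULE_ALIAS_RDMA_LINK("mydrv"), the module can also
 * be auto-loaded by the request_module() call in nldev_newlink() below.
 */
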
static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	char ibdev_name[IB_DEVICE_NAME_MAX];
	const struct rdma_link_ops *ops;
	char ndev_name[IFNAMSIZ];
	struct net_device *ndev;
	char type[IFNAMSIZ];
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
	    !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
		return -EINVAL;

	nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
		    sizeof(ibdev_name));
	if (strchr(ibdev_name, '%'))
		return -EINVAL;

	nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
	nla_strlcpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
		    sizeof(ndev_name));

	ndev = dev_get_by_name(sock_net(skb->sk), ndev_name);
	if (!ndev)
		return -ENODEV;

	down_read(&link_ops_rwsem);
	ops = link_ops_get(type);
#ifdef CONFIG_MODULES
	if (!ops) {
		up_read(&link_ops_rwsem);
		request_module("rdma-link-%s", type);
		down_read(&link_ops_rwsem);
		ops = link_ops_get(type);
	}
#endif
	err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
	up_read(&link_ops_rwsem);
	dev_put(ndev);

	return err;
}

static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
		ib_device_put(device);
		return -EINVAL;
	}

	ib_unregister_device_and_put(device);
	return 0;
}

static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE];
	struct ib_client_nl_info data = {};
	struct ib_device *ibdev = NULL;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
			  extack);
	if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
		return -EINVAL;

	nla_strlcpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
		    sizeof(client_name));

	if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
		index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
		ibdev = ib_device_get_by_index(sock_net(skb->sk), index);
		if (!ibdev)
			return -EINVAL;

		if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
			data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
			if (!rdma_is_port_valid(ibdev, data.port)) {
				err = -EINVAL;
				goto out_put;
			}
		} else {
			data.port = -1;
		}
	} else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		return -EINVAL;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out_put;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_GET_CHARDEV),
			0, 0);

	data.nl_msg = msg;
	err = ib_get_client_nl_info(ibdev, client_name, &data);
	if (err)
		goto out_nlmsg;

	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV,
				huge_encode_dev(data.cdev->devt),
				RDMA_NLDEV_ATTR_PAD);
	if (err)
		goto out_data;
	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi,
				RDMA_NLDEV_ATTR_PAD);
	if (err)
		goto out_data;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME,
			   dev_name(data.cdev))) {
		err = -EMSGSIZE;
		goto out_data;
	}

	nlmsg_end(msg, nlh);
	put_device(data.cdev);
	if (ibdev)
		ib_device_put(ibdev);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

out_data:
	put_device(data.cdev);
out_nlmsg:
	nlmsg_free(msg);
out_put:
	if (ibdev)
		ib_device_put(ibdev);
	return err;
}

static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct sk_buff *msg;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err)
		return err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_SYS_GET),
			0, 0);

	err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
			 (u8)ib_devices_shared_netns);
	if (err) {
		nlmsg_free(msg);
		return err;
	}
	nlmsg_end(msg, nlh);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
}

static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	u8 enable;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
		return -EINVAL;

	enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
	/* Only 0 and 1 are supported */
	if (enable > 1)
		return -EINVAL;

	err = rdma_compatdev_set(enable);
	return err;
}

static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	u32 index, port, mode, mask = 0, qpn, cntn = 0;
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	/* Currently only counter for QP is supported */
	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_MODE])
		return -EINVAL;

	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_SET),
			0, 0);

	mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
	if (mode == RDMA_COUNTER_MODE_AUTO) {
		if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
			mask = nla_get_u32(
				tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);

		ret = rdma_counter_set_auto_mode(device, port,
						 mask ? true : false, mask);
		if (ret)
			goto err_msg;
	} else {
		/* Manual mode needs the QP number to bind */
		if (!tb[RDMA_NLDEV_ATTR_RES_LQPN]) {
			ret = -EINVAL;
			goto err_msg;
		}
		qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
		if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
			cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
			ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
		} else {
			ret = rdma_counter_bind_qpn_alloc(device, port,
							  qpn, &cntn);
		}
		if (ret)
			goto err_msg;

		if (fill_nldev_handle(msg, device) ||
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
			ret = -EMSGSIZE;
			goto err_fill;
		}
	}

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_fill:
	rdma_counter_unbind_qpn(device, port, qpn, cntn);
err_msg:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}

static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index, port, qpn, cntn;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] ||
	    !tb[RDMA_NLDEV_ATTR_RES_LQPN])
		return -EINVAL;

	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_SET),
			0, 0);

	cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
	qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
	ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
	if (ret)
		goto err_unbind;

	if (fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
		ret = -EMSGSIZE;
		goto err_fill;
	}

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_fill:
	rdma_counter_bind_qpn(device, port, qpn, cntn);
err_unbind:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}

static int stat_get_doit_default_counter(struct sk_buff *skb,
					 struct nlmsghdr *nlh,
					 struct netlink_ext_ack *extack,
					 struct nlattr *tb[])
{
	struct rdma_hw_stats *stats;
	struct nlattr *table_attr;
	struct ib_device *device;
	int ret, num_cnts, i;
	struct sk_buff *msg;
	u32 index, port;
	u64 v;

	if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (!device->ops.alloc_hw_stats || !device->ops.get_hw_stats) {
		ret = -EINVAL;
		goto err;
	}

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_GET),
			0, 0);

	if (fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
		ret = -EMSGSIZE;
		goto err_msg;
	}

	stats = device->port_data ? device->port_data[port].hw_stats : NULL;
	if (stats == NULL) {
		ret = -EINVAL;
		goto err_msg;
	}
	mutex_lock(&stats->lock);

	num_cnts = device->ops.get_hw_stats(device, stats, port, 0);
	if (num_cnts < 0) {
		ret = -EINVAL;
		goto err_stats;
	}

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err_stats;
	}
	for (i = 0; i < num_cnts; i++) {
		v = stats->value[i] +
			rdma_counter_get_hwstat_value(device, port, i);
		if (fill_stat_hwcounter_entry(msg, stats->names[i], v)) {
			ret = -EMSGSIZE;
			goto err_table;
		}
	}
	nla_nest_end(msg, table_attr);

	mutex_unlock(&stats->lock);
	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_table:
	nla_nest_cancel(msg, table_attr);
err_stats:
	mutex_unlock(&stats->lock);
err_msg:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}

static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack, struct nlattr *tb[])
{
	static enum rdma_nl_counter_mode mode;
	static enum rdma_nl_counter_mask mask;
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index, port;
	int ret;

	if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID])
		return nldev_res_get_counter_doit(skb, nlh, extack);

	if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_GET),
			0, 0);

	ret = rdma_counter_get_mode(device, port, &mode, &mask);
	if (ret)
		goto err_msg;

	if (fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
		ret = -EMSGSIZE;
		goto err_msg;
	}

	if ((mode == RDMA_COUNTER_MODE_AUTO) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
		ret = -EMSGSIZE;
		goto err_msg;
	}

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_msg:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}

static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret)
		return -EINVAL;

	if (!tb[RDMA_NLDEV_ATTR_STAT_RES])
		return stat_get_doit_default_counter(skb, nlh, extack, tb);

	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
	case RDMA_NLDEV_ATTR_RES_QP:
		ret = stat_get_doit_qp(skb, nlh, extack, tb);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int nldev_stat_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	int ret;

	ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
		return -EINVAL;

	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
	case RDMA_NLDEV_ATTR_RES_QP:
		ret = nldev_res_get_counter_dumpit(skb, cb);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_GET_CHARDEV] = {
		.doit = nldev_get_chardev,
	},
	[RDMA_NLDEV_CMD_SET] = {
		.doit = nldev_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_NEWLINK] = {
		.doit = nldev_newlink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_DELLINK] = {
		.doit = nldev_dellink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.doit = nldev_res_get_qp_doit,
		.dump = nldev_res_get_qp_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.doit = nldev_res_get_cm_id_doit,
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.doit = nldev_res_get_cq_doit,
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.doit = nldev_res_get_mr_doit,
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.doit = nldev_res_get_pd_doit,
		.dump = nldev_res_get_pd_dumpit,
	},
	[RDMA_NLDEV_CMD_SYS_GET] = {
		.doit = nldev_sys_get_doit,
	},
	[RDMA_NLDEV_CMD_SYS_SET] = {
		.doit = nldev_set_sys_set_doit,
	},
	[RDMA_NLDEV_CMD_STAT_SET] = {
		.doit = nldev_stat_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_STAT_GET] = {
		.doit = nldev_stat_get_doit,
		.dump = nldev_stat_get_dumpit,
	},
	[RDMA_NLDEV_CMD_STAT_DEL] = {
		.doit = nldev_stat_del_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};

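/*
 * These operations back the iproute2 "rdma" tool: for example,
 * "rdma dev show" issues RDMA_NLDEV_CMD_GET, "rdma link add ... type ...
 * netdev ..." issues RDMA_NLDEV_CMD_NEWLINK, "rdma resource show qp"
 * issues RDMA_NLDEV_CMD_RES_QP_GET and "rdma statistic show" issues
 * RDMA_NLDEV_CMD_STAT_GET.
 */
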
void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);