/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/mutex.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"
#include "cma_priv.h"
#include "restrack.h"
#include "uverbs.h"

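/*
 * Callback shared by the doit and dumpit paths below; it fills one restrack
 * object into the supplied netlink message.
 */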
typedef int (*res_fill_func_t)(struct sk_buff*, bool,
                               struct rdma_restrack_entry*, uint32_t);

/*
 * Sort array elements by the netlink attribute name
 */
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
        [RDMA_NLDEV_ATTR_CHARDEV] = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_CHARDEV_ABI] = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_CHARDEV_NAME] = { .type = NLA_NUL_STRING,
                                .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
        [RDMA_NLDEV_ATTR_CHARDEV_TYPE] = { .type = NLA_NUL_STRING,
                                .len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE },
        [RDMA_NLDEV_ATTR_DEV_DIM] = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING,
                                .len = IB_DEVICE_NAME_MAX },
        [RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_DEV_PROTOCOL] = { .type = NLA_NUL_STRING,
                                .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
        [RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING,
                                .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
        [RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 },
        [RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 },
        [RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING,
                                .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
        [RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_LINK_TYPE] = { .type = NLA_NUL_STRING,
                                .len = IFNAMSIZ },
        [RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING,
                                .len = IFNAMSIZ },
        [RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_RES_CM_ID] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_CM_IDN] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_CQ] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_CQN] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_DST_ADDR] = {
                        .len = sizeof(struct __kernel_sockaddr_storage) },
        [RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING,
                                .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
        [RDMA_NLDEV_ATTR_RES_LKEY] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_MR] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_MRLEN] = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_RES_MRN] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_MR_ENTRY] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_RES_PD] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_PDN] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_PD_ENTRY] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_POLL_CTX] = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_RAW] = { .type = NLA_BINARY },
        [RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_SRC_ADDR] = {
                        .len = sizeof(struct __kernel_sockaddr_storage) },
        [RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
                                .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
        [RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_STAT_MODE] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_STAT_RES] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_STAT_COUNTER] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_STAT_COUNTER_ID] = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_STAT_HWCOUNTERS] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY] = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING },
        [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID] = { .type = NLA_U32 },
        [RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 },
        [RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 },
};

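/*
 * Helpers for drivers to add RDMA_NLDEV_ATTR_DRIVER_* attributes to a
 * message: each value is emitted together with its name string and, for the
 * hex variants, a print-type attribute describing how to render it.
 */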
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
                                      enum rdma_nldev_print_type print_type)
{
        if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
                return -EMSGSIZE;
        if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
            nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
                return -EMSGSIZE;

        return 0;
}

static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
                                   enum rdma_nldev_print_type print_type,
                                   u32 value)
{
        if (put_driver_name_print_type(msg, name, print_type))
                return -EMSGSIZE;
        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
                return -EMSGSIZE;

        return 0;
}

static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
                                   enum rdma_nldev_print_type print_type,
                                   u64 value)
{
        if (put_driver_name_print_type(msg, name, print_type))
                return -EMSGSIZE;
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
                              RDMA_NLDEV_ATTR_PAD))
                return -EMSGSIZE;

        return 0;
}

int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
                              const char *str)
{
        if (put_driver_name_print_type(msg, name,
                                       RDMA_NLDEV_PRINT_TYPE_UNSPEC))
                return -EMSGSIZE;
        if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str))
                return -EMSGSIZE;

        return 0;
}
EXPORT_SYMBOL(rdma_nl_put_driver_string);

int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
        return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
                                       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32);

int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
                               u32 value)
{
        return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
                                       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);

int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
{
        return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
                                       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64);

int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
{
        return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
                                       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);

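/*
 * Every response starts with the device index and name so that userspace
 * can match the reply to a specific ib_device.
 */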
static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
                return -EMSGSIZE;
        if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
                           dev_name(&device->dev)))
                return -EMSGSIZE;

        return 0;
}

static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
        char fw[IB_FW_VERSION_NAME_MAX];
        int ret = 0;
        u32 port;

        if (fill_nldev_handle(msg, device))
                return -EMSGSIZE;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
                return -EMSGSIZE;

        BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
                              device->attrs.device_cap_flags,
                              RDMA_NLDEV_ATTR_PAD))
                return -EMSGSIZE;

        ib_get_device_fw_str(device, fw);
        /* Device without FW has strlen(fw) = 0 */
        if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
                return -EMSGSIZE;

        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
                              be64_to_cpu(device->node_guid),
                              RDMA_NLDEV_ATTR_PAD))
                return -EMSGSIZE;
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
                              be64_to_cpu(device->attrs.sys_image_guid),
                              RDMA_NLDEV_ATTR_PAD))
                return -EMSGSIZE;
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
                return -EMSGSIZE;
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim))
                return -EMSGSIZE;

        /*
         * Link type is determined from the first port. An mlx4 device, which
         * can potentially have two different link types on the same IB
         * device, is considered something best avoided in the future.
         */
        port = rdma_start_port(device);
        if (rdma_cap_opa_mad(device, port))
                ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
        else if (rdma_protocol_ib(device, port))
                ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
        else if (rdma_protocol_iwarp(device, port))
                ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
        else if (rdma_protocol_roce(device, port))
                ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
        else if (rdma_protocol_usnic(device, port))
                ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
                                     "usnic");
        return ret;
}

static int fill_port_info(struct sk_buff *msg,
                          struct ib_device *device, u32 port,
                          const struct net *net)
{
        struct net_device *netdev = NULL;
        struct ib_port_attr attr;
        int ret;
        u64 cap_flags = 0;

        if (fill_nldev_handle(msg, device))
                return -EMSGSIZE;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
                return -EMSGSIZE;

        ret = ib_query_port(device, port, &attr);
        if (ret)
                return ret;

        if (rdma_protocol_ib(device, port)) {
                BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
                                sizeof(attr.port_cap_flags2)) > sizeof(u64));
                cap_flags = attr.port_cap_flags |
                        ((u64)attr.port_cap_flags2 << 32);
                if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
                                      cap_flags, RDMA_NLDEV_ATTR_PAD))
                        return -EMSGSIZE;
                if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
                                      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
                        return -EMSGSIZE;
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
                        return -EMSGSIZE;
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
                        return -EMSGSIZE;
                if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
                        return -EMSGSIZE;
        }
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
                return -EMSGSIZE;
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
                return -EMSGSIZE;

        netdev = ib_device_get_netdev(device, port);
        if (netdev && net_eq(dev_net(netdev), net)) {
                ret = nla_put_u32(msg,
                                  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
                if (ret)
                        goto out;
                ret = nla_put_string(msg,
                                     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
        }

out:
        if (netdev)
                dev_put(netdev);
        return ret;
}

static int fill_res_info_entry(struct sk_buff *msg,
                               const char *name, u64 curr)
{
        struct nlattr *entry_attr;

        entry_attr = nla_nest_start_noflag(msg,
                                           RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
        if (!entry_attr)
                return -EMSGSIZE;

        if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
                goto err;
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
                              RDMA_NLDEV_ATTR_PAD))
                goto err;

        nla_nest_end(msg, entry_attr);
        return 0;

err:
        nla_nest_cancel(msg, entry_attr);
        return -EMSGSIZE;
}

static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
        static const char * const names[RDMA_RESTRACK_MAX] = {
                [RDMA_RESTRACK_PD] = "pd",
                [RDMA_RESTRACK_CQ] = "cq",
                [RDMA_RESTRACK_QP] = "qp",
                [RDMA_RESTRACK_CM_ID] = "cm_id",
                [RDMA_RESTRACK_MR] = "mr",
                [RDMA_RESTRACK_CTX] = "ctx",
        };

        struct nlattr *table_attr;
        int ret, i, curr;

        if (fill_nldev_handle(msg, device))
                return -EMSGSIZE;

        table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
        if (!table_attr)
                return -EMSGSIZE;

        for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
                if (!names[i])
                        continue;
                curr = rdma_restrack_count(device, i);
                ret = fill_res_info_entry(msg, names[i], curr);
                if (ret)
                        goto err;
        }

        nla_nest_end(msg, table_attr);
        return 0;

err:
        nla_nest_cancel(msg, table_attr);
        return ret;
}

static int fill_res_name_pid(struct sk_buff *msg,
                             struct rdma_restrack_entry *res)
{
        int err = 0;

        /*
         * For user resources, the user should read /proc/PID/comm to get the
         * name of the task.
         */
        if (rdma_is_kernel_res(res)) {
                err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
                                     res->kern_name);
        } else {
                pid_t pid;

                pid = task_pid_vnr(res->task);
                /*
                 * Task is dead and in zombie state.
                 * There is no need to print PID anymore.
                 */
                if (pid)
                        /*
                         * This part is racy, task can be killed and PID will
                         * be zero right here but it is ok, next query won't
                         * return PID. We don't promise real-time reflection
                         * of SW objects.
                         */
                        err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid);
        }

        return err ? -EMSGSIZE : 0;
}

static int fill_res_qp_entry_query(struct sk_buff *msg,
                                   struct rdma_restrack_entry *res,
                                   struct ib_device *dev,
                                   struct ib_qp *qp)
{
        struct ib_qp_init_attr qp_init_attr;
        struct ib_qp_attr qp_attr;
        int ret;

        ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
        if (ret)
                return ret;

        if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
                                qp_attr.dest_qp_num))
                        goto err;
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
                                qp_attr.rq_psn))
                        goto err;
        }

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
                goto err;

        if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
            qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
                if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
                               qp_attr.path_mig_state))
                        goto err;
        }
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
                goto err;
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
                goto err;

        if (dev->ops.fill_res_qp_entry)
                return dev->ops.fill_res_qp_entry(msg, qp);
        return 0;

err:    return -EMSGSIZE;
}

static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
                             struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_qp *qp = container_of(res, struct ib_qp, res);
        struct ib_device *dev = qp->device;
        int ret;

        if (port && port != qp->port)
                return -EAGAIN;

        /* In create_qp() port is not set yet */
        if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port))
                return -EMSGSIZE;

        ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num);
        if (ret)
                return -EMSGSIZE;

        if (!rdma_is_kernel_res(res) &&
            nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
                return -EMSGSIZE;

        ret = fill_res_name_pid(msg, res);
        if (ret)
                return -EMSGSIZE;

        return fill_res_qp_entry_query(msg, res, dev, qp);
}

static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
                                 struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_qp *qp = container_of(res, struct ib_qp, res);
        struct ib_device *dev = qp->device;

        if (port && port != qp->port)
                return -EAGAIN;
        if (!dev->ops.fill_res_qp_entry_raw)
                return -EINVAL;
        return dev->ops.fill_res_qp_entry_raw(msg, qp);
}

static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
                                struct rdma_restrack_entry *res, uint32_t port)
{
        struct rdma_id_private *id_priv =
                                container_of(res, struct rdma_id_private, res);
        struct ib_device *dev = id_priv->id.device;
        struct rdma_cm_id *cm_id = &id_priv->id;

        if (port && port != cm_id->port_num)
                return 0;

        if (cm_id->port_num &&
            nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
                goto err;

        if (id_priv->qp_num) {
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
                        goto err;
                if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
                        goto err;
        }

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
                goto err;

        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
                goto err;

        if (cm_id->route.addr.src_addr.ss_family &&
            nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
                    sizeof(cm_id->route.addr.src_addr),
                    &cm_id->route.addr.src_addr))
                goto err;
        if (cm_id->route.addr.dst_addr.ss_family &&
            nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
                    sizeof(cm_id->route.addr.dst_addr),
                    &cm_id->route.addr.dst_addr))
                goto err;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
                goto err;

        if (fill_res_name_pid(msg, res))
                goto err;

        if (dev->ops.fill_res_cm_id_entry)
                return dev->ops.fill_res_cm_id_entry(msg, cm_id);
        return 0;

err:    return -EMSGSIZE;
}

static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
                             struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_cq *cq = container_of(res, struct ib_cq, res);
        struct ib_device *dev = cq->device;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
                return -EMSGSIZE;
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
                              atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
                return -EMSGSIZE;

        /* Poll context is only valid for kernel CQs */
        if (rdma_is_kernel_res(res) &&
            nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
                return -EMSGSIZE;

        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL)))
                return -EMSGSIZE;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
                return -EMSGSIZE;
        if (!rdma_is_kernel_res(res) &&
            nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
                        cq->uobject->uevent.uobject.context->res.id))
                return -EMSGSIZE;

        if (fill_res_name_pid(msg, res))
                return -EMSGSIZE;

        return (dev->ops.fill_res_cq_entry) ?
                dev->ops.fill_res_cq_entry(msg, cq) : 0;
}

static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
                                 struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_cq *cq = container_of(res, struct ib_cq, res);
        struct ib_device *dev = cq->device;

        if (!dev->ops.fill_res_cq_entry_raw)
                return -EINVAL;
        return dev->ops.fill_res_cq_entry_raw(msg, cq);
}

static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
                             struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_mr *mr = container_of(res, struct ib_mr, res);
        struct ib_device *dev = mr->pd->device;

        if (has_cap_net_admin) {
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
                        return -EMSGSIZE;
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
                        return -EMSGSIZE;
        }

        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
                              RDMA_NLDEV_ATTR_PAD))
                return -EMSGSIZE;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
                return -EMSGSIZE;

        if (!rdma_is_kernel_res(res) &&
            nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
                return -EMSGSIZE;

        if (fill_res_name_pid(msg, res))
                return -EMSGSIZE;

        return (dev->ops.fill_res_mr_entry) ?
                       dev->ops.fill_res_mr_entry(msg, mr) :
                       0;
}

static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
                                 struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_mr *mr = container_of(res, struct ib_mr, res);
        struct ib_device *dev = mr->pd->device;

        if (!dev->ops.fill_res_mr_entry_raw)
                return -EINVAL;
        return dev->ops.fill_res_mr_entry_raw(msg, mr);
}

static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
                             struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_pd *pd = container_of(res, struct ib_pd, res);

        if (has_cap_net_admin) {
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
                                pd->local_dma_lkey))
                        goto err;
                if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
                    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
                                pd->unsafe_global_rkey))
                        goto err;
        }
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
                              atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
                goto err;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
                goto err;

        if (!rdma_is_kernel_res(res) &&
            nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
                        pd->uobject->context->res.id))
                goto err;

        return fill_res_name_pid(msg, res);

err:    return -EMSGSIZE;
}

static int fill_stat_counter_mode(struct sk_buff *msg,
                                  struct rdma_counter *counter)
{
        struct rdma_counter_mode *m = &counter->mode;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode))
                return -EMSGSIZE;

        if (m->mode == RDMA_COUNTER_MODE_AUTO) {
                if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) &&
                    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type))
                        return -EMSGSIZE;

                if ((m->mask & RDMA_COUNTER_MASK_PID) &&
                    fill_res_name_pid(msg, &counter->res))
                        return -EMSGSIZE;
        }

        return 0;
}

static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn)
{
        struct nlattr *entry_attr;

        entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
        if (!entry_attr)
                return -EMSGSIZE;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn))
                goto err;

        nla_nest_end(msg, entry_attr);
        return 0;

err:
        nla_nest_cancel(msg, entry_attr);
        return -EMSGSIZE;
}

static int fill_stat_counter_qps(struct sk_buff *msg,
                                 struct rdma_counter *counter)
{
        struct rdma_restrack_entry *res;
        struct rdma_restrack_root *rt;
        struct nlattr *table_attr;
        struct ib_qp *qp = NULL;
        unsigned long id = 0;
        int ret = 0;

        table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);

        rt = &counter->device->res[RDMA_RESTRACK_QP];
        xa_lock(&rt->xa);
        xa_for_each(&rt->xa, id, res) {
                qp = container_of(res, struct ib_qp, res);
                if (!qp->counter || (qp->counter->id != counter->id))
                        continue;

                ret = fill_stat_counter_qp_entry(msg, qp->qp_num);
                if (ret)
                        goto err;
        }

        xa_unlock(&rt->xa);
        nla_nest_end(msg, table_attr);
        return 0;

err:
        xa_unlock(&rt->xa);
        nla_nest_cancel(msg, table_attr);
        return ret;
}

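/*
 * Emit one named 64-bit hardware counter as a nested
 * RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY. Exported so that drivers can add
 * their own entries to a stat message.
 */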
int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
                                 u64 value)
{
        struct nlattr *entry_attr;

        entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
        if (!entry_attr)
                return -EMSGSIZE;

        if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
                           name))
                goto err;
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE,
                              value, RDMA_NLDEV_ATTR_PAD))
                goto err;

        nla_nest_end(msg, entry_attr);
        return 0;

err:
        nla_nest_cancel(msg, entry_attr);
        return -EMSGSIZE;
}
EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry);

static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
                              struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_mr *mr = container_of(res, struct ib_mr, res);
        struct ib_device *dev = mr->pd->device;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
                goto err;

        if (dev->ops.fill_stat_mr_entry)
                return dev->ops.fill_stat_mr_entry(msg, mr);
        return 0;

err:
        return -EMSGSIZE;
}

static int fill_stat_counter_hwcounters(struct sk_buff *msg,
                                        struct rdma_counter *counter)
{
        struct rdma_hw_stats *st = counter->stats;
        struct nlattr *table_attr;
        int i;

        table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
        if (!table_attr)
                return -EMSGSIZE;

        for (i = 0; i < st->num_counters; i++)
                if (rdma_nl_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
                        goto err;

        nla_nest_end(msg, table_attr);
        return 0;

err:
        nla_nest_cancel(msg, table_attr);
        return -EMSGSIZE;
}

static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
                                  struct rdma_restrack_entry *res,
                                  uint32_t port)
{
        struct rdma_counter *counter =
                container_of(res, struct rdma_counter, res);

        if (port && port != counter->port)
                return -EAGAIN;

        /* Dump it even if the query failed */
        rdma_counter_query_stats(counter);

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) ||
            fill_stat_counter_mode(msg, counter) ||
            fill_stat_counter_qps(msg, counter) ||
            fill_stat_counter_hwcounters(msg, counter))
                return -EMSGSIZE;

        return 0;
}

static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                          struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        struct sk_buff *msg;
        u32 index;
        int err;

        err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                err = -ENOMEM;
                goto err;
        }

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
                        0, 0);

        err = fill_dev_info(msg, device);
        if (err)
                goto err_free;

        nlmsg_end(msg, nlh);

        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
        nlmsg_free(msg);
err:
        ib_device_put(device);
        return err;
}

static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                          struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        u32 index;
        int err;

        err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
                char name[IB_DEVICE_NAME_MAX] = {};

                nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
                            IB_DEVICE_NAME_MAX);
                if (strlen(name) == 0) {
                        err = -EINVAL;
                        goto done;
                }
                err = ib_device_rename(device, name);
                goto done;
        }

        if (tb[RDMA_NLDEV_NET_NS_FD]) {
                u32 ns_fd;

                ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
                err = ib_device_set_netns_put(skb, device, ns_fd);
                goto put_done;
        }

        if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) {
                u8 use_dim;

                use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]);
                err = ib_device_set_dim(device, use_dim);
                goto done;
        }

done:
        ib_device_put(device);
put_done:
        return err;
}

static int _nldev_get_dumpit(struct ib_device *device,
                             struct sk_buff *skb,
                             struct netlink_callback *cb,
                             unsigned int idx)
{
        int start = cb->args[0];
        struct nlmsghdr *nlh;

        if (idx < start)
                return 0;

        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
                        0, NLM_F_MULTI);

        if (fill_dev_info(skb, device)) {
                nlmsg_cancel(skb, nlh);
                goto out;
        }
        nlmsg_end(skb, nlh);

        idx++;

out:    cb->args[0] = idx;
        return skb->len;
}

static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        /*
         * There is no need to take a lock here; we rely on
         * ib_core's locking.
         */
        return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}

static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                               struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        struct sk_buff *msg;
        u32 index;
        u32 port;
        int err;

        err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, extack);
        if (err ||
            !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
            !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
        if (!rdma_is_port_valid(device, port)) {
                err = -EINVAL;
                goto err;
        }

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                err = -ENOMEM;
                goto err;
        }

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
                        0, 0);

        err = fill_port_info(msg, device, port, sock_net(skb->sk));
        if (err)
                goto err_free;

        nlmsg_end(msg, nlh);
        ib_device_put(device);

        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
        nlmsg_free(msg);
err:
        ib_device_put(device);
        return err;
}

static int nldev_port_get_dumpit(struct sk_buff *skb,
                                 struct netlink_callback *cb)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        int start = cb->args[0];
        struct nlmsghdr *nlh;
        u32 idx = 0;
        u32 ifindex;
        int err;
        unsigned int p;

        err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, NULL);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
        if (!device)
                return -EINVAL;

        rdma_for_each_port (device, p) {
                /*
                 * The dumpit function returns all information from the
                 * index given in the netlink request, which is kept in
                 * cb->args[0].
                 *
                 * Usually the user doesn't set this field, so the dump
                 * returns everything.
                 */
                if (idx < start) {
                        idx++;
                        continue;
                }

                nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq,
                                RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                                 RDMA_NLDEV_CMD_PORT_GET),
                                0, NLM_F_MULTI);

                if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
                        nlmsg_cancel(skb, nlh);
                        goto out;
                }
                idx++;
                nlmsg_end(skb, nlh);
        }

out:
        ib_device_put(device);
        cb->args[0] = idx;
        return skb->len;
}

static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                              struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        struct sk_buff *msg;
        u32 index;
        int ret;

        ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, extack);
        if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
                goto err;
        }

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
                        0, 0);

        ret = fill_res_info(msg, device);
        if (ret)
                goto err_free;

        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
        nlmsg_free(msg);
err:
        ib_device_put(device);
        return ret;
}

static int _nldev_res_get_dumpit(struct ib_device *device,
                                 struct sk_buff *skb,
                                 struct netlink_callback *cb,
                                 unsigned int idx)
{
        int start = cb->args[0];
        struct nlmsghdr *nlh;

        if (idx < start)
                return 0;

        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
                        0, NLM_F_MULTI);

        if (fill_res_info(skb, device)) {
                nlmsg_cancel(skb, nlh);
                goto out;
        }
        nlmsg_end(skb, nlh);

        idx++;

out:
        cb->args[0] = idx;
        return skb->len;
}

static int nldev_res_get_dumpit(struct sk_buff *skb,
                                struct netlink_callback *cb)
{
        return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}

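/*
 * Per-restrack-type description of the netlink attributes used when dumping
 * that resource: the nesting table attribute, the per-object entry attribute
 * and the attribute carrying the object ID.
 */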
struct nldev_fill_res_entry {
        enum rdma_nldev_attr nldev_attr;
        u8 flags;
        u32 entry;
        u32 id;
};

enum nldev_res_flags {
        NLDEV_PER_DEV = 1 << 0,
};

static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
        [RDMA_RESTRACK_QP] = {
                .nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
                .entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
                .id = RDMA_NLDEV_ATTR_RES_LQPN,
        },
        [RDMA_RESTRACK_CM_ID] = {
                .nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
                .entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
                .id = RDMA_NLDEV_ATTR_RES_CM_IDN,
        },
        [RDMA_RESTRACK_CQ] = {
                .nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
                .flags = NLDEV_PER_DEV,
                .entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
                .id = RDMA_NLDEV_ATTR_RES_CQN,
        },
        [RDMA_RESTRACK_MR] = {
                .nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
                .flags = NLDEV_PER_DEV,
                .entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
                .id = RDMA_NLDEV_ATTR_RES_MRN,
        },
        [RDMA_RESTRACK_PD] = {
                .nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
                .flags = NLDEV_PER_DEV,
                .entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
                .id = RDMA_NLDEV_ATTR_RES_PDN,
        },
        [RDMA_RESTRACK_COUNTER] = {
                .nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER,
                .entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
                .id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID,
        },
};

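/*
 * Common doit path for all restrack resource types: look the object up by
 * device index and ID, then let the type-specific fill_func dump it into a
 * freshly allocated message.
 */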
static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                               struct netlink_ext_ack *extack,
                               enum rdma_restrack_type res_type,
                               res_fill_func_t fill_func)
{
        const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct rdma_restrack_entry *res;
        struct ib_device *device;
        u32 index, id, port = 0;
        bool has_cap_net_admin;
        struct sk_buff *msg;
        int ret;

        ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, extack);
        if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
                port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
                if (!rdma_is_port_valid(device, port)) {
                        ret = -EINVAL;
                        goto err;
                }
        }

        if ((port && fe->flags & NLDEV_PER_DEV) ||
            (!port && ~fe->flags & NLDEV_PER_DEV)) {
                ret = -EINVAL;
                goto err;
        }

        id = nla_get_u32(tb[fe->id]);
        res = rdma_restrack_get_byid(device, res_type, id);
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto err;
        }

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
                goto err_get;
        }

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NL_GET_OP(nlh->nlmsg_type)),
                        0, 0);

        if (fill_nldev_handle(msg, device)) {
                ret = -EMSGSIZE;
                goto err_free;
        }

        has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);

        ret = fill_func(msg, has_cap_net_admin, res, port);
        if (ret)
                goto err_free;

        rdma_restrack_put(res);
        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
        nlmsg_free(msg);
err_get:
        rdma_restrack_put(res);
err:
        ib_device_put(device);
        return ret;
}

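/*
 * Common dumpit path: iterate the device's restrack xarray for the given
 * type and emit one nested entry per object, restarting from cb->args[0]
 * on each invocation until the message fills up.
 */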
static int res_get_common_dumpit(struct sk_buff *skb,
                                 struct netlink_callback *cb,
                                 enum rdma_restrack_type res_type,
                                 res_fill_func_t fill_func)
{
        const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct rdma_restrack_entry *res;
        struct rdma_restrack_root *rt;
        int err, ret = 0, idx = 0;
        struct nlattr *table_attr;
        struct nlattr *entry_attr;
        struct ib_device *device;
        int start = cb->args[0];
        bool has_cap_net_admin;
        struct nlmsghdr *nlh;
        unsigned long id;
        u32 index, port = 0;
        bool filled = false;

        err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, NULL);
        /*
         * Right now we require the device index to get resource information,
         * but this code could be extended to return all devices in one shot
         * by checking for the existence of RDMA_NLDEV_ATTR_DEV_INDEX and
         * iterating over all devices when it is absent. That is not needed
         * for now.
         */
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        /*
         * If no PORT_INDEX is supplied, return all objects of this type
         * from the device.
         */
        if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
                port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
                if (!rdma_is_port_valid(device, port)) {
                        ret = -EINVAL;
                        goto err_index;
                }
        }

        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NL_GET_OP(cb->nlh->nlmsg_type)),
                        0, NLM_F_MULTI);

        if (fill_nldev_handle(skb, device)) {
                ret = -EMSGSIZE;
                goto err;
        }

        table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
        if (!table_attr) {
                ret = -EMSGSIZE;
                goto err;
        }

        has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);

        rt = &device->res[res_type];
        xa_lock(&rt->xa);
        /*
         * FIXME: if skipping ahead turns out to be common, this loop should
         * use xas_for_each & xas_pause to optimize; we can have a lot of
         * objects.
         */
        xa_for_each(&rt->xa, id, res) {
                if (idx < start || !rdma_restrack_get(res))
                        goto next;

                xa_unlock(&rt->xa);

                filled = true;

                entry_attr = nla_nest_start_noflag(skb, fe->entry);
                if (!entry_attr) {
                        ret = -EMSGSIZE;
                        rdma_restrack_put(res);
                        goto msg_full;
                }

                ret = fill_func(skb, has_cap_net_admin, res, port);

                rdma_restrack_put(res);

                if (ret) {
                        nla_nest_cancel(skb, entry_attr);
                        if (ret == -EMSGSIZE)
                                goto msg_full;
                        if (ret == -EAGAIN)
                                goto again;
                        goto res_err;
                }
                nla_nest_end(skb, entry_attr);
again:          xa_lock(&rt->xa);
next:           idx++;
        }
        xa_unlock(&rt->xa);

msg_full:
        nla_nest_end(skb, table_attr);
        nlmsg_end(skb, nlh);
        cb->args[0] = idx;

        /*
         * No more entries to fill, cancel the message and
         * return 0 to mark end of dumpit.
         */
        if (!filled)
                goto err;

        ib_device_put(device);
        return skb->len;

res_err:
        nla_nest_cancel(skb, table_attr);

err:
        nlmsg_cancel(skb, nlh);

err_index:
        ib_device_put(device);
        return ret;
}

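/*
 * Instantiate the nldev_res_get_<name>_doit()/_dumpit() pair for each
 * resource type on top of the common helpers above.
 */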
#define RES_GET_FUNCS(name, type)                                              \
        static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,         \
                                                 struct netlink_callback *cb)  \
        {                                                                      \
                return res_get_common_dumpit(skb, cb, type,                    \
                                             fill_res_##name##_entry);         \
        }                                                                      \
        static int nldev_res_get_##name##_doit(struct sk_buff *skb,           \
                                               struct nlmsghdr *nlh,           \
                                               struct netlink_ext_ack *extack) \
        {                                                                      \
                return res_get_common_doit(skb, nlh, extack, type,             \
                                           fill_res_##name##_entry);           \
        }

RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP);
RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR);
RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);

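/*
 * Registry of rdma_link_ops implementations. nldev_newlink() looks up the
 * requested link type here (optionally triggering a module load) under
 * link_ops_rwsem.
 */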
static LIST_HEAD(link_ops);
static DECLARE_RWSEM(link_ops_rwsem);

static const struct rdma_link_ops *link_ops_get(const char *type)
{
        const struct rdma_link_ops *ops;

        list_for_each_entry(ops, &link_ops, list) {
                if (!strcmp(ops->type, type))
                        goto out;
        }
        ops = NULL;
out:
        return ops;
}

void rdma_link_register(struct rdma_link_ops *ops)
{
        down_write(&link_ops_rwsem);
        if (WARN_ON_ONCE(link_ops_get(ops->type)))
                goto out;
        list_add(&ops->list, &link_ops);
out:
        up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_register);

void rdma_link_unregister(struct rdma_link_ops *ops)
{
        down_write(&link_ops_rwsem);
        list_del(&ops->list);
        up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_unregister);

static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
                         struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        char ibdev_name[IB_DEVICE_NAME_MAX];
        const struct rdma_link_ops *ops;
        char ndev_name[IFNAMSIZ];
        struct net_device *ndev;
        char type[IFNAMSIZ];
        int err;

        err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
            !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
                return -EINVAL;

        nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
                    sizeof(ibdev_name));
        if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
                return -EINVAL;

        nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
        nla_strlcpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
                    sizeof(ndev_name));

        ndev = dev_get_by_name(sock_net(skb->sk), ndev_name);
        if (!ndev)
                return -ENODEV;

        down_read(&link_ops_rwsem);
        ops = link_ops_get(type);
#ifdef CONFIG_MODULES
        if (!ops) {
                up_read(&link_ops_rwsem);
                request_module("rdma-link-%s", type);
                down_read(&link_ops_rwsem);
                ops = link_ops_get(type);
        }
#endif
        err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
        up_read(&link_ops_rwsem);
        dev_put(ndev);

        return err;
}

static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
                         struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        u32 index;
        int err;

        err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
                ib_device_put(device);
                return -EINVAL;
        }

        ib_unregister_device_and_put(device);
        return 0;
}

static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
                             struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE];
        struct ib_client_nl_info data = {};
        struct ib_device *ibdev = NULL;
        struct sk_buff *msg;
        u32 index;
        int err;

        err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
                          extack);
        if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
                return -EINVAL;

        nla_strlcpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
                    sizeof(client_name));

        if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
                index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
                ibdev = ib_device_get_by_index(sock_net(skb->sk), index);
                if (!ibdev)
                        return -EINVAL;

                if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
                        data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
                        if (!rdma_is_port_valid(ibdev, data.port)) {
                                err = -EINVAL;
                                goto out_put;
                        }
                } else {
                        data.port = -1;
                }
        } else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
                return -EINVAL;
        }

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                err = -ENOMEM;
                goto out_put;
        }
        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NLDEV_CMD_GET_CHARDEV),
                        0, 0);

        data.nl_msg = msg;
        err = ib_get_client_nl_info(ibdev, client_name, &data);
        if (err)
                goto out_nlmsg;

        err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV,
                                huge_encode_dev(data.cdev->devt),
                                RDMA_NLDEV_ATTR_PAD);
        if (err)
                goto out_data;
        err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi,
                                RDMA_NLDEV_ATTR_PAD);
        if (err)
                goto out_data;
        if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME,
                           dev_name(data.cdev))) {
                err = -EMSGSIZE;
                goto out_data;
        }

        nlmsg_end(msg, nlh);
        put_device(data.cdev);
        if (ibdev)
                ib_device_put(ibdev);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

out_data:
        put_device(data.cdev);
out_nlmsg:
        nlmsg_free(msg);
out_put:
        if (ibdev)
                ib_device_put(ibdev);
        return err;
}

static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                              struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct sk_buff *msg;
        int err;

        err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, extack);
        if (err)
                return err;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NLDEV_CMD_SYS_GET),
                        0, 0);

        err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
                         (u8)ib_devices_shared_netns);
        if (err) {
                nlmsg_free(msg);
                return err;
        }
        nlmsg_end(msg, nlh);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
}

static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                                  struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        u8 enable;
        int err;

        err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
                return -EINVAL;

        enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
        /* Only 0 and 1 are supported */
        if (enable > 1)
                return -EINVAL;

        err = rdma_compatdev_set(enable);
        return err;
}

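/*
 * RDMA_NLDEV_CMD_STAT_SET either switches a port to auto counter mode
 * (with an optional criteria mask) or manually binds a QP to an existing
 * counter, allocating a new one when no counter ID is given.
 */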
static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                               struct netlink_ext_ack *extack)
{
        u32 index, port, mode, mask = 0, qpn, cntn = 0;
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        struct sk_buff *msg;
        int ret;

        ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, extack);
        /* Currently only counter for QP is supported */
        if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
            !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
            !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_MODE])
                return -EINVAL;

        if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
        if (!rdma_is_port_valid(device, port)) {
                ret = -EINVAL;
                goto err;
        }

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
                goto err;
        }
        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NLDEV_CMD_STAT_SET),
                        0, 0);

        mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
        if (mode == RDMA_COUNTER_MODE_AUTO) {
                if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
                        mask = nla_get_u32(
                                tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);

                ret = rdma_counter_set_auto_mode(device, port,
                                                 mask ? true : false, mask);
                if (ret)
                        goto err_msg;
        } else {
                if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
                        goto err_msg;
                qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
                if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
                        cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
                        ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
                } else {
                        ret = rdma_counter_bind_qpn_alloc(device, port,
                                                          qpn, &cntn);
                }
                if (ret)
                        goto err_msg;

                if (fill_nldev_handle(msg, device) ||
                    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
                    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
                    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
                        ret = -EMSGSIZE;
                        goto err_fill;
                }
        }

        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_fill:
        rdma_counter_unbind_qpn(device, port, qpn, cntn);
err_msg:
        nlmsg_free(msg);
err:
        ib_device_put(device);
        return ret;
}

static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                               struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        struct sk_buff *msg;
        u32 index, port, qpn, cntn;
        int ret;

        ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, extack);
        if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
            !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] ||
            !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] ||
            !tb[RDMA_NLDEV_ATTR_RES_LQPN])
                return -EINVAL;

        if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
        if (!rdma_is_port_valid(device, port)) {
                ret = -EINVAL;
                goto err;
        }

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
                goto err;
        }
        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NLDEV_CMD_STAT_SET),
                        0, 0);

        cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
        qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
        if (fill_nldev_handle(msg, device) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
                ret = -EMSGSIZE;
                goto err_fill;
        }

        ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
        if (ret)
                goto err_fill;

        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_fill:
        nlmsg_free(msg);
err:
        ib_device_put(device);
        return ret;
}

static int stat_get_doit_default_counter(struct sk_buff *skb,
                                         struct nlmsghdr *nlh,
                                         struct netlink_ext_ack *extack,
                                         struct nlattr *tb[])
{
        struct rdma_hw_stats *stats;
        struct nlattr *table_attr;
        struct ib_device *device;
        int ret, num_cnts, i;
        struct sk_buff *msg;
        u32 index, port;
        u64 v;

        if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        if (!device->ops.alloc_hw_stats || !device->ops.get_hw_stats) {
                ret = -EINVAL;
                goto err;
        }

        port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
        if (!rdma_is_port_valid(device, port)) {
                ret = -EINVAL;
                goto err;
        }

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
                goto err;
        }

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NLDEV_CMD_STAT_GET),
                        0, 0);

        if (fill_nldev_handle(msg, device) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
                ret = -EMSGSIZE;
                goto err_msg;
        }

        stats = device->port_data ? device->port_data[port].hw_stats : NULL;
        if (stats == NULL) {
                ret = -EINVAL;
                goto err_msg;
        }
        mutex_lock(&stats->lock);

        num_cnts = device->ops.get_hw_stats(device, stats, port, 0);
        if (num_cnts < 0) {
                ret = -EINVAL;
                goto err_stats;
        }

        table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
        if (!table_attr) {
                ret = -EMSGSIZE;
                goto err_stats;
        }
        for (i = 0; i < num_cnts; i++) {
                v = stats->value[i] +
                        rdma_counter_get_hwstat_value(device, port, i);
                if (rdma_nl_stat_hwcounter_entry(msg, stats->names[i], v)) {
                        ret = -EMSGSIZE;
                        goto err_table;
                }
        }
        nla_nest_end(msg, table_attr);

        mutex_unlock(&stats->lock);
        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_table:
        nla_nest_cancel(msg, table_attr);
err_stats:
        mutex_unlock(&stats->lock);
err_msg:
        nlmsg_free(msg);
err:
        ib_device_put(device);
        return ret;
}

static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
                            struct netlink_ext_ack *extack, struct nlattr *tb[])
{
        static enum rdma_nl_counter_mode mode;
        static enum rdma_nl_counter_mask mask;
        struct ib_device *device;
        struct sk_buff *msg;
        u32 index, port;
        int ret;

        if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID])
                return nldev_res_get_counter_doit(skb, nlh, extack);

        if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] ||
            !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
        if (!rdma_is_port_valid(device, port)) {
                ret = -EINVAL;
                goto err;
        }

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
                goto err;
        }

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NLDEV_CMD_STAT_GET),
                        0, 0);

        ret = rdma_counter_get_mode(device, port, &mode, &mask);
        if (ret)
                goto err_msg;

        if (fill_nldev_handle(msg, device) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
                ret = -EMSGSIZE;
                goto err_msg;
        }

        if ((mode == RDMA_COUNTER_MODE_AUTO) &&
            nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
                ret = -EMSGSIZE;
                goto err_msg;
        }

        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_msg:
        nlmsg_free(msg);
err:
        ib_device_put(device);
        return ret;
}

static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                               struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        int ret;

        ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, extack);
        if (ret)
                return -EINVAL;

        if (!tb[RDMA_NLDEV_ATTR_STAT_RES])
                return stat_get_doit_default_counter(skb, nlh, extack, tb);

        switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
        case RDMA_NLDEV_ATTR_RES_QP:
                ret = stat_get_doit_qp(skb, nlh, extack, tb);
                break;
        case RDMA_NLDEV_ATTR_RES_MR:
                ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR,
                                          fill_stat_mr_entry);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int nldev_stat_get_dumpit(struct sk_buff *skb,
                                 struct netlink_callback *cb)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        int ret;

        ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, NULL);
        if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
                return -EINVAL;

        switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
        case RDMA_NLDEV_ATTR_RES_QP:
                ret = nldev_res_get_counter_dumpit(skb, cb);
                break;
        case RDMA_NLDEV_ATTR_RES_MR:
                ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR,
                                            fill_stat_mr_entry);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

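/*
 * Dispatch table mapping RDMA_NLDEV_CMD_* operations to their doit/dump
 * handlers; entries marked RDMA_NL_ADMIN_PERM are restricted to
 * CAP_NET_ADMIN callers.
 */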
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
        [RDMA_NLDEV_CMD_GET] = {
                .doit = nldev_get_doit,
                .dump = nldev_get_dumpit,
        },
        [RDMA_NLDEV_CMD_GET_CHARDEV] = {
                .doit = nldev_get_chardev,
        },
        [RDMA_NLDEV_CMD_SET] = {
                .doit = nldev_set_doit,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NLDEV_CMD_NEWLINK] = {
                .doit = nldev_newlink,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NLDEV_CMD_DELLINK] = {
                .doit = nldev_dellink,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NLDEV_CMD_PORT_GET] = {
                .doit = nldev_port_get_doit,
                .dump = nldev_port_get_dumpit,
        },
        [RDMA_NLDEV_CMD_RES_GET] = {
                .doit = nldev_res_get_doit,
                .dump = nldev_res_get_dumpit,
        },
        [RDMA_NLDEV_CMD_RES_QP_GET] = {
                .doit = nldev_res_get_qp_doit,
                .dump = nldev_res_get_qp_dumpit,
        },
        [RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
                .doit = nldev_res_get_cm_id_doit,
                .dump = nldev_res_get_cm_id_dumpit,
        },
        [RDMA_NLDEV_CMD_RES_CQ_GET] = {
                .doit = nldev_res_get_cq_doit,
                .dump = nldev_res_get_cq_dumpit,
        },
        [RDMA_NLDEV_CMD_RES_MR_GET] = {
                .doit = nldev_res_get_mr_doit,
                .dump = nldev_res_get_mr_dumpit,
        },
        [RDMA_NLDEV_CMD_RES_PD_GET] = {
                .doit = nldev_res_get_pd_doit,
                .dump = nldev_res_get_pd_dumpit,
        },
        [RDMA_NLDEV_CMD_SYS_GET] = {
                .doit = nldev_sys_get_doit,
        },
        [RDMA_NLDEV_CMD_SYS_SET] = {
                .doit = nldev_set_sys_set_doit,
        },
        [RDMA_NLDEV_CMD_STAT_SET] = {
                .doit = nldev_stat_set_doit,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NLDEV_CMD_STAT_GET] = {
                .doit = nldev_stat_get_doit,
                .dump = nldev_stat_get_dumpit,
        },
        [RDMA_NLDEV_CMD_STAT_DEL] = {
                .doit = nldev_stat_del_doit,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NLDEV_CMD_RES_QP_GET_RAW] = {
                .doit = nldev_res_get_qp_raw_doit,
                .dump = nldev_res_get_qp_raw_dumpit,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = {
                .doit = nldev_res_get_cq_raw_doit,
                .dump = nldev_res_get_cq_raw_dumpit,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NLDEV_CMD_RES_MR_GET_RAW] = {
                .doit = nldev_res_get_mr_raw_doit,
                .dump = nldev_res_get_mr_raw_dumpit,
                .flags = RDMA_NL_ADMIN_PERM,
        },
};

void __init nldev_init(void)
{
        rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
        rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);