/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/mutex.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"
#include "cma_priv.h"
#include "restrack.h"
#include "uverbs.h"
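
/*
 * Signature shared by all of the restrack fill helpers below, so that
 * res_get_common_doit()/res_get_common_dumpit() can dispatch to the right
 * one per resource type.
 */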
typedef int (*res_fill_func_t)(struct sk_buff*, bool,
                               struct rdma_restrack_entry*, uint32_t);

/*
 * Sort array elements by the netlink attribute name
 */
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
        [RDMA_NLDEV_ATTR_CHARDEV]               = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_CHARDEV_ABI]           = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_CHARDEV_NAME]          = { .type = NLA_NUL_STRING,
                                        .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
        [RDMA_NLDEV_ATTR_CHARDEV_TYPE]          = { .type = NLA_NUL_STRING,
                                        .len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE },
        [RDMA_NLDEV_ATTR_DEV_DIM]               = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_DEV_INDEX]             = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_DEV_NAME]              = { .type = NLA_NUL_STRING,
                                        .len = IB_DEVICE_NAME_MAX },
        [RDMA_NLDEV_ATTR_DEV_NODE_TYPE]         = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_DEV_PROTOCOL]          = { .type = NLA_NUL_STRING,
                                        .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
        [RDMA_NLDEV_ATTR_DRIVER]                = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_DRIVER_ENTRY]          = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE]     = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_DRIVER_STRING]         = { .type = NLA_NUL_STRING,
                                        .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
        [RDMA_NLDEV_ATTR_DRIVER_S32]            = { .type = NLA_S32 },
        [RDMA_NLDEV_ATTR_DRIVER_S64]            = { .type = NLA_S64 },
        [RDMA_NLDEV_ATTR_DRIVER_U32]            = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_DRIVER_U64]            = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_FW_VERSION]            = { .type = NLA_NUL_STRING,
                                        .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
        [RDMA_NLDEV_ATTR_LID]                   = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_LINK_TYPE]             = { .type = NLA_NUL_STRING,
                                        .len = IFNAMSIZ },
        [RDMA_NLDEV_ATTR_LMC]                   = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_NDEV_INDEX]            = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_NDEV_NAME]             = { .type = NLA_NUL_STRING,
                                        .len = IFNAMSIZ },
        [RDMA_NLDEV_ATTR_NODE_GUID]             = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_PORT_INDEX]            = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_PORT_PHYS_STATE]       = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_PORT_STATE]            = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_RES_CM_ID]             = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_CM_IDN]            = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY]       = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_CQ]                = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_CQE]               = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_CQN]               = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_CQ_ENTRY]          = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_CTX]               = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_CTXN]              = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_CTX_ENTRY]         = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_DST_ADDR]          = {
                        .len = sizeof(struct __kernel_sockaddr_storage) },
        [RDMA_NLDEV_ATTR_RES_IOVA]              = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_RES_KERN_NAME]         = { .type = NLA_NUL_STRING,
                                        .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
        [RDMA_NLDEV_ATTR_RES_LKEY]              = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY]    = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_LQPN]              = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_MR]                = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_MRLEN]             = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_RES_MRN]               = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_MR_ENTRY]          = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE]    = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_RES_PD]                = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_PDN]               = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_PD_ENTRY]          = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_PID]               = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_POLL_CTX]          = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_RES_PS]                = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_QP]                = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_QP_ENTRY]          = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_RAW]               = { .type = NLA_BINARY },
        [RDMA_NLDEV_ATTR_RES_RKEY]              = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_RQPN]              = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_RQ_PSN]            = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_SQ_PSN]            = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_SRC_ADDR]          = {
                        .len = sizeof(struct __kernel_sockaddr_storage) },
        [RDMA_NLDEV_ATTR_RES_STATE]             = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_RES_SUMMARY]           = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY]     = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING,
                                        .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
        [RDMA_NLDEV_ATTR_RES_TYPE]              = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_USECNT]            = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_RES_SRQ]               = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_RES_SRQN]              = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_RES_SRQ_ENTRY]         = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_MIN_RANGE]             = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_MAX_RANGE]             = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_SM_LID]                = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_SUBNET_PREFIX]         = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]   = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_STAT_MODE]             = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_STAT_RES]              = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_STAT_COUNTER]          = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY]    = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_STAT_COUNTER_ID]       = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]       = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY]  = { .type = NLA_NESTED },
        [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING },
        [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_SYS_IMAGE_GUID]        = { .type = NLA_U64 },
        [RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID]      = { .type = NLA_U32 },
        [RDMA_NLDEV_NET_NS_FD]                  = { .type = NLA_U32 },
        [RDMA_NLDEV_SYS_ATTR_NETNS_MODE]        = { .type = NLA_U8 },
        [RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK]      = { .type = NLA_U8 },
};
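
/*
 * Helpers for drivers to attach driver-specific attributes to a nldev
 * message. Each exported rdma_nl_put_driver_*() call below emits a name
 * string, an optional print-type hint (e.g. hex) and the value; userspace
 * (typically the iproute2 "rdma" tool) uses the hint only for display.
 */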
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
                                      enum rdma_nldev_print_type print_type)
{
        if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
                return -EMSGSIZE;
        if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
            nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
                return -EMSGSIZE;

        return 0;
}

static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
                                   enum rdma_nldev_print_type print_type,
                                   u32 value)
{
        if (put_driver_name_print_type(msg, name, print_type))
                return -EMSGSIZE;
        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
                return -EMSGSIZE;

        return 0;
}

static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
                                   enum rdma_nldev_print_type print_type,
                                   u64 value)
{
        if (put_driver_name_print_type(msg, name, print_type))
                return -EMSGSIZE;
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
                              RDMA_NLDEV_ATTR_PAD))
                return -EMSGSIZE;

        return 0;
}

int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
                              const char *str)
{
        if (put_driver_name_print_type(msg, name,
                                       RDMA_NLDEV_PRINT_TYPE_UNSPEC))
                return -EMSGSIZE;
        if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str))
                return -EMSGSIZE;

        return 0;
}
EXPORT_SYMBOL(rdma_nl_put_driver_string);

int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
        return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
                                       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32);

int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
                               u32 value)
{
        return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
                                       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);

int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
{
        return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
                                       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64);

int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
{
        return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
                                       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);

static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
                return -EMSGSIZE;
        if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
                           dev_name(&device->dev)))
                return -EMSGSIZE;

        return 0;
}

static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
        char fw[IB_FW_VERSION_NAME_MAX];
        int ret = 0;
        u32 port;

        if (fill_nldev_handle(msg, device))
                return -EMSGSIZE;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
                return -EMSGSIZE;

        BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
                              device->attrs.device_cap_flags,
                              RDMA_NLDEV_ATTR_PAD))
                return -EMSGSIZE;

        ib_get_device_fw_str(device, fw);
        /* Device without FW has strlen(fw) = 0 */
        if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
                return -EMSGSIZE;

        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
                              be64_to_cpu(device->node_guid),
                              RDMA_NLDEV_ATTR_PAD))
                return -EMSGSIZE;
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
                              be64_to_cpu(device->attrs.sys_image_guid),
                              RDMA_NLDEV_ATTR_PAD))
                return -EMSGSIZE;
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
                return -EMSGSIZE;
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim))
                return -EMSGSIZE;

        /*
         * The link type is determined from the first port. An mlx4 device
         * can potentially have two different link types on the same IB
         * device, which is considered best avoided in the future.
         */
        port = rdma_start_port(device);
        if (rdma_cap_opa_mad(device, port))
                ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
        else if (rdma_protocol_ib(device, port))
                ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
        else if (rdma_protocol_iwarp(device, port))
                ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
        else if (rdma_protocol_roce(device, port))
                ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
        else if (rdma_protocol_usnic(device, port))
                ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
                                     "usnic");
        return ret;
}

static int fill_port_info(struct sk_buff *msg,
                          struct ib_device *device, u32 port,
                          const struct net *net)
{
        struct net_device *netdev = NULL;
        struct ib_port_attr attr;
        int ret;
        u64 cap_flags = 0;

        if (fill_nldev_handle(msg, device))
                return -EMSGSIZE;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
                return -EMSGSIZE;

        ret = ib_query_port(device, port, &attr);
        if (ret)
                return ret;

        if (rdma_protocol_ib(device, port)) {
                BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
                                sizeof(attr.port_cap_flags2)) > sizeof(u64));
                cap_flags = attr.port_cap_flags |
                        ((u64)attr.port_cap_flags2 << 32);
                if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
                                      cap_flags, RDMA_NLDEV_ATTR_PAD))
                        return -EMSGSIZE;
                if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
                                      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
                        return -EMSGSIZE;
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
                        return -EMSGSIZE;
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
                        return -EMSGSIZE;
                if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
                        return -EMSGSIZE;
        }
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
                return -EMSGSIZE;
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
                return -EMSGSIZE;

        netdev = ib_device_get_netdev(device, port);
        if (netdev && net_eq(dev_net(netdev), net)) {
                ret = nla_put_u32(msg,
                                  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
                if (ret)
                        goto out;
                ret = nla_put_string(msg,
                                     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
        }

out:
        if (netdev)
                dev_put(netdev);
        return ret;
}

static int fill_res_info_entry(struct sk_buff *msg,
                               const char *name, u64 curr)
{
        struct nlattr *entry_attr;

        entry_attr = nla_nest_start_noflag(msg,
                                           RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
        if (!entry_attr)
                return -EMSGSIZE;

        if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
                goto err;
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
                              RDMA_NLDEV_ATTR_PAD))
                goto err;

        nla_nest_end(msg, entry_attr);
        return 0;

err:
        nla_nest_cancel(msg, entry_attr);
        return -EMSGSIZE;
}

static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
        static const char * const names[RDMA_RESTRACK_MAX] = {
                [RDMA_RESTRACK_PD] = "pd",
                [RDMA_RESTRACK_CQ] = "cq",
                [RDMA_RESTRACK_QP] = "qp",
                [RDMA_RESTRACK_CM_ID] = "cm_id",
                [RDMA_RESTRACK_MR] = "mr",
                [RDMA_RESTRACK_CTX] = "ctx",
                [RDMA_RESTRACK_SRQ] = "srq",
        };

        struct nlattr *table_attr;
        int ret, i, curr;

        if (fill_nldev_handle(msg, device))
                return -EMSGSIZE;

        table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
        if (!table_attr)
                return -EMSGSIZE;

        for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
                if (!names[i])
                        continue;
                curr = rdma_restrack_count(device, i);
                ret = fill_res_info_entry(msg, names[i], curr);
                if (ret)
                        goto err;
        }

        nla_nest_end(msg, table_attr);
        return 0;

err:
        nla_nest_cancel(msg, table_attr);
        return ret;
}

static int fill_res_name_pid(struct sk_buff *msg,
                             struct rdma_restrack_entry *res)
{
        int err = 0;

        /*
         * For user resources, userspace should read /proc/PID/comm to get
         * the name of the task.
         */
        if (rdma_is_kernel_res(res)) {
                err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
                                     res->kern_name);
        } else {
                pid_t pid;

                pid = task_pid_vnr(res->task);
                /*
                 * Task is dead and in zombie state.
                 * There is no need to print PID anymore.
                 */
                if (pid)
                        /*
                         * This part is racy, task can be killed and PID will
                         * be zero right here but it is ok, next query won't
                         * return PID. We don't promise real-time reflection
                         * of SW objects.
                         */
                        err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid);
        }

        return err ? -EMSGSIZE : 0;
}
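
/*
 * QP dumping is split in two steps: fill_res_qp_entry() emits the restrack
 * identifiers (LQPN, PDN, pid), then calls the helper below to query the
 * live QP state with ib_query_qp() and append it to the same message.
 */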
static int fill_res_qp_entry_query(struct sk_buff *msg,
                                   struct rdma_restrack_entry *res,
                                   struct ib_device *dev,
                                   struct ib_qp *qp)
{
        struct ib_qp_init_attr qp_init_attr;
        struct ib_qp_attr qp_attr;
        int ret;

        ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
        if (ret)
                return ret;

        if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
                                qp_attr.dest_qp_num))
                        goto err;
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
                                qp_attr.rq_psn))
                        goto err;
        }

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
                goto err;

        if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
            qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
                if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
                               qp_attr.path_mig_state))
                        goto err;
        }
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
                goto err;
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
                goto err;

        if (dev->ops.fill_res_qp_entry)
                return dev->ops.fill_res_qp_entry(msg, qp);
        return 0;

err:    return -EMSGSIZE;
}

static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
                             struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_qp *qp = container_of(res, struct ib_qp, res);
        struct ib_device *dev = qp->device;
        int ret;

        if (port && port != qp->port)
                return -EAGAIN;

        /* In create_qp() port is not set yet */
        if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port))
                return -EINVAL;

        ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num);
        if (ret)
                return -EINVAL;

        if (!rdma_is_kernel_res(res) &&
            nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
                return -EINVAL;

        ret = fill_res_name_pid(msg, res);
        if (ret)
                return -EMSGSIZE;

        return fill_res_qp_entry_query(msg, res, dev, qp);
}

static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
                                 struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_qp *qp = container_of(res, struct ib_qp, res);
        struct ib_device *dev = qp->device;

        if (port && port != qp->port)
                return -EAGAIN;
        if (!dev->ops.fill_res_qp_entry_raw)
                return -EINVAL;
        return dev->ops.fill_res_qp_entry_raw(msg, qp);
}

static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
                                struct rdma_restrack_entry *res, uint32_t port)
{
        struct rdma_id_private *id_priv =
                                container_of(res, struct rdma_id_private, res);
        struct ib_device *dev = id_priv->id.device;
        struct rdma_cm_id *cm_id = &id_priv->id;

        if (port && port != cm_id->port_num)
                return 0;

        if (cm_id->port_num &&
            nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
                goto err;

        if (id_priv->qp_num) {
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
                        goto err;
                if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
                        goto err;
        }

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
                goto err;

        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
                goto err;

        if (cm_id->route.addr.src_addr.ss_family &&
            nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
                    sizeof(cm_id->route.addr.src_addr),
                    &cm_id->route.addr.src_addr))
                goto err;
        if (cm_id->route.addr.dst_addr.ss_family &&
            nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
                    sizeof(cm_id->route.addr.dst_addr),
                    &cm_id->route.addr.dst_addr))
                goto err;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
                goto err;

        if (fill_res_name_pid(msg, res))
                goto err;

        if (dev->ops.fill_res_cm_id_entry)
                return dev->ops.fill_res_cm_id_entry(msg, cm_id);
        return 0;

err:    return -EMSGSIZE;
}

static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
                             struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_cq *cq = container_of(res, struct ib_cq, res);
        struct ib_device *dev = cq->device;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
                return -EMSGSIZE;
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
                              atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
                return -EMSGSIZE;

        /* Poll context is only valid for kernel CQs */
        if (rdma_is_kernel_res(res) &&
            nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
                return -EMSGSIZE;

        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL)))
                return -EMSGSIZE;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
                return -EMSGSIZE;
        if (!rdma_is_kernel_res(res) &&
            nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
                        cq->uobject->uevent.uobject.context->res.id))
                return -EMSGSIZE;

        if (fill_res_name_pid(msg, res))
                return -EMSGSIZE;

        return (dev->ops.fill_res_cq_entry) ?
                dev->ops.fill_res_cq_entry(msg, cq) : 0;
}

static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
                                 struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_cq *cq = container_of(res, struct ib_cq, res);
        struct ib_device *dev = cq->device;

        if (!dev->ops.fill_res_cq_entry_raw)
                return -EINVAL;
        return dev->ops.fill_res_cq_entry_raw(msg, cq);
}

static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
                             struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_mr *mr = container_of(res, struct ib_mr, res);
        struct ib_device *dev = mr->pd->device;

        if (has_cap_net_admin) {
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
                        return -EMSGSIZE;
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
                        return -EMSGSIZE;
        }

        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
                              RDMA_NLDEV_ATTR_PAD))
                return -EMSGSIZE;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
                return -EMSGSIZE;

        if (!rdma_is_kernel_res(res) &&
            nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
                return -EMSGSIZE;

        if (fill_res_name_pid(msg, res))
                return -EMSGSIZE;

        return (dev->ops.fill_res_mr_entry) ?
                       dev->ops.fill_res_mr_entry(msg, mr) :
                       0;
}

static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
                                 struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_mr *mr = container_of(res, struct ib_mr, res);
        struct ib_device *dev = mr->pd->device;

        if (!dev->ops.fill_res_mr_entry_raw)
                return -EINVAL;
        return dev->ops.fill_res_mr_entry_raw(msg, mr);
}

static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
                             struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_pd *pd = container_of(res, struct ib_pd, res);

        if (has_cap_net_admin) {
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
                                pd->local_dma_lkey))
                        goto err;
                if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
                    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
                                pd->unsafe_global_rkey))
                        goto err;
        }
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
                              atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
                goto err;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
                goto err;

        if (!rdma_is_kernel_res(res) &&
            nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
                        pd->uobject->context->res.id))
                goto err;

        return fill_res_name_pid(msg, res);

err:    return -EMSGSIZE;
}

static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin,
                              struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_ucontext *ctx = container_of(res, struct ib_ucontext, res);

        if (rdma_is_kernel_res(res))
                return 0;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id))
                return -EMSGSIZE;

        return fill_res_name_pid(msg, res);
}
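
/*
 * SRQ dumps compress the list of attached QPs into ranges: a run of
 * consecutive QP numbers is emitted as one MIN_RANGE/MAX_RANGE pair,
 * and an isolated QP number as a single LQPN attribute.
 */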
static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range,
                                   uint32_t max_range)
{
        struct nlattr *entry_attr;

        if (!min_range)
                return 0;

        entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
        if (!entry_attr)
                return -EMSGSIZE;

        if (min_range == max_range) {
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range))
                        goto err;
        } else {
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range))
                        goto err;
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range))
                        goto err;
        }
        nla_nest_end(msg, entry_attr);
        return 0;

err:
        nla_nest_cancel(msg, entry_attr);
        return -EMSGSIZE;
}

static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq)
{
        uint32_t min_range = 0, prev = 0;
        struct rdma_restrack_entry *res;
        struct rdma_restrack_root *rt;
        struct nlattr *table_attr;
        struct ib_qp *qp = NULL;
        unsigned long id = 0;

        table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
        if (!table_attr)
                return -EMSGSIZE;

        rt = &srq->device->res[RDMA_RESTRACK_QP];
        xa_lock(&rt->xa);
        xa_for_each(&rt->xa, id, res) {
                if (!rdma_restrack_get(res))
                        continue;

                qp = container_of(res, struct ib_qp, res);
                if (!qp->srq || (qp->srq->res.id != srq->res.id)) {
                        rdma_restrack_put(res);
                        continue;
                }

                if (qp->qp_num < prev)
                        /* qp_num should be ascending */
                        goto err_loop;

                if (min_range == 0) {
                        min_range = qp->qp_num;
                } else if (qp->qp_num > (prev + 1)) {
                        if (fill_res_range_qp_entry(msg, min_range, prev))
                                goto err_loop;

                        min_range = qp->qp_num;
                }
                prev = qp->qp_num;
                rdma_restrack_put(res);
        }

        xa_unlock(&rt->xa);

        if (fill_res_range_qp_entry(msg, min_range, prev))
                goto err;

        nla_nest_end(msg, table_attr);
        return 0;

err_loop:
        rdma_restrack_put(res);
        xa_unlock(&rt->xa);
err:
        nla_nest_cancel(msg, table_attr);
        return -EMSGSIZE;
}

static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin,
                              struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_srq *srq = container_of(res, struct ib_srq, res);

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id))
                goto err;

        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type))
                goto err;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id))
                goto err;

        if (ib_srq_has_cq(srq->srq_type)) {
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN,
                                srq->ext.cq->res.id))
                        goto err;
        }

        if (fill_res_srq_qps(msg, srq))
                goto err;

        return fill_res_name_pid(msg, res);

err:
        return -EMSGSIZE;
}
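
/*
 * A statistic counter is dumped in three parts: its mode (auto/manual plus
 * the auto-mode mask), the QPs currently bound to it, and the hardware
 * counter name/value pairs themselves; see fill_res_counter_entry() below.
 */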
static int fill_stat_counter_mode(struct sk_buff *msg,
                                  struct rdma_counter *counter)
{
        struct rdma_counter_mode *m = &counter->mode;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode))
                return -EMSGSIZE;

        if (m->mode == RDMA_COUNTER_MODE_AUTO) {
                if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) &&
                    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type))
                        return -EMSGSIZE;

                if ((m->mask & RDMA_COUNTER_MASK_PID) &&
                    fill_res_name_pid(msg, &counter->res))
                        return -EMSGSIZE;
        }

        return 0;
}

static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn)
{
        struct nlattr *entry_attr;

        entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
        if (!entry_attr)
                return -EMSGSIZE;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn))
                goto err;

        nla_nest_end(msg, entry_attr);
        return 0;

err:
        nla_nest_cancel(msg, entry_attr);
        return -EMSGSIZE;
}

static int fill_stat_counter_qps(struct sk_buff *msg,
                                 struct rdma_counter *counter)
{
        struct rdma_restrack_entry *res;
        struct rdma_restrack_root *rt;
        struct nlattr *table_attr;
        struct ib_qp *qp = NULL;
        unsigned long id = 0;
        int ret = 0;

        table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
        if (!table_attr)
                return -EMSGSIZE;

        rt = &counter->device->res[RDMA_RESTRACK_QP];
        xa_lock(&rt->xa);
        xa_for_each(&rt->xa, id, res) {
                qp = container_of(res, struct ib_qp, res);
                if (!qp->counter || (qp->counter->id != counter->id))
                        continue;

                ret = fill_stat_counter_qp_entry(msg, qp->qp_num);
                if (ret)
                        goto err;
        }

        xa_unlock(&rt->xa);
        nla_nest_end(msg, table_attr);
        return 0;

err:
        xa_unlock(&rt->xa);
        nla_nest_cancel(msg, table_attr);
        return ret;
}

int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
                                 u64 value)
{
        struct nlattr *entry_attr;

        entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
        if (!entry_attr)
                return -EMSGSIZE;

        if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
                           name))
                goto err;
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE,
                              value, RDMA_NLDEV_ATTR_PAD))
                goto err;

        nla_nest_end(msg, entry_attr);
        return 0;

err:
        nla_nest_cancel(msg, entry_attr);
        return -EMSGSIZE;
}
EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry);

static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
                              struct rdma_restrack_entry *res, uint32_t port)
{
        struct ib_mr *mr = container_of(res, struct ib_mr, res);
        struct ib_device *dev = mr->pd->device;

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
                goto err;

        if (dev->ops.fill_stat_mr_entry)
                return dev->ops.fill_stat_mr_entry(msg, mr);
        return 0;

err:
        return -EMSGSIZE;
}

static int fill_stat_counter_hwcounters(struct sk_buff *msg,
                                        struct rdma_counter *counter)
{
        struct rdma_hw_stats *st = counter->stats;
        struct nlattr *table_attr;
        int i;

        table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
        if (!table_attr)
                return -EMSGSIZE;

        for (i = 0; i < st->num_counters; i++)
                if (rdma_nl_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
                        goto err;

        nla_nest_end(msg, table_attr);
        return 0;

err:
        nla_nest_cancel(msg, table_attr);
        return -EMSGSIZE;
}

static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
                                  struct rdma_restrack_entry *res,
                                  uint32_t port)
{
        struct rdma_counter *counter =
                container_of(res, struct rdma_counter, res);

        if (port && port != counter->port)
                return -EAGAIN;

        /* Dump it even if the query failed */
        rdma_counter_query_stats(counter);

        if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) ||
            fill_stat_counter_mode(msg, counter) ||
            fill_stat_counter_qps(msg, counter) ||
            fill_stat_counter_hwcounters(msg, counter))
                return -EMSGSIZE;

        return 0;
}

static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                          struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        struct sk_buff *msg;
        u32 index;
        int err;

        err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                err = -ENOMEM;
                goto err;
        }

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
                        0, 0);

        err = fill_dev_info(msg, device);
        if (err)
                goto err_free;

        nlmsg_end(msg, nlh);

        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
        nlmsg_free(msg);
err:
        ib_device_put(device);
        return err;
}

static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                          struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        u32 index;
        int err;

        err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
                char name[IB_DEVICE_NAME_MAX] = {};

                nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
                            IB_DEVICE_NAME_MAX);
                if (strlen(name) == 0) {
                        err = -EINVAL;
                        goto done;
                }
                err = ib_device_rename(device, name);
                goto done;
        }

        if (tb[RDMA_NLDEV_NET_NS_FD]) {
                u32 ns_fd;

                ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
                err = ib_device_set_netns_put(skb, device, ns_fd);
                goto put_done;
        }

        if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) {
                u8 use_dim;

                use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]);
                err = ib_device_set_dim(device, use_dim);
                goto done;
        }

done:
        ib_device_put(device);
put_done:
        return err;
}

static int _nldev_get_dumpit(struct ib_device *device,
                             struct sk_buff *skb,
                             struct netlink_callback *cb,
                             unsigned int idx)
{
        int start = cb->args[0];
        struct nlmsghdr *nlh;

        if (idx < start)
                return 0;

        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
                        0, NLM_F_MULTI);

        if (fill_dev_info(skb, device)) {
                nlmsg_cancel(skb, nlh);
                goto out;
        }

        nlmsg_end(skb, nlh);

        idx++;

out:    cb->args[0] = idx;
        return skb->len;
}

static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        /*
         * There is no need to take lock, because
         * we are relying on ib_core's locking.
         */
        return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}

static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                               struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        struct sk_buff *msg;
        u32 index;
        u32 port;
        int err;

        err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, extack);
        if (err ||
            !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
            !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
        if (!rdma_is_port_valid(device, port)) {
                err = -EINVAL;
                goto err;
        }

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                err = -ENOMEM;
                goto err;
        }

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
                        0, 0);

        err = fill_port_info(msg, device, port, sock_net(skb->sk));
        if (err)
                goto err_free;

        nlmsg_end(msg, nlh);
        ib_device_put(device);

        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
        nlmsg_free(msg);
err:
        ib_device_put(device);
        return err;
}

static int nldev_port_get_dumpit(struct sk_buff *skb,
                                 struct netlink_callback *cb)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        int start = cb->args[0];
        struct nlmsghdr *nlh;
        u32 idx = 0;
        u32 ifindex;
        int err;
        unsigned int p;

        err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, NULL);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
        if (!device)
                return -EINVAL;

        rdma_for_each_port (device, p) {
                /*
                 * The dumpit function returns all information from a
                 * specific index. This index is taken from the netlink
                 * request message sent by the user and is available in
                 * cb->args[0].
                 *
                 * Usually the user doesn't fill this field, so everything
                 * is returned.
                 */
                if (idx < start) {
                        idx++;
                        continue;
                }

                nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq,
                                RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                                 RDMA_NLDEV_CMD_PORT_GET),
                                0, NLM_F_MULTI);

                if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
                        nlmsg_cancel(skb, nlh);
                        goto out;
                }
                idx++;
                nlmsg_end(skb, nlh);
        }

out:
        ib_device_put(device);
        cb->args[0] = idx;
        return skb->len;
}

static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                              struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        struct sk_buff *msg;
        u32 index;
        int ret;

        ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, extack);
        if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
                goto err;
        }

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
                        0, 0);

        ret = fill_res_info(msg, device);
        if (ret)
                goto err_free;

        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
        nlmsg_free(msg);
err:
        ib_device_put(device);
        return ret;
}

static int _nldev_res_get_dumpit(struct ib_device *device,
                                 struct sk_buff *skb,
                                 struct netlink_callback *cb,
                                 unsigned int idx)
{
        int start = cb->args[0];
        struct nlmsghdr *nlh;

        if (idx < start)
                return 0;

        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
                        0, NLM_F_MULTI);

        if (fill_res_info(skb, device)) {
                nlmsg_cancel(skb, nlh);
                goto out;
        }
        nlmsg_end(skb, nlh);

        idx++;

out:
        cb->args[0] = idx;
        return skb->len;
}

static int nldev_res_get_dumpit(struct sk_buff *skb,
                                struct netlink_callback *cb)
{
        return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}

struct nldev_fill_res_entry {
        enum rdma_nldev_attr nldev_attr;
        u8 flags;
        u32 entry;
        u32 id;
};

enum nldev_res_flags {
        NLDEV_PER_DEV = 1 << 0,
};

static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
        [RDMA_RESTRACK_QP] = {
                .nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
                .entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
                .id = RDMA_NLDEV_ATTR_RES_LQPN,
        },
        [RDMA_RESTRACK_CM_ID] = {
                .nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
                .entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
                .id = RDMA_NLDEV_ATTR_RES_CM_IDN,
        },
        [RDMA_RESTRACK_CQ] = {
                .nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
                .flags = NLDEV_PER_DEV,
                .entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
                .id = RDMA_NLDEV_ATTR_RES_CQN,
        },
        [RDMA_RESTRACK_MR] = {
                .nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
                .flags = NLDEV_PER_DEV,
                .entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
                .id = RDMA_NLDEV_ATTR_RES_MRN,
        },
        [RDMA_RESTRACK_PD] = {
                .nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
                .flags = NLDEV_PER_DEV,
                .entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
                .id = RDMA_NLDEV_ATTR_RES_PDN,
        },
        [RDMA_RESTRACK_COUNTER] = {
                .nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER,
                .entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
                .id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID,
        },
        [RDMA_RESTRACK_CTX] = {
                .nldev_attr = RDMA_NLDEV_ATTR_RES_CTX,
                .flags = NLDEV_PER_DEV,
                .entry = RDMA_NLDEV_ATTR_RES_CTX_ENTRY,
                .id = RDMA_NLDEV_ATTR_RES_CTXN,
        },
        [RDMA_RESTRACK_SRQ] = {
                .nldev_attr = RDMA_NLDEV_ATTR_RES_SRQ,
                .flags = NLDEV_PER_DEV,
                .entry = RDMA_NLDEV_ATTR_RES_SRQ_ENTRY,
                .id = RDMA_NLDEV_ATTR_RES_SRQN,
        },
};
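
/*
 * Common doit handler behind all RDMA_NLDEV_CMD_RES_*_GET requests:
 * look up a single restrack object by the per-type ID attribute and
 * fill one reply message through fill_func.
 */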
static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                               struct netlink_ext_ack *extack,
                               enum rdma_restrack_type res_type,
                               res_fill_func_t fill_func)
{
        const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct rdma_restrack_entry *res;
        struct ib_device *device;
        u32 index, id, port = 0;
        bool has_cap_net_admin;
        struct sk_buff *msg;
        int ret;

        ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, extack);
        if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
                port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
                if (!rdma_is_port_valid(device, port)) {
                        ret = -EINVAL;
                        goto err;
                }
        }

        if ((port && fe->flags & NLDEV_PER_DEV) ||
            (!port && ~fe->flags & NLDEV_PER_DEV)) {
                ret = -EINVAL;
                goto err;
        }

        id = nla_get_u32(tb[fe->id]);
        res = rdma_restrack_get_byid(device, res_type, id);
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto err;
        }

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
                goto err_get;
        }

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NL_GET_OP(nlh->nlmsg_type)),
                        0, 0);

        if (fill_nldev_handle(msg, device)) {
                ret = -EMSGSIZE;
                goto err_free;
        }

        has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);

        ret = fill_func(msg, has_cap_net_admin, res, port);
        if (ret)
                goto err_free;

        rdma_restrack_put(res);
        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
        nlmsg_free(msg);
err_get:
        rdma_restrack_put(res);
err:
        ib_device_put(device);
        return ret;
}
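
/*
 * Dump-mode counterpart of res_get_common_doit(): walk the whole restrack
 * table of the requested type, resuming from cb->args[0] across netlink
 * dump callbacks.
 */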
static int res_get_common_dumpit(struct sk_buff *skb,
                                 struct netlink_callback *cb,
                                 enum rdma_restrack_type res_type,
                                 res_fill_func_t fill_func)
{
        const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct rdma_restrack_entry *res;
        struct rdma_restrack_root *rt;
        int err, ret = 0, idx = 0;
        struct nlattr *table_attr;
        struct nlattr *entry_attr;
        struct ib_device *device;
        int start = cb->args[0];
        bool has_cap_net_admin;
        struct nlmsghdr *nlh;
        unsigned long id;
        u32 index, port = 0;
        bool filled = false;

        err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, NULL);
        /*
         * Right now, we are expecting the device index to get res information,
         * but it is possible to extend this code to return all devices in
         * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX.
         * if it doesn't exist, we will iterate over all devices.
         *
         * But it is not needed for now.
         */
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        /*
         * If no PORT_INDEX is supplied, we will return all QPs from that device
         */
        if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
                port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
                if (!rdma_is_port_valid(device, port)) {
                        ret = -EINVAL;
                        goto err_index;
                }
        }

        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NL_GET_OP(cb->nlh->nlmsg_type)),
                        0, NLM_F_MULTI);

        if (fill_nldev_handle(skb, device)) {
                ret = -EMSGSIZE;
                goto err;
        }

        table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
        if (!table_attr) {
                ret = -EMSGSIZE;
                goto err;
        }

        has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);

        rt = &device->res[res_type];
        xa_lock(&rt->xa);
        /*
         * FIXME: if the skip ahead is something common this loop should
         * use xas_for_each & xas_pause to optimize, we can have a lot of
         * objects.
         */
        xa_for_each(&rt->xa, id, res) {
                if (idx < start || !rdma_restrack_get(res))
                        goto next;

                xa_unlock(&rt->xa);

                filled = true;

                entry_attr = nla_nest_start_noflag(skb, fe->entry);
                if (!entry_attr) {
                        ret = -EMSGSIZE;
                        rdma_restrack_put(res);
                        goto msg_full;
                }

                ret = fill_func(skb, has_cap_net_admin, res, port);

                rdma_restrack_put(res);

                if (ret) {
                        nla_nest_cancel(skb, entry_attr);
                        if (ret == -EMSGSIZE)
                                goto msg_full;
                        if (ret == -EAGAIN)
                                goto again;
                        goto res_err;
                }
                nla_nest_end(skb, entry_attr);
again:          xa_lock(&rt->xa);
next:           idx++;
        }
        xa_unlock(&rt->xa);

msg_full:
        nla_nest_end(skb, table_attr);
        nlmsg_end(skb, nlh);
        cb->args[0] = idx;

        /*
         * No more entries to fill, cancel the message and
         * return 0 to mark end of dumpit.
         */
        if (!filled)
                goto err;

        ib_device_put(device);
        return skb->len;

res_err:
        nla_nest_cancel(skb, table_attr);

err:
        nlmsg_cancel(skb, nlh);

err_index:
        ib_device_put(device);
        return ret;
}

#define RES_GET_FUNCS(name, type)                                             \
        static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,         \
                                                 struct netlink_callback *cb) \
        {                                                                     \
                return res_get_common_dumpit(skb, cb, type,                   \
                                             fill_res_##name##_entry);        \
        }                                                                     \
        static int nldev_res_get_##name##_doit(struct sk_buff *skb,           \
                                               struct nlmsghdr *nlh,          \
                                               struct netlink_ext_ack *extack)\
        {                                                                     \
                return res_get_common_doit(skb, nlh, extack, type,            \
                                           fill_res_##name##_entry);          \
        }

RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP);
RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR);
RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);
RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX);
RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ);
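
/*
 * Registry of "rdma link add" providers (e.g. rxe and siw register here).
 * nldev_newlink() looks the type up and, if it is not found, may ask for
 * the "rdma-link-<type>" module to be loaded.
 */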
static LIST_HEAD(link_ops);
static DECLARE_RWSEM(link_ops_rwsem);

static const struct rdma_link_ops *link_ops_get(const char *type)
{
        const struct rdma_link_ops *ops;

        list_for_each_entry(ops, &link_ops, list) {
                if (!strcmp(ops->type, type))
                        goto out;
        }
        ops = NULL;
out:
        return ops;
}

void rdma_link_register(struct rdma_link_ops *ops)
{
        down_write(&link_ops_rwsem);
        if (WARN_ON_ONCE(link_ops_get(ops->type)))
                goto out;
        list_add(&ops->list, &link_ops);
out:
        up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_register);

void rdma_link_unregister(struct rdma_link_ops *ops)
{
        down_write(&link_ops_rwsem);
        list_del(&ops->list);
        up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_unregister);

static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
                         struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        char ibdev_name[IB_DEVICE_NAME_MAX];
        const struct rdma_link_ops *ops;
        char ndev_name[IFNAMSIZ];
        struct net_device *ndev;
        char type[IFNAMSIZ];
        int err;

        err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
            !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
                return -EINVAL;

        nla_strscpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
                    sizeof(ibdev_name));
        if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
                return -EINVAL;

        nla_strscpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
        nla_strscpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
                    sizeof(ndev_name));

        ndev = dev_get_by_name(sock_net(skb->sk), ndev_name);
        if (!ndev)
                return -ENODEV;

        down_read(&link_ops_rwsem);
        ops = link_ops_get(type);
#ifdef CONFIG_MODULES
        if (!ops) {
                up_read(&link_ops_rwsem);
                request_module("rdma-link-%s", type);
                down_read(&link_ops_rwsem);
                ops = link_ops_get(type);
        }
#endif
        err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
        up_read(&link_ops_rwsem);
        dev_put(ndev);

        return err;
}

static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
                         struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        u32 index;
        int err;

        err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                                     nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
                ib_device_put(device);
                return -EINVAL;
        }

        ib_unregister_device_and_put(device);
        return 0;
}

static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
                             struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE];
        struct ib_client_nl_info data = {};
        struct ib_device *ibdev = NULL;
        struct sk_buff *msg;
        u32 index;
        int err;

        err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
                          extack);
        if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
                return -EINVAL;

        nla_strscpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
                    sizeof(client_name));

        if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
                index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
                ibdev = ib_device_get_by_index(sock_net(skb->sk), index);
                if (!ibdev)
                        return -EINVAL;

                if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
                        data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
                        if (!rdma_is_port_valid(ibdev, data.port)) {
                                err = -EINVAL;
                                goto out_put;
                        }
                } else {
                        data.port = -1;
                }
        } else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
                return -EINVAL;
        }

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                err = -ENOMEM;
                goto out_put;
        }
        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NLDEV_CMD_GET_CHARDEV),
                        0, 0);

        data.nl_msg = msg;
        err = ib_get_client_nl_info(ibdev, client_name, &data);
        if (err)
                goto out_nlmsg;

        err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV,
                                huge_encode_dev(data.cdev->devt),
                                RDMA_NLDEV_ATTR_PAD);
        if (err)
                goto out_data;
        err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi,
                                RDMA_NLDEV_ATTR_PAD);
        if (err)
                goto out_data;
        if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME,
                           dev_name(data.cdev))) {
                err = -EMSGSIZE;
                goto out_data;
        }

        nlmsg_end(msg, nlh);
        put_device(data.cdev);
        if (ibdev)
                ib_device_put(ibdev);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

out_data:
        put_device(data.cdev);
out_nlmsg:
        nlmsg_free(msg);
out_put:
        if (ibdev)
                ib_device_put(ibdev);
        return err;
}

static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                              struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct sk_buff *msg;
        int err;

        err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, extack);
        if (err)
                return err;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NLDEV_CMD_SYS_GET),
                        0, 0);

        err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
                         (u8)ib_devices_shared_netns);
        if (err) {
                nlmsg_free(msg);
                return err;
        }

        /*
         * Copy-on-fork is supported.
         * See commits:
         * 70e806e4e645 ("mm: Do early cow for pinned pages during fork() for ptes")
         * 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm")
         * for more details. Don't backport this without them.
         *
         * Return value ignored on purpose, assume copy-on-fork is not
         * supported in case of failure.
         */
        nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1);

        nlmsg_end(msg, nlh);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
}

static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                                  struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        u8 enable;
        int err;

        err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
                return -EINVAL;

        enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
        /* Only 0 and 1 are supported */
        if (enable > 1)
                return -EINVAL;

        err = rdma_compatdev_set(enable);
        return err;
}

static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                               struct netlink_ext_ack *extack)
{
        u32 index, port, mode, mask = 0, qpn, cntn = 0;
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        struct sk_buff *msg;
        int ret;

        ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, extack);
        /* Currently only counter for QP is supported */
        if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
            !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
            !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_MODE])
                return -EINVAL;

        if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
        if (!rdma_is_port_valid(device, port)) {
                ret = -EINVAL;
                goto err;
        }

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
                goto err;
        }
        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NLDEV_CMD_STAT_SET),
                        0, 0);

        mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
        if (mode == RDMA_COUNTER_MODE_AUTO) {
                if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
                        mask = nla_get_u32(
                                tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
                ret = rdma_counter_set_auto_mode(device, port, mask, extack);
                if (ret)
                        goto err_msg;
        } else {
                if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
                        goto err_msg;
                qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
                if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
                        cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
                        ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
                } else {
                        ret = rdma_counter_bind_qpn_alloc(device, port,
                                                          qpn, &cntn);
                }
                if (ret)
                        goto err_msg;

                if (fill_nldev_handle(msg, device) ||
                    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
                    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
                    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
                        ret = -EMSGSIZE;
                        goto err_fill;
                }
        }

        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_fill:
        rdma_counter_unbind_qpn(device, port, qpn, cntn);
err_msg:
        nlmsg_free(msg);
err:
        ib_device_put(device);
        return ret;
}

static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                               struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        struct ib_device *device;
        struct sk_buff *msg;
        u32 index, port, qpn, cntn;
        int ret;

        ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, extack);
        if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
            !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] ||
            !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] ||
            !tb[RDMA_NLDEV_ATTR_RES_LQPN])
                return -EINVAL;

        if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
        if (!rdma_is_port_valid(device, port)) {
                ret = -EINVAL;
                goto err;
        }

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
                goto err;
        }
        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NLDEV_CMD_STAT_SET),
                        0, 0);

        cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
        qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
        if (fill_nldev_handle(msg, device) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
                ret = -EMSGSIZE;
                goto err_fill;
        }

        ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
        if (ret)
                goto err_fill;

        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_fill:
        nlmsg_free(msg);
err:
        ib_device_put(device);
        return ret;
}

static int stat_get_doit_default_counter(struct sk_buff *skb,
                                         struct nlmsghdr *nlh,
                                         struct netlink_ext_ack *extack,
                                         struct nlattr *tb[])
{
        struct rdma_hw_stats *stats;
        struct nlattr *table_attr;
        struct ib_device *device;
        int ret, num_cnts, i;
        struct sk_buff *msg;
        u32 index, port;
        u64 v;

        if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        if (!device->ops.alloc_hw_stats || !device->ops.get_hw_stats) {
                ret = -EINVAL;
                goto err;
        }

        port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
        if (!rdma_is_port_valid(device, port)) {
                ret = -EINVAL;
                goto err;
        }

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
                goto err;
        }

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NLDEV_CMD_STAT_GET),
                        0, 0);

        if (fill_nldev_handle(msg, device) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
                ret = -EMSGSIZE;
                goto err_msg;
        }

        stats = device->port_data ? device->port_data[port].hw_stats : NULL;
        if (stats == NULL) {
                ret = -EINVAL;
                goto err_msg;
        }
        mutex_lock(&stats->lock);

        num_cnts = device->ops.get_hw_stats(device, stats, port, 0);
        if (num_cnts < 0) {
                ret = -EINVAL;
                goto err_stats;
        }

        table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
        if (!table_attr) {
                ret = -EMSGSIZE;
                goto err_stats;
        }
        for (i = 0; i < num_cnts; i++) {
                v = stats->value[i] +
                        rdma_counter_get_hwstat_value(device, port, i);
                if (rdma_nl_stat_hwcounter_entry(msg, stats->names[i], v)) {
                        ret = -EMSGSIZE;
                        goto err_table;
                }
        }
        nla_nest_end(msg, table_attr);

        mutex_unlock(&stats->lock);
        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_table:
        nla_nest_cancel(msg, table_attr);
err_stats:
        mutex_unlock(&stats->lock);
err_msg:
        nlmsg_free(msg);
err:
        ib_device_put(device);
        return ret;
}

static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
                            struct netlink_ext_ack *extack, struct nlattr *tb[])
{
        static enum rdma_nl_counter_mode mode;
        static enum rdma_nl_counter_mask mask;
        struct ib_device *device;
        struct sk_buff *msg;
        u32 index, port;
        int ret;

        if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID])
                return nldev_res_get_counter_doit(skb, nlh, extack);

        if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] ||
            !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
                return -EINVAL;

        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
        device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;

        port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
        if (!rdma_is_port_valid(device, port)) {
                ret = -EINVAL;
                goto err;
        }

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
                goto err;
        }

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                         RDMA_NLDEV_CMD_STAT_GET),
                        0, 0);

        ret = rdma_counter_get_mode(device, port, &mode, &mask);
        if (ret)
                goto err_msg;

        if (fill_nldev_handle(msg, device) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
                ret = -EMSGSIZE;
                goto err_msg;
        }

        if ((mode == RDMA_COUNTER_MODE_AUTO) &&
            nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
                ret = -EMSGSIZE;
                goto err_msg;
        }

        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_msg:
        nlmsg_free(msg);
err:
        ib_device_put(device);
        return ret;
}

static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                               struct netlink_ext_ack *extack)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        int ret;

        ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, extack);
        if (ret)
                return -EINVAL;

        if (!tb[RDMA_NLDEV_ATTR_STAT_RES])
                return stat_get_doit_default_counter(skb, nlh, extack, tb);

        switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
        case RDMA_NLDEV_ATTR_RES_QP:
                ret = stat_get_doit_qp(skb, nlh, extack, tb);
                break;
        case RDMA_NLDEV_ATTR_RES_MR:
                ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR,
                                          fill_stat_mr_entry);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int nldev_stat_get_dumpit(struct sk_buff *skb,
                                 struct netlink_callback *cb)
{
        struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
        int ret;

        ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                          nldev_policy, NULL);
        if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
                return -EINVAL;

        switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
        case RDMA_NLDEV_ATTR_RES_QP:
                ret = nldev_res_get_counter_dumpit(skb, cb);
                break;
        case RDMA_NLDEV_ATTR_RES_MR:
                ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR,
                                            fill_stat_mr_entry);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
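
/*
 * Dispatch table registered with the RDMA netlink core. Entries with
 * RDMA_NL_ADMIN_PERM require CAP_NET_ADMIN; the raw resource dumps are
 * restricted the same way since they expose device-opaque data.
 */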
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
        [RDMA_NLDEV_CMD_GET] = {
                .doit = nldev_get_doit,
                .dump = nldev_get_dumpit,
        },
        [RDMA_NLDEV_CMD_GET_CHARDEV] = {
                .doit = nldev_get_chardev,
        },
        [RDMA_NLDEV_CMD_SET] = {
                .doit = nldev_set_doit,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NLDEV_CMD_NEWLINK] = {
                .doit = nldev_newlink,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NLDEV_CMD_DELLINK] = {
                .doit = nldev_dellink,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NLDEV_CMD_PORT_GET] = {
                .doit = nldev_port_get_doit,
                .dump = nldev_port_get_dumpit,
        },
        [RDMA_NLDEV_CMD_RES_GET] = {
                .doit = nldev_res_get_doit,
                .dump = nldev_res_get_dumpit,
        },
        [RDMA_NLDEV_CMD_RES_QP_GET] = {
                .doit = nldev_res_get_qp_doit,
                .dump = nldev_res_get_qp_dumpit,
        },
        [RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
                .doit = nldev_res_get_cm_id_doit,
                .dump = nldev_res_get_cm_id_dumpit,
        },
        [RDMA_NLDEV_CMD_RES_CQ_GET] = {
                .doit = nldev_res_get_cq_doit,
                .dump = nldev_res_get_cq_dumpit,
        },
        [RDMA_NLDEV_CMD_RES_MR_GET] = {
                .doit = nldev_res_get_mr_doit,
                .dump = nldev_res_get_mr_dumpit,
        },
        [RDMA_NLDEV_CMD_RES_PD_GET] = {
                .doit = nldev_res_get_pd_doit,
                .dump = nldev_res_get_pd_dumpit,
        },
        [RDMA_NLDEV_CMD_RES_CTX_GET] = {
                .doit = nldev_res_get_ctx_doit,
                .dump = nldev_res_get_ctx_dumpit,
        },
        [RDMA_NLDEV_CMD_RES_SRQ_GET] = {
                .doit = nldev_res_get_srq_doit,
                .dump = nldev_res_get_srq_dumpit,
        },
        [RDMA_NLDEV_CMD_SYS_GET] = {
                .doit = nldev_sys_get_doit,
        },
        [RDMA_NLDEV_CMD_SYS_SET] = {
                .doit = nldev_set_sys_set_doit,
        },
        [RDMA_NLDEV_CMD_STAT_SET] = {
                .doit = nldev_stat_set_doit,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NLDEV_CMD_STAT_GET] = {
                .doit = nldev_stat_get_doit,
                .dump = nldev_stat_get_dumpit,
        },
        [RDMA_NLDEV_CMD_STAT_DEL] = {
                .doit = nldev_stat_del_doit,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NLDEV_CMD_RES_QP_GET_RAW] = {
                .doit = nldev_res_get_qp_raw_doit,
                .dump = nldev_res_get_qp_raw_dumpit,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = {
                .doit = nldev_res_get_cq_raw_doit,
                .dump = nldev_res_get_cq_raw_dumpit,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NLDEV_CMD_RES_MR_GET_RAW] = {
                .doit = nldev_res_get_mr_raw_doit,
                .dump = nldev_res_get_mr_raw_dumpit,
                .flags = RDMA_NL_ADMIN_PERM,
        },
};

void __init nldev_init(void)
{
        rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
        rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);