// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Monitoring SMC transport protocol sockets
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>
#include <linux/smc_diag.h>
#include <net/netlink.h>
#include <net/smc.h>

#include "smc.h"
#include "smc_core.h"

struct smc_diag_dump_ctx {
	int pos[2];
};

static struct smc_diag_dump_ctx *smc_dump_context(struct netlink_callback *cb)
{
	return (struct smc_diag_dump_ctx *)cb->ctx;
}
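
/* Fill the diag message fields common to all SMC sockets: address family,
 * socket cookie and the address/port tuple taken from the internal CLC
 * (TCP) socket.
 */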
static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	memset(r, 0, sizeof(*r));
	r->diag_family = sk->sk_family;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);
	if (!smc->clcsock)
		return;
	r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
	r->id.idiag_dport = smc->clcsock->sk->sk_dport;
	r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
	if (sk->sk_protocol == SMCPROTO_SMC) {
		r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
		r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (sk->sk_protocol == SMCPROTO_SMC6) {
		memcpy(&r->id.idiag_src, &smc->clcsock->sk->sk_v6_rcv_saddr,
		       sizeof(smc->clcsock->sk->sk_v6_rcv_saddr));
		memcpy(&r->id.idiag_dst, &smc->clcsock->sk->sk_v6_daddr,
		       sizeof(smc->clcsock->sk->sk_v6_daddr));
#endif
	}
}
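
/* Report the shutdown state as a netlink attribute and fill the uid/inode
 * fields; returns nonzero if the attribute did not fit into the skb.
 */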
static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
				   struct smc_diag_msg *r,
				   struct user_namespace *user_ns)
{
	if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown))
		return 1;

	r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->diag_inode = sock_i_ino(sk);
	return 0;
}
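
/* Emit one SMC_DIAG message for @sk, including the optional CONNINFO,
 * LGRINFO and DMBINFO attributes when requested via req->diag_ext.
 */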
static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
			   struct netlink_callback *cb,
			   const struct smc_diag_req *req,
			   struct nlattr *bc)
{
	struct smc_sock *smc = smc_sk(sk);
	struct smc_diag_fallback fallback;
	struct user_namespace *user_ns;
	struct smc_diag_msg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*r), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	smc_diag_msg_common_fill(r, sk);
	r->diag_state = sk->sk_state;
	if (smc->use_fallback)
		r->diag_mode = SMC_DIAG_MODE_FALLBACK_TCP;
	else if (smc->conn.lgr && smc->conn.lgr->is_smcd)
		r->diag_mode = SMC_DIAG_MODE_SMCD;
	else
		r->diag_mode = SMC_DIAG_MODE_SMCR;
	user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk);
	if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
		goto errout;

	fallback.reason = smc->fallback_rsn;
	fallback.peer_diagnosis = smc->peer_diagnosis;
	if (nla_put(skb, SMC_DIAG_FALLBACK, sizeof(fallback), &fallback) < 0)
		goto errout;
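
	/* connection-level info (cursors, buffer sizes, flags) is only
	 * available once an SMC connection exists, i.e. a local alert
	 * token has been assigned
	 */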
	if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) &&
	    smc->conn.alert_token_local) {
		struct smc_connection *conn = &smc->conn;
		struct smc_diag_conninfo cinfo = {
			.token = conn->alert_token_local,
			.sndbuf_size = conn->sndbuf_desc ?
				conn->sndbuf_desc->len : 0,
			.rmbe_size = conn->rmb_desc ? conn->rmb_desc->len : 0,
			.peer_rmbe_size = conn->peer_rmbe_size,

			.rx_prod.wrap = conn->local_rx_ctrl.prod.wrap,
			.rx_prod.count = conn->local_rx_ctrl.prod.count,
			.rx_cons.wrap = conn->local_rx_ctrl.cons.wrap,
			.rx_cons.count = conn->local_rx_ctrl.cons.count,

			.tx_prod.wrap = conn->local_tx_ctrl.prod.wrap,
			.tx_prod.count = conn->local_tx_ctrl.prod.count,
			.tx_cons.wrap = conn->local_tx_ctrl.cons.wrap,
			.tx_cons.count = conn->local_tx_ctrl.cons.count,

			.tx_prod_flags =
				*(u8 *)&conn->local_tx_ctrl.prod_flags,
			.tx_conn_state_flags =
				*(u8 *)&conn->local_tx_ctrl.conn_state_flags,
			.rx_prod_flags = *(u8 *)&conn->local_rx_ctrl.prod_flags,
			.rx_conn_state_flags =
				*(u8 *)&conn->local_rx_ctrl.conn_state_flags,

			.tx_prep.wrap = conn->tx_curs_prep.wrap,
			.tx_prep.count = conn->tx_curs_prep.count,
			.tx_sent.wrap = conn->tx_curs_sent.wrap,
			.tx_sent.count = conn->tx_curs_sent.count,
			.tx_fin.wrap = conn->tx_curs_fin.wrap,
			.tx_fin.count = conn->tx_curs_fin.count,
		};

		if (nla_put(skb, SMC_DIAG_CONNINFO, sizeof(cinfo), &cinfo) < 0)
			goto errout;
	}
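
	/* SMC-R sockets additionally report their link group: its role and
	 * the IB device, port and GIDs of the connection's link
	 */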
	if (smc->conn.lgr && !smc->conn.lgr->is_smcd &&
	    (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) &&
	    !list_empty(&smc->conn.lgr->list)) {
		struct smc_diag_lgrinfo linfo = {
			.role = smc->conn.lgr->role,
			.lnk[0].ibport = smc->conn.lnk->ibport,
			.lnk[0].link_id = smc->conn.lnk->link_id,
		};

		memcpy(linfo.lnk[0].ibname,
		       smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
		       sizeof(smc->conn.lnk->smcibdev->ibdev->name));
		smc_gid_be16_convert(linfo.lnk[0].gid,
				     smc->conn.lnk->gid);
		smc_gid_be16_convert(linfo.lnk[0].peer_gid,
				     smc->conn.lnk->peer_gid);

		if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
			goto errout;
	}
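
	/* SMC-D sockets report their DMB instead: link group id, local and
	 * peer ISM GIDs and the DMB tokens
	 */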
	if (smc->conn.lgr && smc->conn.lgr->is_smcd &&
	    (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
	    !list_empty(&smc->conn.lgr->list)) {
		struct smc_connection *conn = &smc->conn;
		struct smcd_diag_dmbinfo dinfo;

		memset(&dinfo, 0, sizeof(dinfo));

		dinfo.linkid = *((u32 *)conn->lgr->id);
		dinfo.peer_gid = conn->lgr->peer_gid;
		dinfo.my_gid = conn->lgr->smcd->local_gid;
		dinfo.token = conn->rmb_desc->token;
		dinfo.peer_token = conn->peer_token;

		if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
			goto errout;
	}

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
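
/* Walk the socket hash table of one SMC protocol (IPv4 or IPv6) and dump
 * all sockets of the requesting net namespace, resuming at the position
 * stored in the per-dump context.
 */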
static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb,
			       struct netlink_callback *cb, int p_type)
{
	struct smc_diag_dump_ctx *cb_ctx = smc_dump_context(cb);
	struct net *net = sock_net(skb->sk);
	int snum = cb_ctx->pos[p_type];
	struct nlattr *bc = NULL;
	struct hlist_head *head;
	int rc = 0, num = 0;
	struct sock *sk;

	read_lock(&prot->h.smc_hash->lock);
	head = &prot->h.smc_hash->ht;
	if (hlist_empty(head))
		goto out;

	sk_for_each(sk, head) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (num < snum)
			goto next;
		rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc);
		if (rc < 0)
			goto out;
next:
		num++;
	}

out:
	read_unlock(&prot->h.smc_hash->lock);
	cb_ctx->pos[p_type] = num;
	return rc;
}
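
/* Dump the SMC (IPv4) socket table first, then the SMC6 (IPv6) table. */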
static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int rc = 0;

	rc = smc_diag_dump_proto(&smc_proto, skb, cb, SMCPROTO_SMC);
	if (!rc)
		smc_diag_dump_proto(&smc_proto6, skb, cb, SMCPROTO_SMC6);
	return skb->len;
}
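
/* sock_diag entry point: SOCK_DIAG_BY_FAMILY dump requests are handed off
 * to the netlink dump machinery, everything else is ignored.
 */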
static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	struct net *net = sock_net(skb->sk);

	if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
	    h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = smc_diag_dump,
			.min_dump_alloc = SKB_WITH_OVERHEAD(32768),
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	}
	return 0;
}
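
/* AF_SMC instance in the sock_diag handler table */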
static const struct sock_diag_handler smc_diag_handler = {
	.family = AF_SMC,
	.dump = smc_diag_handler_dump,
};
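
/* register/unregister the AF_SMC handler with the sock_diag core */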
static int __init smc_diag_init(void)
{
	return sock_diag_register(&smc_diag_handler);
}

static void __exit smc_diag_exit(void)
{
	sock_diag_unregister(&smc_diag_handler);
}

module_init(smc_diag_init);
module_exit(smc_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 43 /* AF_SMC */);