// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
#include <rdma/ib_addr.h>

#include "rxe.h"
#include "rxe_net.h"
#include "rxe_loc.h"

static struct rxe_recv_sockets recv_sockets;

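/* Multicast GIDs are mapped to Ethernet multicast addresses with the
 * standard IPv6-to-Ethernet mapping (33:33:<low 32 bits of the GID>) and
 * then added to or removed from the underlying netdev's multicast filter.
 */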
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_add(rxe->ndev, ll_addr);

	return err;
}

int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_del(rxe->ndev, ll_addr);

	return err;
}

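/* Resolve an IPv4 route to daddr through the kernel routing table;
 * returns the dst_entry on success or NULL if no route exists.
 */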
static struct dst_entry *rxe_find_route4(struct net_device *ndev,
					 struct in_addr *saddr,
					 struct in_addr *daddr)
{
	struct rtable *rt;
	struct flowi4 fl = { { 0 } };

	fl.flowi4_oif = ndev->ifindex;
	memcpy(&fl.saddr, saddr, sizeof(*saddr));
	memcpy(&fl.daddr, daddr, sizeof(*daddr));
	fl.flowi4_proto = IPPROTO_UDP;

	rt = ip_route_output_key(&init_net, &fl);
	if (IS_ERR(rt)) {
		pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
		return NULL;
	}

	return &rt->dst;
}

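/* IPv6 route lookup through the ipv6 stub, using the UDPv6 tunnel socket;
 * when CONFIG_IPV6 is disabled the stub variant below always returns NULL.
 */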
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	struct dst_entry *ndst;
	struct flowi6 fl6 = { { 0 } };

	fl6.flowi6_oif = ndev->ifindex;
	memcpy(&fl6.saddr, saddr, sizeof(*saddr));
	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
	fl6.flowi6_proto = IPPROTO_UDP;

	ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
					       recv_sockets.sk6->sk, &fl6,
					       NULL);
	if (IS_ERR(ndst)) {
		pr_err_ratelimited("no route to %pI6\n", daddr);
		return NULL;
	}

	if (unlikely(ndst->error)) {
		pr_err("no route to %pI6\n", daddr);
		goto put;
	}

	return ndst;
put:
	dst_release(ndst);
	return NULL;
}

#else
static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	return NULL;
}

#endif

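/* Find (and for RC QPs, cache on the QP's kernel socket) a dst_entry for
 * the address vector's destination. A cached route is revalidated with
 * dst_check() against the saved cookie before it is reused.
 */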
static struct dst_entry *rxe_find_route(struct net_device *ndev,
					struct rxe_qp *qp,
					struct rxe_av *av)
{
	struct dst_entry *dst = NULL;

	if (qp_type(qp) == IB_QPT_RC)
		dst = sk_dst_get(qp->sk->sk);

	if (!dst || !dst_check(dst, qp->dst_cookie)) {
		if (dst)
			dst_release(dst);

		if (av->network_type == RXE_NETWORK_TYPE_IPV4) {
			struct in_addr *saddr;
			struct in_addr *daddr;

			saddr = &av->sgid_addr._sockaddr_in.sin_addr;
			daddr = &av->dgid_addr._sockaddr_in.sin_addr;
			dst = rxe_find_route4(ndev, saddr, daddr);
		} else if (av->network_type == RXE_NETWORK_TYPE_IPV6) {
			struct in6_addr *saddr6;
			struct in6_addr *daddr6;

			saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr;
			daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr;
			dst = rxe_find_route6(ndev, saddr6, daddr6);
#if IS_ENABLED(CONFIG_IPV6)
			if (dst)
				qp->dst_cookie =
					rt6_get_cookie((struct rt6_info *)dst);
#endif
		}

		if (dst && (qp_type(qp) == IB_QPT_RC)) {
			dst_hold(dst);
			sk_dst_set(qp->sk->sk, dst);
		}
	}

	return dst;
}

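/* encap_rcv callback of the RoCEv2 tunnel sockets: look up the rxe device
 * for the receiving netdev (or its real device for VLANs), fill in the
 * per-skb rxe_pkt_info and hand the packet to rxe_rcv().
 */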
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct udphdr *udph;
	struct rxe_dev *rxe;
	struct net_device *ndev = skb->dev;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	/* takes a reference on rxe->ib_dev
	 * drop when skb is freed
	 */
	rxe = rxe_get_dev_from_net(ndev);
	if (!rxe && is_vlan_dev(ndev))
		rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
	if (!rxe)
		goto drop;

	if (skb_linearize(skb)) {
		pr_err("skb_linearize failed\n");
		ib_device_put(&rxe->ib_dev);
		goto drop;
	}

	udph = udp_hdr(skb);
	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = (u8 *)(udph + 1);
	pkt->mask = RXE_GRH_MASK;
	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

	rxe_rcv(skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}

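/* Create a kernel UDP socket on the requested port and register it as a
 * UDP tunnel so received datagrams are steered to rxe_udp_encap_recv().
 */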
static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
					   bool ipv6)
{
	int err;
	struct socket *sock;
	struct udp_port_cfg udp_cfg = { };
	struct udp_tunnel_sock_cfg tnl_cfg = { };

	if (ipv6) {
		udp_cfg.family = AF_INET6;
		udp_cfg.ipv6_v6only = 1;
	} else {
		udp_cfg.family = AF_INET;
	}

	udp_cfg.local_udp_port = port;

	/* Create UDP socket */
	err = udp_sock_create(net, &udp_cfg, &sock);
	if (err < 0)
		return ERR_PTR(err);

	tnl_cfg.encap_type = 1;
	tnl_cfg.encap_rcv = rxe_udp_encap_recv;

	/* Setup UDP tunnel */
	setup_udp_tunnel_sock(net, sock, &tnl_cfg);

	return sock;
}

static void rxe_release_udp_tunnel(struct socket *sk)
{
	if (sk)
		udp_tunnel_sock_release(sk);
}

static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
			    __be16 dst_port)
{
	struct udphdr *udph;

	__skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);

	udph->dest = dst_port;
	udph->source = src_port;
	udph->len = htons(skb->len);
	udph->check = 0;
}

static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     __be32 saddr, __be32 daddr, __u8 proto,
			     __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	struct iphdr *iph;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, dst_clone(dst));
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version = IPVERSION;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = proto;
	iph->tos = tos;
	iph->daddr = daddr;
	iph->saddr = saddr;
	iph->ttl = ttl;
	__ip_select_ident(dev_net(dst->dev), iph,
			  skb_shinfo(skb)->gso_segs ?: 1);
}

static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     struct in6_addr *saddr, struct in6_addr *daddr,
			     __u8 proto, __u8 prio, __u8 ttl)
{
	struct ipv6hdr *ip6h;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
			    | IPSKB_REROUTED);
	skb_dst_set(skb, dst_clone(dst));

	__skb_push(skb, sizeof(*ip6h));
	skb_reset_network_header(skb);
	ip6h = ipv6_hdr(skb);
	ip6_flow_hdr(ip6h, prio, htonl(0));
	ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
	ip6h->nexthdr = proto;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;
}

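/* prepare4()/prepare6() resolve a route for a transmit skb and push the
 * UDP and IP headers in front of the already-built RoCE payload.
 */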
static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	struct rxe_qp *qp = pkt->qp;
	struct dst_entry *dst;
	bool xnet = false;
	__be16 df = htons(IP_DF);
	struct rxe_av *av = rxe_get_av(pkt);
	struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
	struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;

	dst = rxe_find_route(skb->dev, qp, av);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
			cpu_to_be16(ROCE_V2_UDP_DPORT));

	prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
			 av->grh.traffic_class, av->grh.hop_limit, df, xnet);

	dst_release(dst);
	return 0;
}

static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	struct rxe_qp *qp = pkt->qp;
	struct dst_entry *dst;
	struct rxe_av *av = rxe_get_av(pkt);
	struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
	struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;

	dst = rxe_find_route(skb->dev, qp, av);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
			cpu_to_be16(ROCE_V2_UDP_DPORT));

	prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
			 av->grh.traffic_class,
			 av->grh.hop_limit);

	dst_release(dst);
	return 0;
}

int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	int err = 0;

	if (skb->protocol == htons(ETH_P_IP))
		err = prepare4(pkt, skb);
	else if (skb->protocol == htons(ETH_P_IPV6))
		err = prepare6(pkt, skb);

	if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac))
		pkt->mask |= RXE_LOOPBACK_MASK;

	return err;
}

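/* skb destructor for transmitted packets: drop the QP's in-flight skb
 * count and kick the requester task if it was throttled waiting for skbs.
 */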
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rxe_qp *qp = sk->sk_user_data;
	int skb_out = atomic_dec_return(&qp->skb_out);

	if (unlikely(qp->need_req_skb &&
		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
		rxe_run_task(&qp->req.task, 1);

	rxe_drop_ref(qp);
}

static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
	int err;

	skb->destructor = rxe_skb_tx_dtor;
	skb->sk = pkt->qp->sk->sk;

	rxe_add_ref(pkt->qp);
	atomic_inc(&pkt->qp->skb_out);

	if (skb->protocol == htons(ETH_P_IP)) {
		err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	} else {
		pr_err("Unknown layer 3 protocol: %d\n", skb->protocol);
		atomic_dec(&pkt->qp->skb_out);
		rxe_drop_ref(pkt->qp);
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unlikely(net_xmit_eval(err))) {
		pr_debug("error sending packet: %d\n", err);
		return -EAGAIN;
	}

	return 0;
}

/* fix up a send packet to match the packets
 * received from UDP before looping them back
 */
static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
	memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));

	if (skb->protocol == htons(ETH_P_IP))
		skb_pull(skb, sizeof(struct iphdr));
	else
		skb_pull(skb, sizeof(struct ipv6hdr));

	if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev))) {
		kfree_skb(skb);
		return -EIO;
	}

	rxe_rcv(skb);
	return 0;
}

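/* Common transmit path: generate the ICRC trailer, then hand the packet to
 * the loopback path or the IP stack. Non-RC WQEs are completed here on the
 * last packet since no acknowledgement will arrive.
 */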
int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    struct sk_buff *skb)
{
	int err;
	int is_request = pkt->mask & RXE_REQ_MASK;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
	    (!is_request && (qp->resp.state != QP_STATE_READY))) {
		pr_info("Packet dropped. QP is not in ready state\n");
		goto drop;
	}

	rxe_icrc_generate(skb, pkt);

	if (pkt->mask & RXE_LOOPBACK_MASK)
		err = rxe_loopback(skb, pkt);
	else
		err = rxe_send(skb, pkt);
	if (err) {
		rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
		return err;
	}

	if ((qp_type(qp) != IB_QPT_RC) &&
	    (pkt->mask & RXE_END_MASK)) {
		pkt->wqe->state = wqe_state_done;
		rxe_run_task(&qp->comp.task, 1);
	}

	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
	goto done;

drop:
	kfree_skb(skb);
	err = 0;
done:
	return err;
}

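/* Allocate a transmit skb with headroom reserved for the L2/IP/UDP headers,
 * set its L3 protocol from the AV's network type and pre-fill the caller's
 * rxe_pkt_info.
 */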
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt)
{
	unsigned int hdr_len;
	struct sk_buff *skb = NULL;
	struct net_device *ndev;
	const struct ib_gid_attr *attr;
	const int port_num = 1;

	attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);
	if (IS_ERR(attr))
		return NULL;

	if (av->network_type == RXE_NETWORK_TYPE_IPV4)
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct iphdr);
	else
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct ipv6hdr);

	rcu_read_lock();
	ndev = rdma_read_gid_attr_ndev_rcu(attr);
	if (IS_ERR(ndev)) {
		rcu_read_unlock();
		goto out;
	}
	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
			GFP_ATOMIC);

	if (unlikely(!skb)) {
		rcu_read_unlock();
		goto out;
	}

	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));

	/* FIXME: hold reference to this netdev until life of this skb. */
	skb->dev = ndev;
	rcu_read_unlock();

	if (av->network_type == RXE_NETWORK_TYPE_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	pkt->rxe = rxe;
	pkt->port_num = port_num;
	pkt->hdr = skb_put(skb, paylen);
	pkt->mask |= RXE_GRH_MASK;

out:
	rdma_put_gid_attr(attr);
	return skb;
}

/* this is required by rxe_cfg to match rxe devices in
 * /sys/class/infiniband up with their underlying ethernet devices
 */
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
	return rxe->ndev->name;
}

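/* Bind a new rxe device to the given netdev and register it with the RDMA
 * core; the half-constructed ib_device is freed if registration fails.
 */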
int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
{
	int err;
	struct rxe_dev *rxe = NULL;

	rxe = ib_alloc_device(rxe_dev, ib_dev);
	if (!rxe)
		return -ENOMEM;

	rxe->ndev = ndev;

	err = rxe_add(rxe, ndev->mtu, ibdev_name);
	if (err) {
		ib_dealloc_device(&rxe->ib_dev);
		return err;
	}

	return 0;
}

static void rxe_port_event(struct rxe_dev *rxe,
			   enum ib_event_type event)
{
	struct ib_event ev;

	ev.device = &rxe->ib_dev;
	ev.element.port_num = 1;
	ev.event = event;

	ib_dispatch_event(&ev);
}

/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_ACTIVE;

	rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
	dev_info(&rxe->ib_dev.dev, "set active\n");
}

/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_DOWN;

	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
	rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
	dev_info(&rxe->ib_dev.dev, "set down\n");
}

void rxe_set_port_state(struct rxe_dev *rxe)
{
	if (netif_running(rxe->ndev) && netif_carrier_ok(rxe->ndev))
		rxe_port_up(rxe);
	else
		rxe_port_down(rxe);
}

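/* netdev notifier: mirror the underlying Ethernet device's lifecycle
 * (unregister, link up/down, MTU changes) onto the associated rxe port.
 */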
static int rxe_notify(struct notifier_block *not_blk,
		      unsigned long event,
		      void *arg)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(arg);
	struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);

	if (!rxe)
		goto out;

	switch (event) {
	case NETDEV_UNREGISTER:
		ib_unregister_device_queued(&rxe->ib_dev);
		break;
	case NETDEV_UP:
		rxe_port_up(rxe);
		break;
	case NETDEV_DOWN:
		rxe_port_down(rxe);
		break;
	case NETDEV_CHANGEMTU:
		pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
		rxe_set_mtu(rxe, ndev->mtu);
		break;
	case NETDEV_CHANGE:
		rxe_set_port_state(rxe);
		break;
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	default:
		pr_info("ignoring netdev event = %ld for %s\n",
			event, ndev->name);
		break;
	}

	ib_device_put(&rxe->ib_dev);
out:
	return NOTIFY_OK;
}

static struct notifier_block rxe_net_notifier = {
	.notifier_call = rxe_notify,
};

static int rxe_net_ipv4_init(void)
{
	recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
						htons(ROCE_V2_UDP_DPORT), false);
	if (IS_ERR(recv_sockets.sk4)) {
		recv_sockets.sk4 = NULL;
		pr_err("Failed to create IPv4 UDP tunnel\n");
		return -1;
	}

	return 0;
}

static int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
						htons(ROCE_V2_UDP_DPORT), true);
	if (PTR_ERR(recv_sockets.sk6) == -EAFNOSUPPORT) {
		recv_sockets.sk6 = NULL;
		pr_warn("IPv6 is not supported, can not create a UDPv6 socket\n");
		return 0;
	}

	if (IS_ERR(recv_sockets.sk6)) {
		recv_sockets.sk6 = NULL;
		pr_err("Failed to create IPv6 UDP tunnel\n");
		return -1;
	}
#endif
	return 0;
}

void rxe_net_exit(void)
{
	rxe_release_udp_tunnel(recv_sockets.sk6);
	rxe_release_udp_tunnel(recv_sockets.sk4);
	unregister_netdevice_notifier(&rxe_net_notifier);
}

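/* Module-level init: create the IPv4/IPv6 tunnel sockets and register the
 * netdev notifier; on failure everything is unwound via rxe_net_exit().
 */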
int rxe_net_init(void)
{
	int err;

	recv_sockets.sk6 = NULL;

	err = rxe_net_ipv4_init();
	if (err)
		return err;
	err = rxe_net_ipv6_init();
	if (err)
		goto err_out;
	err = register_netdevice_notifier(&rxe_net_notifier);
	if (err) {
		pr_err("Failed to register netdev notifier\n");
		goto err_out;
	}

	return 0;

err_out:
	rxe_net_exit();
	return err;
}