// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 */

#include "ipvlan.h"
static u32 ipvlan_jhash_secret __read_mostly;
void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}
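/* ipvlan_count_rx() accounts one received packet on the slave's per-CPU
 * counters; @mcast selects the multicast counter and !@success lands in
 * rx_errs. Writers run in softirq context, so the u64_stats syncp is
 * sufficient protection for the 64-bit counters on 32-bit hosts.
 */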
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
		     unsigned int len, bool success, bool mcast)
{
	if (likely(success)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
		u64_stats_update_begin(&pcptr->syncp);
		u64_stats_inc(&pcptr->rx_pkts);
		u64_stats_add(&pcptr->rx_bytes, len);
		if (mcast)
			u64_stats_inc(&pcptr->rx_mcast);
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
	}
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);
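/* A reader of these per-CPU counters is expected to use the matching
 * u64_stats fetch/retry loop. A minimal sketch (variable names are
 * illustrative only, not part of this file):
 *
 *	unsigned int start;
 *	u64 pkts, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&pcptr->syncp);
 *		pkts  = u64_stats_read(&pcptr->rx_pkts);
 *		bytes = u64_stats_read(&pcptr->rx_bytes);
 *	} while (u64_stats_fetch_retry(&pcptr->syncp, start));
 */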
#if IS_ENABLED(CONFIG_IPV6)
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	const struct in6_addr *ip6_addr = iaddr;

	return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}
#else
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	return 0;
}
#endif
static u8 ipvlan_get_v4_hash(const void *iaddr)
{
	const struct in_addr *ip4_addr = iaddr;

	return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}
static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
{
	if (!is_v6 && addr->atype == IPVL_IPV4) {
		struct in_addr *i4addr = (struct in_addr *)iaddr;

		return addr->ip4addr.s_addr == i4addr->s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (is_v6 && addr->atype == IPVL_IPV6) {
		struct in6_addr *i6addr = (struct in6_addr *)iaddr;

		return ipv6_addr_equal(&addr->ip6addr, i6addr);
#endif
	}

	return false;
}
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
					       const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
	       ipvlan_get_v4_hash(iaddr);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
		if (addr_equal(is_v6, addr, iaddr))
			return addr;
	return NULL;
}
void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	struct ipvl_port *port = ipvlan->port;
	u8 hash;

	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(&addr->ip4addr);
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}
void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
	hlist_del_init_rcu(&addr->hlnode);
}
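/* The address hash table is RCU-protected: ipvlan_ht_addr_add()/_del()
 * form the writer side (callers serialize, typically under rtnl), while
 * ipvlan_ht_addr_lookup() walks the chains from the packet path under
 * rcu_read_lock(). hlist_del_init_rcu() leaves the node "unhashed", which
 * is what the hlist_unhashed() check in ipvlan_ht_addr_add() relies on.
 */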
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
				   const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
		if (addr_equal(is_v6, addr, iaddr)) {
			ret = addr;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
	struct ipvl_dev *ipvlan;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
		if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
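/* ipvlan_get_L3_hdr() validates the packet and returns a pointer to its
 * L3 header (or to the ICMPv6 header for a Neighbour Solicitation),
 * reporting the kind through *type as one of IPVL_ARP, IPVL_IPV4,
 * IPVL_IPV6 or IPVL_ICMPV6; NULL means the packet could not be parsed.
 */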
void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
	void *lyr3h = NULL;

	switch (skb->protocol) {
	case htons(ETH_P_ARP): {
		struct arphdr *arph;

		if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
			return NULL;

		arph = arp_hdr(skb);
		*type = IPVL_ARP;
		lyr3h = arph;
		break;
	}
	case htons(ETH_P_IP): {
		u32 pktlen;
		struct iphdr *ip4h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
			return NULL;

		ip4h = ip_hdr(skb);
		pktlen = skb_ip_totlen(skb);
		if (ip4h->ihl < 5 || ip4h->version != 4)
			return NULL;
		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
			return NULL;

		*type = IPVL_IPV4;
		lyr3h = ip4h;
		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
			return NULL;

		ip6h = ipv6_hdr(skb);
		if (ip6h->version != 6)
			return NULL;

		*type = IPVL_IPV6;
		lyr3h = ip6h;
		/* Only Neighbour Solicitation packets need different treatment */
		if (ipv6_addr_any(&ip6h->saddr) &&
		    ip6h->nexthdr == NEXTHDR_ICMP) {
			struct icmp6hdr *icmph;

			if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
				return NULL;

			ip6h = ipv6_hdr(skb);
			icmph = (struct icmp6hdr *)(ip6h + 1);

			if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
				/* Need to access the ipv6 address in the body */
				if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
						+ sizeof(struct in6_addr))))
					return NULL;

				ip6h = ipv6_hdr(skb);
				icmph = (struct icmp6hdr *)(ip6h + 1);
			}

			*type = IPVL_ICMPV6;
			lyr3h = icmph;
		}
		break;
	}
#endif
	default:
		return NULL;
	}

	return lyr3h;
}
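/* Typical use, as on the TX path below (a sketch, not a verbatim quote):
 *
 *	int addr_type;
 *	void *lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
 *
 *	if (lyr3h)
 *		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
 */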
unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(__get_unaligned_cpu32(addr + 2),
			       ipvlan_jhash_secret);

	return hash & IPVLAN_MAC_FILTER_MASK;
}
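/* The bucket computed here indexes the per-slave mac_filters bitmap that
 * ipvlan_process_multicast() tests below: a set bit only means "some
 * subscribed address hashes here", so a colliding group may receive an
 * extra copy, which the slave's own stack then discards.
 */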
void ipvlan_process_multicast(struct work_struct *work)
{
	struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
	struct ethhdr *ethh;
	struct ipvl_dev *ipvlan;
	struct sk_buff *skb, *nskb;
	struct sk_buff_head list;
	unsigned int len;
	unsigned int mac_hash;
	int ret;
	u8 pkt_type;
	bool tx_pkt;

	__skb_queue_head_init(&list);

	spin_lock_bh(&port->backlog.lock);
	skb_queue_splice_tail_init(&port->backlog, &list);
	spin_unlock_bh(&port->backlog.lock);
	while ((skb = __skb_dequeue(&list)) != NULL) {
		struct net_device *dev = skb->dev;
		bool consumed = false;

		ethh = eth_hdr(skb);
		tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
		mac_hash = ipvlan_mac_hash(ethh->h_dest);

		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
			pkt_type = PACKET_BROADCAST;
		else
			pkt_type = PACKET_MULTICAST;
		rcu_read_lock();
		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
			if (tx_pkt && (ipvlan->dev == skb->dev))
				continue;
			if (!test_bit(mac_hash, ipvlan->mac_filters))
				continue;
			if (!(ipvlan->dev->flags & IFF_UP))
				continue;

			len = skb->len + ETH_HLEN;
			nskb = skb_clone(skb, GFP_ATOMIC);
			local_bh_disable();
			if (nskb) {
				consumed = true;
				nskb->pkt_type = pkt_type;
				nskb->dev = ipvlan->dev;
				if (tx_pkt)
					ret = dev_forward_skb(ipvlan->dev, nskb);
				else
					ret = netif_rx(nskb);
			}
			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
			local_bh_enable();
		}
		rcu_read_unlock();
		if (tx_pkt) {
			/* If the packet originated here, send it out. */
			skb->dev = port->dev;
			skb->pkt_type = pkt_type;
			dev_queue_xmit(skb);
		} else {
			if (consumed)
				consume_skb(skb);
			else
				kfree_skb(skb);
		}
		if (dev)
			dev_put(dev);
		cond_resched();
	}
}
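/* ipvlan_skb_crossing_ns() re-homes an skb onto @dev: skb_scrub_packet()
 * always clears per-path state, and the xnet flag additionally drops
 * namespace-local state (such as skb->mark) when source and destination
 * devices sit in different network namespaces.
 */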
static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
	bool xnet = true;

	if (dev)
		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	skb_scrub_packet(skb, xnet);
	skb->dev = dev;
}
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
			    bool local)
{
	struct ipvl_dev *ipvlan = addr->master;
	struct net_device *dev = ipvlan->dev;
	unsigned int len;
	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
	bool success = false;
	struct sk_buff *skb = *pskb;

	len = skb->len + ETH_HLEN;
	/* Only packets exchanged between two local slaves need the
	 * device-up check as well as the skb-share check.
	 */
	if (local) {
		if (unlikely(!(dev->flags & IFF_UP))) {
			kfree_skb(skb);
			goto out;
		}

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			goto out;

		*pskb = skb;
	}

	if (local) {
		skb->pkt_type = PACKET_HOST;
		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
			success = true;
	} else {
		skb->dev = dev;
		ret = RX_HANDLER_ANOTHER;
		success = true;
	}

out:
	ipvlan_count_rx(ipvlan, len, success, false);
	return ret;
}
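/* ipvlan_addr_lookup() maps a validated L3 header onto the owning slave.
 * @use_dest picks the address to match: true when deciding where to
 * deliver (destination lookup), false when asking whether the *source*
 * belongs to a local slave (see ipvlan_external_frame() below).
 */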
struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
				     int addr_type, bool use_dest)
{
	struct ipvl_addr *addr = NULL;

	switch (addr_type) {
#if IS_ENABLED(CONFIG_IPV6)
	case IPVL_IPV6: {
		struct ipv6hdr *ip6h;
		struct in6_addr *i6addr;

		ip6h = (struct ipv6hdr *)lyr3h;
		i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		break;
	}
	case IPVL_ICMPV6: {
		struct nd_msg *ndmh;
		struct in6_addr *i6addr;

		/* Make sure that Neighbour Solicitation ICMPv6 packets
		 * are handled, to avoid DAD issues.
		 */
		ndmh = (struct nd_msg *)lyr3h;
		if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
			i6addr = &ndmh->target;
			addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		}
		break;
	}
#endif
	case IPVL_IPV4: {
		struct iphdr *ip4h;
		__be32 *i4addr;

		ip4h = (struct iphdr *)lyr3h;
		i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i4addr, false);
		break;
	}
	case IPVL_ARP: {
		struct arphdr *arph;
		unsigned char *arp_ptr;
		__be32 dip;

		arph = (struct arphdr *)lyr3h;
		arp_ptr = (unsigned char *)(arph + 1);
		if (use_dest)
			arp_ptr += (2 * port->dev->addr_len) + 4;
		else
			arp_ptr += port->dev->addr_len;

		memcpy(&dip, arp_ptr, 4);
		addr = ipvlan_ht_addr_lookup(port, &dip, false);
		break;
	}
	}

	return addr;
}
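/* The helpers below implement the L3 TX path: the slave's packet is
 * routed in the master's network namespace (ip_route_output_flow() /
 * ip6_route_output()) and re-injected with ip_local_out() /
 * ip6_local_out(), so each slave behaves like a host sharing the
 * master's routing table.
 */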
static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	const struct iphdr *ip4h = ip_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int err, ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->ifindex,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.flowi4_mark = skb->mark,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}
	skb_dst_set(skb, &rt->dst);

	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	err = ip_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		DEV_STATS_INC(dev, tx_errors);
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	DEV_STATS_INC(dev, tx_errors);
	kfree_skb(skb);
out:
	return ret;
}
#if IS_ENABLED(CONFIG_IPV6)

static noinline_for_stack int
ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.flowi6_oif = dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};
	struct dst_entry *dst;
	int err;

	dst = ip6_route_output(dev_net(dev), NULL, &fl6);
	err = dst->error;
	if (err) {
		dst_release(dst);
		return err;
	}
	skb_dst_set(skb, dst);
	return 0;
}
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int err, ret = NET_XMIT_DROP;

	err = ipvlan_route_v6_outbound(dev, skb);
	if (unlikely(err)) {
		DEV_STATS_INC(dev, tx_errors);
		kfree_skb(skb);
		return err;
	}

	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

	err = ip6_local_out(dev_net(dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		DEV_STATS_INC(dev, tx_errors);
	else
		ret = NET_XMIT_SUCCESS;
	return ret;
}
#else
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	return NET_XMIT_DROP;
}
#endif
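/* ipvlan_process_outbound() strips the slave-built L2 header and hands
 * the packet to the v4/v6 helpers above; anything that is neither IPv4
 * nor IPv6 cannot be routed here and is dropped.
 */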
static int ipvlan_process_outbound(struct sk_buff *skb)
{
	int ret = NET_XMIT_DROP;

	/* The ipvlan is a pseudo-L2 device, so the packets that we receive
	 * will have L2; which needs to be discarded and processed further
	 * in the net-ns of the main device.
	 */
	if (skb_mac_header_was_set(skb)) {
		/* In this mode we don't care about
		 * multicast and broadcast traffic */
		struct ethhdr *ethh = eth_hdr(skb);

		if (is_multicast_ether_addr(ethh->h_dest)) {
			pr_debug_ratelimited(
				"Dropped {multi|broad}cast of type=[%x]\n",
				ntohs(skb->protocol));
			kfree_skb(skb);
			goto out;
		}

		skb_pull(skb, sizeof(*ethh));
		skb->mac_header = (typeof(skb->mac_header))~0U;
		skb_reset_network_header(skb);
	}

	if (skb->protocol == htons(ETH_P_IPV6))
		ret = ipvlan_process_v6_outbound(skb);
	else if (skb->protocol == htons(ETH_P_IP))
		ret = ipvlan_process_v4_outbound(skb);
	else {
		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
	}
out:
	return ret;
}
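/* Multicast and broadcast are deferred to the port work queue. The
 * backlog is bounded by IPVLAN_QBACKLOG_LIMIT, so a multicast storm
 * degrades into counted drops rather than unbounded queue growth.
 */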
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
				     struct sk_buff *skb, bool tx_pkt)
{
	if (skb->protocol == htons(ETH_P_PAUSE)) {
		kfree_skb(skb);
		return;
	}

	/* Record whether the deferred packet is from the TX or RX path.
	 * Deciding by looking at the packet's MAC addresses would lead to
	 * erroneous decisions. (This would be true for a loopback-mode
	 * master device or a hair-pin mode of the switch.)
	 */
	IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;

	spin_lock(&port->backlog.lock);
	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
		dev_hold(skb->dev);
		__skb_queue_tail(&port->backlog, skb);
		spin_unlock(&port->backlog.lock);
		schedule_work(&port->wq);
	} else {
		spin_unlock(&port->backlog.lock);
		dev_core_stats_rx_dropped_inc(skb->dev);
		kfree_skb(skb);
	}
}
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	void *lyr3h;
	struct ipvl_addr *addr;
	int addr_type;

	lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	if (!ipvlan_is_vepa(ipvlan->port)) {
		addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
		if (addr) {
			if (ipvlan_is_private(ipvlan->port)) {
				consume_skb(skb);
				return NET_XMIT_DROP;
			}
			ipvlan_rcv_frame(addr, &skb, true);
			return NET_XMIT_SUCCESS;
		}
	}
out:
	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return ipvlan_process_outbound(skb);
}
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = skb_eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (!ipvlan_is_vepa(ipvlan->port) &&
	    ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
			if (addr) {
				if (ipvlan_is_private(ipvlan->port)) {
					consume_skb(skb);
					return NET_XMIT_DROP;
				}
				ipvlan_rcv_frame(addr, &skb, true);
				return NET_XMIT_SUCCESS;
			}
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;

		/* Packet definitely does not belong to any of the
		 * virtual devices, but the dest is local. So forward
		 * the skb to the main-dev. At the RX side we just return
		 * RX_PASS for it to be processed further on the stack.
		 */
		dev_forward_skb(ipvlan->phy_dev, skb);
		return NET_XMIT_SUCCESS;

	} else if (is_multicast_ether_addr(eth->h_dest)) {
		skb_reset_mac_header(skb);
		ipvlan_skb_crossing_ns(skb, NULL);
		ipvlan_multicast_enqueue(ipvlan->port, skb, true);
		return NET_XMIT_SUCCESS;
	}

	skb->dev = ipvlan->phy_dev;
	return dev_queue_xmit(skb);
}
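/* ipvlan_queue_xmit() is the TX entry point, called from the driver's
 * ndo_start_xmit() implementation (in ipvlan_main.c, not shown here),
 * roughly as:
 *
 *	ret = ipvlan_queue_xmit(skb, dev);
 *	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
 *		... account tx_pkts/tx_bytes ...
 */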
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

	if (!port)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
#endif
		return ipvlan_xmit_mode_l3(skb, dev);
	}

	/* Should not reach here */
	WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
		lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
		if (!lyr3h)
			return true;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
		if (addr)
			return false;
	}

	return true;
}
static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	void *lyr3h;
	int addr_type;
	struct ipvl_addr *addr;
	struct sk_buff *skb = *pskb;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
	if (addr)
		ret = ipvlan_rcv_frame(addr, pskb, false);

out:
	return ret;
}
static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	struct sk_buff *skb = *pskb;
	struct ethhdr *eth = eth_hdr(skb);
	rx_handler_result_t ret = RX_HANDLER_PASS;

	if (is_multicast_ether_addr(eth->h_dest)) {
		if (ipvlan_external_frame(skb, port)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			/* External frames are queued for device-local
			 * distribution, but a copy is given to the master
			 * straight away to avoid sending duplicates later
			 * when the work queue processes this frame. This is
			 * achieved by returning RX_HANDLER_PASS.
			 */
			if (nskb) {
				ipvlan_skb_crossing_ns(nskb, NULL);
				ipvlan_multicast_enqueue(port, nskb, false);
			}
		}
	} else {
		/* Perform like l3 mode for non-multicast packets */
		ret = ipvlan_handle_mode_l3(pskb, port);
	}

	return ret;
}
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

	if (!port)
		return RX_HANDLER_PASS;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_handle_mode_l2(pskb, port);
	case IPVLAN_MODE_L3:
		return ipvlan_handle_mode_l3(pskb, port);
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
		return RX_HANDLER_PASS;
#endif
	}

	/* Should not reach here */
	WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}
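/* ipvlan_handle_frame() is the RX entry point: the driver registers it on
 * the master device with netdev_rx_handler_register() (in ipvlan_main.c,
 * not shown here), so every frame the master receives is demuxed through
 * the mode-specific handlers above.
 */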