// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 */

#include "ipvlan.h"

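/* Per-boot random salt used by the IPv4, IPv6 and MAC hash helpers below. */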
static u32 ipvlan_jhash_secret __read_mostly;

void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}

void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
		     unsigned int len, bool success, bool mcast)
{
	if (likely(success)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
		u64_stats_update_begin(&pcptr->syncp);
		u64_stats_inc(&pcptr->rx_pkts);
		u64_stats_add(&pcptr->rx_bytes, len);
		if (mcast)
			u64_stats_inc(&pcptr->rx_mcast);
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
	}
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);

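/* Hash an IP address into a bucket index of the port's address hash table. */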
#if IS_ENABLED(CONFIG_IPV6)
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	const struct in6_addr *ip6_addr = iaddr;

	return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}
#else
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	return 0;
}
#endif

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
	const struct in_addr *ip4_addr = iaddr;

	return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
{
	if (!is_v6 && addr->atype == IPVL_IPV4) {
		struct in_addr *i4addr = (struct in_addr *)iaddr;

		return addr->ip4addr.s_addr == i4addr->s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (is_v6 && addr->atype == IPVL_IPV6) {
		struct in6_addr *i6addr = (struct in6_addr *)iaddr;

		return ipv6_addr_equal(&addr->ip6addr, i6addr);
#endif
	}

	return false;
}

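/* Look up an IPv4/IPv6 address in the port's hash table; the caller must
 * hold rcu_read_lock().
 */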
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
					       const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
	       ipvlan_get_v4_hash(iaddr);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
		if (addr_equal(is_v6, addr, iaddr))
			return addr;
	return NULL;
}

void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	struct ipvl_port *port = ipvlan->port;
	u8 hash;

	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(&addr->ip4addr);
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
	hlist_del_init_rcu(&addr->hlnode);
}

struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
				   const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
		if (addr_equal(is_v6, addr, iaddr)) {
			ret = addr;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
	struct ipvl_dev *ipvlan;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
		if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

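/* Validate the L3 header of @skb and return a pointer to it, setting
 * *type to the matching IPVL_* address type; returns NULL for packets
 * this driver does not handle.
 */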
void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
	void *lyr3h = NULL;

	switch (skb->protocol) {
	case htons(ETH_P_ARP): {
		struct arphdr *arph;

		if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
			return NULL;

		arph = arp_hdr(skb);
		*type = IPVL_ARP;
		lyr3h = arph;
		break;
	}
	case htons(ETH_P_IP): {
		u32 pktlen;
		struct iphdr *ip4h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
			return NULL;

		ip4h = ip_hdr(skb);
		pktlen = skb_ip_totlen(skb);
		if (ip4h->ihl < 5 || ip4h->version != 4)
			return NULL;
		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
			return NULL;

		*type = IPVL_IPV4;
		lyr3h = ip4h;
		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
			return NULL;

		ip6h = ipv6_hdr(skb);
		if (ip6h->version != 6)
			return NULL;

		*type = IPVL_IPV6;
		lyr3h = ip6h;
		/* Only Neighbour Solicitation pkts need different treatment */
		if (ipv6_addr_any(&ip6h->saddr) &&
		    ip6h->nexthdr == NEXTHDR_ICMP) {
			struct icmp6hdr *icmph;

			if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
				return NULL;

			ip6h = ipv6_hdr(skb);
			icmph = (struct icmp6hdr *)(ip6h + 1);

			if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
				/* Need to access the ipv6 address in body */
				if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
						+ sizeof(struct in6_addr))))
					return NULL;

				ip6h = ipv6_hdr(skb);
				icmph = (struct icmp6hdr *)(ip6h + 1);
			}

			*type = IPVL_ICMPV6;
			lyr3h = icmph;
		}
		break;
	}
#endif
	default:
		return NULL;
	}

	return lyr3h;
}

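/* Hash the last four bytes of a MAC address into an index for the
 * per-slave multicast filter bitmap.
 */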
unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(__get_unaligned_cpu32(addr + 2),
			       ipvlan_jhash_secret);

	return hash & IPVLAN_MAC_FILTER_MASK;
}

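/* Work-queue handler: drain the port backlog and deliver a clone of each
 * deferred multicast/broadcast frame to every interested slave that is up.
 * Frames deferred from the TX path are also sent out via the master device.
 */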
void ipvlan_process_multicast(struct work_struct *work)
{
	struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
	struct ethhdr *ethh;
	struct ipvl_dev *ipvlan;
	struct sk_buff *skb, *nskb;
	struct sk_buff_head list;
	unsigned int len;
	unsigned int mac_hash;
	int ret;
	u8 pkt_type;
	bool tx_pkt;

	__skb_queue_head_init(&list);

	spin_lock_bh(&port->backlog.lock);
	skb_queue_splice_tail_init(&port->backlog, &list);
	spin_unlock_bh(&port->backlog.lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		struct net_device *dev = skb->dev;
		bool consumed = false;

		ethh = eth_hdr(skb);
		tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
		mac_hash = ipvlan_mac_hash(ethh->h_dest);

		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
			pkt_type = PACKET_BROADCAST;
		else
			pkt_type = PACKET_MULTICAST;

		rcu_read_lock();
		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
			if (tx_pkt && (ipvlan->dev == skb->dev))
				continue;
			if (!test_bit(mac_hash, ipvlan->mac_filters))
				continue;
			if (!(ipvlan->dev->flags & IFF_UP))
				continue;
			ret = NET_RX_DROP;
			len = skb->len + ETH_HLEN;
			nskb = skb_clone(skb, GFP_ATOMIC);
			local_bh_disable();
			if (nskb) {
				consumed = true;
				nskb->pkt_type = pkt_type;
				nskb->dev = ipvlan->dev;
				if (tx_pkt)
					ret = dev_forward_skb(ipvlan->dev, nskb);
				else
					ret = netif_rx(nskb);
			}
			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
			local_bh_enable();
		}
		rcu_read_unlock();

		if (tx_pkt) {
			/* If the packet originated here, send it out. */
			skb->dev = port->dev;
			skb->pkt_type = pkt_type;
			dev_queue_xmit(skb);
		} else {
			if (consumed)
				consume_skb(skb);
			else
				kfree_skb(skb);
		}
		dev_put(dev);
		cond_resched();
	}
}

static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
	bool xnet = true;

	if (dev)
		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	skb_scrub_packet(skb, xnet);
	if (dev)
		skb->dev = dev;
}

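/* Deliver @skb to the slave that owns @addr. @local is true when the
 * packet was sent by another slave on the same port.
 */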
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
			    bool local)
{
	struct ipvl_dev *ipvlan = addr->master;
	struct net_device *dev = ipvlan->dev;
	unsigned int len;
	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
	bool success = false;
	struct sk_buff *skb = *pskb;

	len = skb->len + ETH_HLEN;
	/* Only packets exchanged between two local slaves need the
	 * device-up check as well as the skb-share check.
	 */
	if (local) {
		if (unlikely(!(dev->flags & IFF_UP))) {
			kfree_skb(skb);
			goto out;
		}

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			goto out;

		*pskb = skb;
	}

	if (local) {
		skb->pkt_type = PACKET_HOST;
		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
			success = true;
	} else {
		skb->dev = dev;
		ret = RX_HANDLER_ANOTHER;
		success = true;
	}

out:
	ipvlan_count_rx(ipvlan, len, success, false);
	return ret;
}

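/* Map a validated L3 header to the ipvl_addr owning its source or
 * destination address; returns NULL when no slave owns it.
 */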
struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
				     int addr_type, bool use_dest)
{
	struct ipvl_addr *addr = NULL;

	switch (addr_type) {
#if IS_ENABLED(CONFIG_IPV6)
	case IPVL_IPV6: {
		struct ipv6hdr *ip6h;
		struct in6_addr *i6addr;

		ip6h = (struct ipv6hdr *)lyr3h;
		i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		break;
	}
	case IPVL_ICMPV6: {
		struct nd_msg *ndmh;
		struct in6_addr *i6addr;

		/* Make sure that the Neighbor Solicitation ICMPv6 packets
		 * are handled to avoid DAD issues.
		 */
		ndmh = (struct nd_msg *)lyr3h;
		if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
			i6addr = &ndmh->target;
			addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		}
		break;
	}
#endif
	case IPVL_IPV4: {
		struct iphdr *ip4h;
		__be32 *i4addr;

		ip4h = (struct iphdr *)lyr3h;
		i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i4addr, false);
		break;
	}
	case IPVL_ARP: {
		struct arphdr *arph;
		unsigned char *arp_ptr;
		__be32 dip;

		arph = (struct arphdr *)lyr3h;
		arp_ptr = (unsigned char *)(arph + 1);
		if (use_dest)
			arp_ptr += (2 * port->dev->addr_len) + 4;
		else
			arp_ptr += port->dev->addr_len;

		memcpy(&dip, arp_ptr, 4);
		addr = ipvlan_ht_addr_lookup(port, &dip, false);
		break;
	}
	}

	return addr;
}

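/* Route a locally generated IPv4 packet in the master's namespace and
 * transmit it there.
 */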
static int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	const struct iphdr *ip4h = ip_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int err, ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->ifindex,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.flowi4_mark = skb->mark,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}
	skb_dst_set(skb, &rt->dst);

	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	err = ip_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		DEV_STATS_INC(dev, tx_errors);
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	DEV_STATS_INC(dev, tx_errors);
	kfree_skb(skb);
out:
	return ret;
}

#if IS_ENABLED(CONFIG_IPV6)
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct dst_entry *dst;
	int err, ret = NET_XMIT_DROP;
	struct flowi6 fl6 = {
		.flowi6_oif = dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		ret = dst->error;
		dst_release(dst);
		goto err;
	}
	skb_dst_set(skb, dst);

	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

	err = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		DEV_STATS_INC(dev, tx_errors);
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	DEV_STATS_INC(dev, tx_errors);
	kfree_skb(skb);
out:
	return ret;
}
#else
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	return NET_XMIT_DROP;
}
#endif

static int ipvlan_process_outbound(struct sk_buff *skb)
{
	int ret = NET_XMIT_DROP;

	/* The ipvlan is a pseudo-L2 device, so the packets that we receive
	 * will have an L2 header that needs to be discarded before the
	 * packet is processed further in the net-ns of the main device.
	 */
	if (skb_mac_header_was_set(skb)) {
		/* In this mode we don't care about
		 * multicast and broadcast traffic.
		 */
		struct ethhdr *ethh = eth_hdr(skb);

		if (is_multicast_ether_addr(ethh->h_dest)) {
			pr_debug_ratelimited(
				"Dropped {multi|broad}cast of type=[%x]\n",
				ntohs(skb->protocol));
			kfree_skb(skb);
			goto out;
		}

		skb_pull(skb, sizeof(*ethh));
		skb->mac_header = (typeof(skb->mac_header))~0U;
		skb_reset_network_header(skb);
	}

	if (skb->protocol == htons(ETH_P_IPV6))
		ret = ipvlan_process_v6_outbound(skb);
	else if (skb->protocol == htons(ETH_P_IP))
		ret = ipvlan_process_v4_outbound(skb);
	else {
		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
	}
out:
	return ret;
}

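/* Defer a multicast/broadcast frame to the port backlog for delivery by
 * ipvlan_process_multicast(); the frame is dropped when the backlog is full.
 */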
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
				     struct sk_buff *skb, bool tx_pkt)
{
	if (skb->protocol == htons(ETH_P_PAUSE)) {
		kfree_skb(skb);
		return;
	}

	/* Record whether the deferred packet came from the TX or the RX
	 * path; deciding by looking at the MAC addresses in the packet
	 * would lead to erroneous decisions. (This would be true for a
	 * loopback-mode on the master device or a hair-pin mode of the
	 * switch.)
	 */
	IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;

	spin_lock(&port->backlog.lock);
	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
		dev_hold(skb->dev);
		__skb_queue_tail(&port->backlog, skb);
		spin_unlock(&port->backlog.lock);
		schedule_work(&port->wq);
	} else {
		spin_unlock(&port->backlog.lock);
		dev_core_stats_rx_dropped_inc(skb->dev);
		kfree_skb(skb);
	}
}

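/* L3/L3S TX path: deliver directly to the local slave owning the
 * destination address (unless VEPA or private mode forbids it), otherwise
 * hand the packet over for routing in the master's namespace.
 */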
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	void *lyr3h;
	struct ipvl_addr *addr;
	int addr_type;

	lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	if (!ipvlan_is_vepa(ipvlan->port)) {
		addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
		if (addr) {
			if (ipvlan_is_private(ipvlan->port)) {
				consume_skb(skb);
				return NET_XMIT_DROP;
			}
			ipvlan_rcv_frame(addr, &skb, true);
			return NET_XMIT_SUCCESS;
		}
	}
out:
	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return ipvlan_process_outbound(skb);
}

static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = skb_eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (!ipvlan_is_vepa(ipvlan->port) &&
	    ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
			if (addr) {
				if (ipvlan_is_private(ipvlan->port)) {
					consume_skb(skb);
					return NET_XMIT_DROP;
				}
				ipvlan_rcv_frame(addr, &skb, true);
				return NET_XMIT_SUCCESS;
			}
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;

		/* The packet definitely does not belong to any of the
		 * virtual devices, but the dest is local. So forward
		 * the skb to the main device. At the RX side we just
		 * return RX_HANDLER_PASS for it to be processed further
		 * on the stack.
		 */
		dev_forward_skb(ipvlan->phy_dev, skb);
		return NET_XMIT_SUCCESS;

	} else if (is_multicast_ether_addr(eth->h_dest)) {
		skb_reset_mac_header(skb);
		ipvlan_skb_crossing_ns(skb, NULL);
		ipvlan_multicast_enqueue(ipvlan->port, skb, true);
		return NET_XMIT_SUCCESS;
	}

	skb->dev = ipvlan->phy_dev;
	return dev_queue_xmit(skb);
}

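/* TX entry point called from the slave's ndo_start_xmit(); dispatches on
 * the port mode and drops the packet when the port is gone or the frame
 * is shorter than an Ethernet header.
 */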
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

	if (!port)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
#endif
		return ipvlan_xmit_mode_l3(skb, dev);
	}

	/* Should not reach here */
	WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

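/* Return true unless the frame was sourced by one of this port's own
 * slaves (same MAC as the master plus an owned source address).
 */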
static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
		lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
		if (!lyr3h)
			return true;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
		if (addr)
			return false;
	}

	return true;
}

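/* L3 RX path: steer the packet to the slave owning the destination
 * address, or let it continue up the master's stack.
 */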
static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	void *lyr3h;
	int addr_type;
	struct ipvl_addr *addr;
	struct sk_buff *skb = *pskb;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
	if (addr)
		ret = ipvlan_rcv_frame(addr, pskb, false);

out:
	return ret;
}

static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	struct sk_buff *skb = *pskb;
	struct ethhdr *eth = eth_hdr(skb);
	rx_handler_result_t ret = RX_HANDLER_PASS;

	if (is_multicast_ether_addr(eth->h_dest)) {
		if (ipvlan_external_frame(skb, port)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			/* External frames are queued for device local
			 * distribution, but a copy is given to master
			 * straight away to avoid sending duplicates later
			 * when work-queue processes this frame. This is
			 * achieved by returning RX_HANDLER_PASS.
			 */
			if (nskb) {
				ipvlan_skb_crossing_ns(nskb, NULL);
				ipvlan_multicast_enqueue(port, nskb, false);
			}
		}
	} else {
		/* Handle non-multicast packets just like L3 mode. */
		ret = ipvlan_handle_mode_l3(pskb, port);
	}

	return ret;
}

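/* rx_handler installed on the master device; dispatches on the port mode.
 * In L3S mode delivery happens via the l3mdev hooks, so frames are simply
 * passed up here.
 */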
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

	if (!port)
		return RX_HANDLER_PASS;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_handle_mode_l2(pskb, port);
	case IPVLAN_MODE_L3:
		return ipvlan_handle_mode_l3(pskb, port);
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
		return RX_HANDLER_PASS;
#endif
	}

	/* Should not reach here */
	WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}