net/bridge/br_input.c
/*
 *      Handle incoming frames
 *      Linux ethernet bridge
 *
 *      Authors:
 *      Lennert Buytenhek               <buytenh@gnu.org>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/neighbour.h>
#include <net/arp.h>
#include <linux/export.h>
#include <linux/rculist.h>
#include "br_private.h"
#include "br_private_tunnel.h"

/* Hook for brouter */
br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
EXPORT_SYMBOL(br_should_route_hook);

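/* okfn for the NF_BR_LOCAL_IN hook in br_pass_frame_up(): drop the fake
 * rtable that br_netfilter may have attached, then hand the frame to the
 * local network stack.
 */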
static int
br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        br_drop_fake_rtable(skb);
        return netif_receive_skb(skb);
}

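/* Deliver a frame to the bridge device itself (local reception): update the
 * bridge's receive statistics, apply egress VLAN policy and run the
 * NF_BR_LOCAL_IN hook before the skb reaches the local network stack.
 */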
static int br_pass_frame_up(struct sk_buff *skb)
{
        struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
        struct net_bridge *br = netdev_priv(brdev);
        struct net_bridge_vlan_group *vg;
        struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);

        u64_stats_update_begin(&brstats->syncp);
        brstats->rx_packets++;
        brstats->rx_bytes += skb->len;
        u64_stats_update_end(&brstats->syncp);

        vg = br_vlan_group_rcu(br);
        /* Bridge is just like any other port.  Make sure the
         * packet is allowed except in promisc mode when someone
         * may be running packet capture.
         */
        if (!(brdev->flags & IFF_PROMISC) &&
            !br_allowed_egress(vg, skb)) {
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        indev = skb->dev;
        skb->dev = brdev;
        skb = br_handle_vlan(br, NULL, vg, skb);
        if (!skb)
                return NET_RX_DROP;
        /* update the multicast stats if the packet is IGMP/MLD */
        br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
                           BR_MCAST_DIR_TX);

        return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
                       dev_net(indev), NULL, skb, indev, NULL,
                       br_netif_receive_skb);
}

/* Main input path for frames accepted on a bridge port: performs ingress
 * VLAN filtering and source address learning, handles multicast and
 * ARP/ND suppression, then forwards, floods or delivers the frame locally.
 * note: already called with rcu_read_lock
 */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct net_bridge_port *p = br_port_get_rcu(skb->dev);
        enum br_pkt_type pkt_type = BR_PKT_UNICAST;
        struct net_bridge_fdb_entry *dst = NULL;
        struct net_bridge_mdb_entry *mdst;
        bool local_rcv, mcast_hit = false;
        const unsigned char *dest;
        struct net_bridge *br;
        u16 vid = 0;

        if (!p || p->state == BR_STATE_DISABLED)
                goto drop;

        if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid))
                goto out;

        nbp_switchdev_frame_mark(p, skb);

        /* insert into forwarding database after filtering to avoid spoofing */
        br = p->br;
        if (p->flags & BR_LEARNING)
                br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);

        local_rcv = !!(br->dev->flags & IFF_PROMISC);
        dest = eth_hdr(skb)->h_dest;
        if (is_multicast_ether_addr(dest)) {
                /* by definition the broadcast is also a multicast address */
                if (is_broadcast_ether_addr(dest)) {
                        pkt_type = BR_PKT_BROADCAST;
                        local_rcv = true;
                } else {
                        pkt_type = BR_PKT_MULTICAST;
                        if (br_multicast_rcv(br, p, skb, vid))
                                goto drop;
                }
        }

        if (p->state == BR_STATE_LEARNING)
                goto drop;

        BR_INPUT_SKB_CB(skb)->brdev = br->dev;
        BR_INPUT_SKB_CB(skb)->src_port_isolated = !!(p->flags & BR_ISOLATED);

        if (IS_ENABLED(CONFIG_INET) &&
            (skb->protocol == htons(ETH_P_ARP) ||
             skb->protocol == htons(ETH_P_RARP))) {
                br_do_proxy_suppress_arp(skb, br, vid, p);
        } else if (IS_ENABLED(CONFIG_IPV6) &&
                   skb->protocol == htons(ETH_P_IPV6) &&
                   br->neigh_suppress_enabled &&
                   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
                                 sizeof(struct nd_msg)) &&
                   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
                struct nd_msg *msg, _msg;

                msg = br_is_nd_neigh_msg(skb, &_msg);
                if (msg)
                        br_do_suppress_nd(skb, br, vid, p, msg);
        }

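        /* Forwarding decision: look up the destination in the multicast
         * database for multicast frames, or in the unicast forwarding
         * database otherwise.
         */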
        switch (pkt_type) {
        case BR_PKT_MULTICAST:
                mdst = br_mdb_get(br, skb, vid);
                if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
                    br_multicast_querier_exists(br, eth_hdr(skb))) {
                        if ((mdst && mdst->host_joined) ||
                            br_multicast_is_router(br)) {
                                local_rcv = true;
                                br->dev->stats.multicast++;
                        }
                        mcast_hit = true;
                } else {
                        local_rcv = true;
                        br->dev->stats.multicast++;
                }
                break;
        case BR_PKT_UNICAST:
                dst = br_fdb_find_rcu(br, dest, vid);
        default:
                break;
        }

        if (dst) {
                unsigned long now = jiffies;

                if (dst->is_local)
                        return br_pass_frame_up(skb);

                if (now != dst->used)
                        dst->used = now;
                br_forward(dst->dst, skb, local_rcv, false);
        } else {
                if (!mcast_hit)
                        br_flood(br, skb, pkt_type, local_rcv, false);
                else
                        br_multicast_flood(mdst, skb, local_rcv, false);
        }

        if (local_rcv)
                return br_pass_frame_up(skb);

out:
        return 0;
drop:
        kfree_skb(skb);
        goto out;
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);

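/* Learn the source address of a link-local frame that is being passed up
 * locally, but only after checking that its VLAN is allowed on the port.
 */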
static void __br_handle_local_finish(struct sk_buff *skb)
{
        struct net_bridge_port *p = br_port_get_rcu(skb->dev);
        u16 vid = 0;

        /* check if vlan is allowed, to avoid spoofing */
        if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
                br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
}

/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct net_bridge_port *p = br_port_get_rcu(skb->dev);

        __br_handle_local_finish(skb);

        BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
        br_pass_frame_up(skb);
        return 0;
}

/*
 * rx_handler for bridge ports: returns RX_HANDLER_CONSUMED when the bridge
 * has taken the skb, RX_HANDLER_PASS when the caller should keep processing it.
 * note: already called with rcu_read_lock
 */
rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
        struct net_bridge_port *p;
        struct sk_buff *skb = *pskb;
        const unsigned char *dest = eth_hdr(skb)->h_dest;
        br_should_route_hook_t *rhook;

        if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
                return RX_HANDLER_PASS;

        if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
                goto drop;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                return RX_HANDLER_CONSUMED;

        p = br_port_get_rcu(skb->dev);
        if (p->flags & BR_VLAN_TUNNEL) {
                if (br_handle_ingress_vlan_tunnel(skb, p,
                                                  nbp_vlan_group_rcu(p)))
                        goto drop;
        }

        if (unlikely(is_link_local_ether_addr(dest))) {
                u16 fwd_mask = p->br->group_fwd_mask_required;

                /*
                 * See IEEE 802.1D Table 7-10 Reserved addresses
                 *
                 * Assignment                   Value
                 * Bridge Group Address         01-80-C2-00-00-00
                 * (MAC Control) 802.3          01-80-C2-00-00-01
                 * (Link Aggregation) 802.3     01-80-C2-00-00-02
                 * 802.1X PAE address           01-80-C2-00-00-03
                 *
                 * 802.1AB LLDP                 01-80-C2-00-00-0E
                 *
                 * Others reserved for future standardization
                 */
                fwd_mask |= p->group_fwd_mask;
                switch (dest[5]) {
                case 0x00:      /* Bridge Group Address */
                        /* If STP is turned off, the frame must still be
                         * forwarded so that loop detection keeps working.
                         */
                        if (p->br->stp_enabled == BR_NO_STP ||
                            fwd_mask & (1u << dest[5]))
                                goto forward;
                        *pskb = skb;
                        __br_handle_local_finish(skb);
                        return RX_HANDLER_PASS;

                case 0x01:      /* IEEE MAC (Pause) */
                        goto drop;

                case 0x0E:      /* 802.1AB LLDP */
                        fwd_mask |= p->br->group_fwd_mask;
                        if (fwd_mask & (1u << dest[5]))
                                goto forward;
                        *pskb = skb;
                        __br_handle_local_finish(skb);
                        return RX_HANDLER_PASS;

                default:
                        /* Allow selective forwarding for most other protocols */
                        fwd_mask |= p->br->group_fwd_mask;
                        if (fwd_mask & (1u << dest[5]))
                                goto forward;
                }

                /* Deliver packet to local host only */
                NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
                        NULL, skb, skb->dev, NULL, br_handle_local_finish);
                return RX_HANDLER_CONSUMED;
        }

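/* Frames that take the normal bridge path: run the NF_BR_PRE_ROUTING hook
 * and continue in br_handle_frame_finish(), depending on the port's STP state.
 */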
forward:
        switch (p->state) {
        case BR_STATE_FORWARDING:
                rhook = rcu_dereference(br_should_route_hook);
                if (rhook) {
                        if ((*rhook)(skb)) {
                                *pskb = skb;
                                return RX_HANDLER_PASS;
                        }
                        dest = eth_hdr(skb)->h_dest;
                }
                /* fall through */
        case BR_STATE_LEARNING:
                if (ether_addr_equal(p->br->dev->dev_addr, dest))
                        skb->pkt_type = PACKET_HOST;

                NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING,
                        dev_net(skb->dev), NULL, skb, skb->dev, NULL,
                        br_handle_frame_finish);
                break;
        default:
drop:
                kfree_skb(skb);
        }
        return RX_HANDLER_CONSUMED;
}