1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com> */
4 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/skbuff.h>
9 #include <linux/jhash.h>
10 #include <linux/if_tunnel.h>
11 #include <linux/net.h>
12 #include <linux/igmp.h>
13 #include <linux/workqueue.h>
14 #include <net/sch_generic.h>
15 #include <net/net_namespace.h>
18 #include <net/udp_tunnel.h>
22 #include <uapi/linux/amt.h>
23 #include <linux/security.h>
24 #include <net/gro_cells.h>
26 #include <net/if_inet6.h>
27 #include <net/ndisc.h>
28 #include <net/addrconf.h>
29 #include <net/ip6_route.h>
30 #include <net/inet_common.h>
31 #include <net/ip6_checksum.h>
/* NOTE(review): this listing carries the file's original line numbers in each
 * line; gaps in the numbering indicate lines omitted from this excerpt.
 */
/* Single workqueue shared by all AMT devices for timers and GC work. */
33 static struct workqueue_struct *amt_wq;
/* Source nodes detached from their hash tables, awaiting RCU-deferred free. */
35 static HLIST_HEAD(source_gc_list);
36 /* Lock for source_gc_list */
37 static spinlock_t source_gc_lock;
/* Periodic work that drains source_gc_list (see amt_source_gc_work()). */
38 static struct delayed_work source_gc_wq;
/* Human-readable names for enum amt_status, indexed by status value;
 * used only in netdev_dbg() messages.
 */
39 static char *status_str[] = {
41 "AMT_STATUS_SENT_DISCOVERY",
42 "AMT_STATUS_RECEIVED_DISCOVERY",
43 "AMT_STATUS_SENT_ADVERTISEMENT",
44 "AMT_STATUS_RECEIVED_ADVERTISEMENT",
45 "AMT_STATUS_SENT_REQUEST",
46 "AMT_STATUS_RECEIVED_REQUEST",
47 "AMT_STATUS_SENT_QUERY",
48 "AMT_STATUS_RECEIVED_QUERY",
49 "AMT_STATUS_SENT_UPDATE",
50 "AMT_STATUS_RECEIVED_UPDATE",
/* Human-readable names for AMT message types (debug logging only). */
53 static char *type_str[] = {
55 "AMT_MSG_ADVERTISEMENT",
57 "AMT_MSG_MEMBERSHIP_QUERY",
58 "AMT_MSG_MEMBERSHIP_UPDATE",
59 "AMT_MSG_MULTICAST_DATA",
/* Human-readable names for enum amt_act (debug logging only). */
63 static char *action_str[] = {
67 "AMT_ACT_STATUS_FWD_NEW",
68 "AMT_ACT_STATUS_D_FWD_NEW",
69 "AMT_ACT_STATUS_NONE_NEW",
/* All-zero IGMPv3 group record, used as a template/placeholder. */
72 static struct igmpv3_grec igmpv3_zero_grec;
74 #if IS_ENABLED(CONFIG_IPV6)
/* ff02::1 - the IPv6 "all nodes" link-local multicast address. */
75 #define MLD2_ALL_NODE_INIT { { { 0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01 } } }
76 static struct in6_addr mld2_all_node = MLD2_ALL_NODE_INIT;
/* All-zero MLDv2 group record, IPv6 counterpart of igmpv3_zero_grec. */
77 static struct mld2_grec mldv2_zero_grec;
/* Return the AMT private control block stored in skb->cb.
 * It lives immediately after the qdisc cb area; the BUILD_BUG_ON proves
 * both structures together still fit inside sk_buff::cb.
 */
80 static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb)
82 BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct qdisc_skb_cb) >
83 sizeof_field(struct sk_buff, cb));
85 return (struct amt_skb_cb *)((void *)skb->cb +
86 sizeof(struct qdisc_skb_cb));
/* Drain source_gc_list: splice the global list onto a private one under
 * source_gc_lock (so new entries can keep arriving), then unlink each
 * node and free it after an RCU grace period via kfree_rcu().
 * NOTE(review): the iterator 't' used below is declared on a line omitted
 * from this excerpt.
 */
89 static void __amt_source_gc_work(void)
91 struct amt_source_node *snode;
92 struct hlist_head gc_list;
95 spin_lock_bh(&source_gc_lock);
96 hlist_move_list(&source_gc_list, &gc_list);
97 spin_unlock_bh(&source_gc_lock);
99 hlist_for_each_entry_safe(snode, t, &gc_list, node) {
100 hlist_del_rcu(&snode->node);
101 kfree_rcu(snode, rcu);
/* Periodic GC worker: free queued source nodes, then re-arm itself to run
 * again after AMT_GC_INTERVAL on the shared amt_wq workqueue.
 */
105 static void amt_source_gc_work(struct work_struct *work)
107 __amt_source_gc_work();
109 spin_lock_bh(&source_gc_lock);
110 mod_delayed_work(amt_wq, &source_gc_wq,
111 msecs_to_jiffies(AMT_GC_INTERVAL));
112 spin_unlock_bh(&source_gc_lock);
/* Compare two amt_addr unions bytewise; works for both IPv4 and IPv6
 * because unused union bytes are expected to be zeroed by callers.
 */
115 static bool amt_addr_equal(union amt_addr *a, union amt_addr *b)
117 return !memcmp(a, b, sizeof(union amt_addr));
/* Hash a source address into the tunnel's per-group source buckets,
 * seeded per-device (hash_seed) to resist hash-collision attacks.
 */
120 static u32 amt_source_hash(struct amt_tunnel_list *tunnel, union amt_addr *src)
122 u32 hash = jhash(src, sizeof(*src), tunnel->amt->hash_seed);
124 return reciprocal_scale(hash, tunnel->amt->hash_buckets);
/* Test whether a source node matches the given filter class.  Each filter
 * is a (status, flags) predicate: FWD/D_FWD/NONE select the forwarding
 * status, OLD/NEW select the record generation, and the BOTH variants
 * accept either forwarding status.
 * NOTE(review): the surrounding switch statement, some case labels and the
 * "ret = true" assignments are on lines omitted from this excerpt.
 */
127 static bool amt_status_filter(struct amt_source_node *snode,
128 enum amt_filter filter)
134 if (snode->status == AMT_SOURCE_STATUS_FWD &&
135 snode->flags == AMT_SOURCE_OLD)
138 case AMT_FILTER_D_FWD:
139 if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
140 snode->flags == AMT_SOURCE_OLD)
143 case AMT_FILTER_FWD_NEW:
144 if (snode->status == AMT_SOURCE_STATUS_FWD &&
145 snode->flags == AMT_SOURCE_NEW)
148 case AMT_FILTER_D_FWD_NEW:
149 if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
150 snode->flags == AMT_SOURCE_NEW)
156 case AMT_FILTER_NONE_NEW:
157 if (snode->status == AMT_SOURCE_STATUS_NONE &&
158 snode->flags == AMT_SOURCE_NEW)
161 case AMT_FILTER_BOTH:
162 if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
163 snode->status == AMT_SOURCE_STATUS_FWD) &&
164 snode->flags == AMT_SOURCE_OLD)
167 case AMT_FILTER_BOTH_NEW:
168 if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
169 snode->status == AMT_SOURCE_STATUS_FWD) &&
170 snode->flags == AMT_SOURCE_NEW)
/* RCU hash-table lookup of a source record within a group, returning only
 * entries that also satisfy the given filter class (see amt_status_filter()).
 * NOTE(review): the "return snode;" / "return NULL;" lines are omitted from
 * this excerpt.
 */
181 static struct amt_source_node *amt_lookup_src(struct amt_tunnel_list *tunnel,
182 struct amt_group_node *gnode,
183 enum amt_filter filter,
186 u32 hash = amt_source_hash(tunnel, src);
187 struct amt_source_node *snode;
189 hlist_for_each_entry_rcu(snode, &gnode->sources[hash], node)
190 if (amt_status_filter(snode, filter) &&
191 amt_addr_equal(&snode->source_addr, src))
/* Hash a group address into the tunnel's group buckets; same seeded-jhash
 * scheme as amt_source_hash().
 */
197 static u32 amt_group_hash(struct amt_tunnel_list *tunnel, union amt_addr *group)
199 u32 hash = jhash(group, sizeof(*group), tunnel->amt->hash_seed);
201 return reciprocal_scale(hash, tunnel->amt->hash_buckets);
/* RCU lookup of a group node on a tunnel, matched by both the multicast
 * group address and the reporting host address.
 * NOTE(review): a third match condition and the return statements are on
 * lines omitted from this excerpt.
 */
204 static struct amt_group_node *amt_lookup_group(struct amt_tunnel_list *tunnel,
205 union amt_addr *group,
206 union amt_addr *host,
209 u32 hash = amt_group_hash(tunnel, group);
210 struct amt_group_node *gnode;
212 hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash], node) {
213 if (amt_addr_equal(&gnode->group_addr, group) &&
214 amt_addr_equal(&gnode->host_addr, host) &&
/* Tear down a source record: cancel its expiry timer, unlink it from the
 * group's hash table, drop the tunnel's source count, and queue the node on
 * source_gc_list for deferred freeing by the GC worker.  The node is not
 * freed here so concurrent RCU readers stay safe.
 */
222 static void amt_destroy_source(struct amt_source_node *snode)
224 struct amt_group_node *gnode = snode->gnode;
225 struct amt_tunnel_list *tunnel;
227 tunnel = gnode->tunnel_list;
230 netdev_dbg(snode->gnode->amt->dev,
231 "Delete source %pI4 from %pI4\n",
232 &snode->source_addr.ip4,
233 &gnode->group_addr.ip4);
234 #if IS_ENABLED(CONFIG_IPV6)
236 netdev_dbg(snode->gnode->amt->dev,
237 "Delete source %pI6 from %pI6\n",
238 &snode->source_addr.ip6,
239 &gnode->group_addr.ip6);
243 cancel_delayed_work(&snode->source_timer);
244 hlist_del_init_rcu(&snode->node);
245 tunnel->nr_sources--;
/* Hand the node to the GC list; actual kfree_rcu() happens in
 * __amt_source_gc_work().
 */
247 spin_lock_bh(&source_gc_lock);
248 hlist_add_head_rcu(&snode->node, &source_gc_list);
249 spin_unlock_bh(&source_gc_lock);
/* Remove a group from its tunnel: stop the group timer, unlink the node,
 * destroy every remaining source record in all hash buckets, and free the
 * group after an RCU grace period.
 */
252 static void amt_del_group(struct amt_dev *amt, struct amt_group_node *gnode)
254 struct amt_source_node *snode;
255 struct hlist_node *t;
258 if (cancel_delayed_work(&gnode->group_timer))
260 hlist_del_rcu(&gnode->node);
261 gnode->tunnel_list->nr_groups--;
264 netdev_dbg(amt->dev, "Leave group %pI4\n",
265 &gnode->group_addr.ip4);
266 #if IS_ENABLED(CONFIG_IPV6)
268 netdev_dbg(amt->dev, "Leave group %pI6\n",
269 &gnode->group_addr.ip6);
271 for (i = 0; i < amt->hash_buckets; i++)
272 hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node)
273 amt_destroy_source(snode);
275 /* tunnel->lock was acquired outside of amt_del_group()
276 * But rcu_read_lock() was acquired too so it's safe.
278 kfree_rcu(gnode, rcu);
281 /* If a source timer expires with a router filter-mode for the group of
282 * INCLUDE, the router concludes that traffic from this particular
283 * source is no longer desired on the attached network, and deletes the
284 * associated source record.
/* Source timer expiry handler (see RFC comment above).  Under the tunnel
 * lock: in INCLUDE mode the expired source is deleted outright, and the
 * group itself is deleted once its last source is gone; in EXCLUDE mode the
 * source is merely demoted to "don't forward" (D_FWD) — actual deletion
 * waits for the group timer.
 */
286 static void amt_source_work(struct work_struct *work)
288 struct amt_source_node *snode = container_of(to_delayed_work(work),
289 struct amt_source_node,
291 struct amt_group_node *gnode = snode->gnode;
292 struct amt_dev *amt = gnode->amt;
293 struct amt_tunnel_list *tunnel;
295 tunnel = gnode->tunnel_list;
296 spin_lock_bh(&tunnel->lock);
298 if (gnode->filter_mode == MCAST_INCLUDE) {
299 amt_destroy_source(snode);
300 if (!gnode->nr_sources)
301 amt_del_group(amt, gnode);
303 /* When a router filter-mode for a group is EXCLUDE,
304 * source records are only deleted when the group timer expires
306 snode->status = AMT_SOURCE_STATUS_D_FWD;
309 spin_unlock_bh(&tunnel->lock);
/* Apply an enum amt_act action to a source record: re-arm or cancel its
 * expiry timer (GMI = Group Membership Interval), sync it to the group
 * timer, or rewrite its status/flags pair.  Finishes with a debug trace of
 * the action taken (action_str lookup is on an omitted line).
 * NOTE(review): the switch statement, some case labels and break statements
 * are on lines omitted from this excerpt.
 */
312 static void amt_act_src(struct amt_tunnel_list *tunnel,
313 struct amt_group_node *gnode,
314 struct amt_source_node *snode,
317 struct amt_dev *amt = tunnel->amt;
321 mod_delayed_work(amt_wq, &snode->source_timer,
322 msecs_to_jiffies(amt_gmi(amt)));
324 case AMT_ACT_GMI_ZERO:
325 cancel_delayed_work(&snode->source_timer);
/* Align this source's expiry with the group timer. */
328 mod_delayed_work(amt_wq, &snode->source_timer,
329 gnode->group_timer.timer.expires);
331 case AMT_ACT_STATUS_FWD_NEW:
332 snode->status = AMT_SOURCE_STATUS_FWD;
333 snode->flags = AMT_SOURCE_NEW;
335 case AMT_ACT_STATUS_D_FWD_NEW:
336 snode->status = AMT_SOURCE_STATUS_D_FWD;
337 snode->flags = AMT_SOURCE_NEW;
339 case AMT_ACT_STATUS_NONE_NEW:
340 cancel_delayed_work(&snode->source_timer);
341 snode->status = AMT_SOURCE_STATUS_NONE;
342 snode->flags = AMT_SOURCE_NEW;
350 netdev_dbg(amt->dev, "Source %pI4 from %pI4 Acted %s\n",
351 &snode->source_addr.ip4,
352 &gnode->group_addr.ip4,
354 #if IS_ENABLED(CONFIG_IPV6)
356 netdev_dbg(amt->dev, "Source %pI6 from %pI6 Acted %s\n",
357 &snode->source_addr.ip6,
358 &gnode->group_addr.ip6,
/* Allocate and initialize a new source record for a group.  GFP_ATOMIC
 * because callers run in softirq/RX context.  New nodes start as
 * STATUS_NONE/NEW; the allocation-failure return is on an omitted line.
 */
363 static struct amt_source_node *amt_alloc_snode(struct amt_group_node *gnode,
366 struct amt_source_node *snode;
368 snode = kzalloc(sizeof(*snode), GFP_ATOMIC);
372 memcpy(&snode->source_addr, src, sizeof(union amt_addr));
373 snode->gnode = gnode;
374 snode->status = AMT_SOURCE_STATUS_NONE;
375 snode->flags = AMT_SOURCE_NEW;
376 INIT_HLIST_NODE(&snode->node);
377 INIT_DELAYED_WORK(&snode->source_timer, amt_source_work);
382 /* RFC 3810 - 7.2.2. Definition of Filter Timers
384 * Router Mode Filter Timer Actions/Comments
385 * ----------- ----------------- ----------------
387 * INCLUDE Not Used All listeners in
390 * EXCLUDE Timer > 0 At least one listener
393 * EXCLUDE Timer == 0 No more listeners in
394 * EXCLUDE mode for the
396 * If the Requested List
399 * Record. If not, switch
400 * to INCLUDE filter mode;
403 * moved to the Include
404 * List, and the Exclude
/* Group (filter) timer expiry handler, implementing RFC 3810 7.2.2 quoted
 * above.  INCLUDE-mode groups ignore the timer (early unlock/return).  For
 * EXCLUDE mode: sources that are already D_FWD or whose own timer has
 * expired are destroyed; if any forwarding source survives, the group is
 * kept and switched to INCLUDE filter mode, otherwise the whole group is
 * deleted.
 */
407 static void amt_group_work(struct work_struct *work)
409 struct amt_group_node *gnode = container_of(to_delayed_work(work),
410 struct amt_group_node,
412 struct amt_tunnel_list *tunnel = gnode->tunnel_list;
413 struct amt_dev *amt = gnode->amt;
414 struct amt_source_node *snode;
415 bool delete_group = true;
416 struct hlist_node *t;
419 buckets = amt->hash_buckets;
421 spin_lock_bh(&tunnel->lock);
422 if (gnode->filter_mode == MCAST_INCLUDE) {
424 spin_unlock_bh(&tunnel->lock);
429 for (i = 0; i < buckets; i++) {
430 hlist_for_each_entry_safe(snode, t,
431 &gnode->sources[i], node) {
432 if (!delayed_work_pending(&snode->source_timer) ||
433 snode->status == AMT_SOURCE_STATUS_D_FWD) {
434 amt_destroy_source(snode);
436 delete_group = false;
437 snode->status = AMT_SOURCE_STATUS_FWD;
442 amt_del_group(amt, gnode);
444 gnode->filter_mode = MCAST_INCLUDE;
446 spin_unlock_bh(&tunnel->lock);
451 /* Non-existent group is created as INCLUDE {empty}:
453 * RFC 3376 - 5.1. Action on Change of Interface State
455 * If no interface state existed for that multicast address before
456 * the change (i.e., the change consisted of creating a new
457 * per-interface record), or if no state exists after the change
458 * (i.e., the change consisted of deleting a per-interface record),
459 * then the "non-existent" state is considered to have a filter mode
460 * of INCLUDE and an empty source list.
/* Create a new group node on a tunnel (see RFC 3376 5.1 note above: new
 * groups start as INCLUDE with an empty source list).  Enforces the
 * per-tunnel max_groups limit (-ENOSPC) and returns -ENOMEM on allocation
 * failure.  The node is allocated with trailing per-bucket source hash
 * heads and inserted into the tunnel's group hash under RCU.
 */
462 static struct amt_group_node *amt_add_group(struct amt_dev *amt,
463 struct amt_tunnel_list *tunnel,
464 union amt_addr *group,
465 union amt_addr *host,
468 struct amt_group_node *gnode;
472 if (tunnel->nr_groups >= amt->max_groups)
473 return ERR_PTR(-ENOSPC);
475 gnode = kzalloc(sizeof(*gnode) +
476 (sizeof(struct hlist_head) * amt->hash_buckets),
478 if (unlikely(!gnode))
479 return ERR_PTR(-ENOMEM);
482 gnode->group_addr = *group;
483 gnode->host_addr = *host;
485 gnode->tunnel_list = tunnel;
486 gnode->filter_mode = MCAST_INCLUDE;
487 INIT_HLIST_NODE(&gnode->node);
488 INIT_DELAYED_WORK(&gnode->group_timer, amt_group_work);
489 for (i = 0; i < amt->hash_buckets; i++)
490 INIT_HLIST_HEAD(&gnode->sources[i]);
492 hash = amt_group_hash(tunnel, group);
493 hlist_add_head_rcu(&gnode->node, &tunnel->groups[hash]);
497 netdev_dbg(amt->dev, "Join group %pI4\n",
498 &gnode->group_addr.ip4);
499 #if IS_ENABLED(CONFIG_IPV6)
501 netdev_dbg(amt->dev, "Join group %pI6\n",
502 &gnode->group_addr.ip6);
/* Build an IGMPv3 general-query skb addressed to 224.0.0.1 (all-hosts),
 * complete with Ethernet header, an IPv4 header carrying the Router Alert
 * option (AMT_IPHDR_OPTS), and a fully checksummed igmpv3_query.  The skb
 * is returned ready to be looped into the relay's receive path; NULL-check
 * of the allocation is on an omitted line.
 */
508 static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
510 u8 ra[AMT_IPHDR_OPTS] = { IPOPT_RA, 4, 0, 0 };
511 int hlen = LL_RESERVED_SPACE(amt->dev);
512 int tlen = amt->dev->needed_tailroom;
513 struct igmpv3_query *ihv3;
514 void *csum_start = NULL;
515 __sum16 *csum = NULL;
522 len = hlen + tlen + sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3);
523 skb = netdev_alloc_skb_ip_align(amt->dev, len);
527 skb_reserve(skb, hlen);
528 skb_push(skb, sizeof(*eth));
529 skb->protocol = htons(ETH_P_IP);
530 skb_reset_mac_header(skb);
531 skb->priority = TC_PRIO_CONTROL;
532 skb_put(skb, sizeof(*iph));
533 skb_put_data(skb, ra, sizeof(ra));
534 skb_put(skb, sizeof(*ihv3));
535 skb_pull(skb, sizeof(*eth));
536 skb_reset_network_header(skb);
/* IPv4 header: ihl covers the base header plus the RA option. */
540 iph->ihl = (sizeof(struct iphdr) + AMT_IPHDR_OPTS) >> 2;
542 iph->tot_len = htons(sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3));
543 iph->frag_off = htons(IP_DF);
546 iph->protocol = IPPROTO_IGMP;
547 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
548 iph->saddr = htonl(INADDR_ANY);
552 ether_addr_copy(eth->h_source, amt->dev->dev_addr);
553 ip_eth_mc_map(htonl(INADDR_ALLHOSTS_GROUP), eth->h_dest);
554 eth->h_proto = htons(ETH_P_IP);
556 ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
557 skb_reset_transport_header(skb);
558 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
561 ihv3->qqic = amt->qi;
564 ihv3->suppress = false;
565 ihv3->qrv = amt->net->ipv4.sysctl_igmp_qrv;
/* Compute the IGMP checksum by hand; csum pointer setup is on an
 * omitted line.
 */
568 csum_start = (void *)ihv3;
569 *csum = ip_compute_csum(csum_start, sizeof(*ihv3));
570 offset = skb_transport_offset(skb);
571 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
572 skb->ip_summed = CHECKSUM_NONE;
574 skb_push(skb, sizeof(*eth) + sizeof(*iph) + AMT_IPHDR_OPTS);
/* Advance the gateway state machine.  With validate set, only forward
 * transitions are allowed (current status must be lower).  Caller holds
 * amt->lock (see amt_update_gw_status()).
 */
579 static void __amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
582 if (validate && amt->status >= status)
584 netdev_dbg(amt->dev, "Update GW status %s -> %s",
585 status_str[amt->status], status_str[status]);
586 amt->status = status;
/* Advance a relay tunnel's state machine; same validate semantics as
 * __amt_update_gw_status().  Caller holds tunnel->lock.
 */
589 static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
590 enum amt_status status,
593 if (validate && tunnel->status >= status)
595 netdev_dbg(tunnel->amt->dev,
596 "Update Tunnel(IP = %pI4, PORT = %u) status %s -> %s",
597 &tunnel->ip4, ntohs(tunnel->source_port),
598 status_str[tunnel->status], status_str[status]);
599 tunnel->status = status;
/* Locked wrapper around __amt_update_gw_status(). */
602 static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
605 spin_lock_bh(&amt->lock);
606 __amt_update_gw_status(amt, status, validate);
607 spin_unlock_bh(&amt->lock);
/* Locked wrapper around __amt_update_relay_status(). */
610 static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
611 enum amt_status status, bool validate)
613 spin_lock_bh(&tunnel->lock);
614 __amt_update_relay_status(tunnel, status, validate);
615 spin_unlock_bh(&tunnel->lock);
/* Gateway side: build and transmit an AMT Discovery message to the
 * configured discovery address.  Routes via the stream device, hand-builds
 * UDP + IPv4 headers with a computed UDP checksum, sends with
 * ip_local_out(), and on success advances the GW status to SENT_DISCOVERY.
 * tx_errors is bumped on route/alloc/xmit failure.
 * NOTE(review): local declarations, rcu_read_lock/unlock and error-path
 * labels are on lines omitted from this excerpt.
 */
618 static void amt_send_discovery(struct amt_dev *amt)
620 struct amt_header_discovery *amtd;
621 int hlen, tlen, offset;
632 sock = rcu_dereference(amt->sock);
636 if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
639 rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
640 amt->discovery_ip, amt->local_ip,
641 amt->gw_port, amt->relay_port,
643 amt->stream_dev->ifindex);
645 amt->dev->stats.tx_errors++;
649 hlen = LL_RESERVED_SPACE(amt->dev);
650 tlen = amt->dev->needed_tailroom;
651 len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
652 skb = netdev_alloc_skb_ip_align(amt->dev, len);
655 amt->dev->stats.tx_errors++;
659 skb->priority = TC_PRIO_CONTROL;
660 skb_dst_set(skb, &rt->dst);
662 len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
663 skb_reset_network_header(skb);
/* AMT discovery payload: type + the gateway's current nonce. */
665 amtd = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
667 amtd->type = AMT_MSG_DISCOVERY;
669 amtd->nonce = amt->nonce;
670 skb_push(skb, sizeof(*udph));
671 skb_reset_transport_header(skb);
673 udph->source = amt->gw_port;
674 udph->dest = amt->relay_port;
675 udph->len = htons(sizeof(*udph) + sizeof(*amtd));
/* Full UDP checksum over payload + pseudo-header. */
677 offset = skb_transport_offset(skb);
678 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
679 udph->check = csum_tcpudp_magic(amt->local_ip, amt->discovery_ip,
680 sizeof(*udph) + sizeof(*amtd),
681 IPPROTO_UDP, skb->csum);
683 skb_push(skb, sizeof(*iph));
686 iph->ihl = (sizeof(struct iphdr)) >> 2;
689 iph->ttl = ip4_dst_hoplimit(&rt->dst);
690 iph->daddr = amt->discovery_ip;
691 iph->saddr = amt->local_ip;
692 iph->protocol = IPPROTO_UDP;
693 iph->tot_len = htons(len);
695 skb->ip_summed = CHECKSUM_NONE;
696 ip_select_ident(amt->net, skb, NULL);
698 err = ip_local_out(amt->net, sock->sk, skb);
699 if (unlikely(net_xmit_eval(err)))
700 amt->dev->stats.tx_errors++;
702 spin_lock_bh(&amt->lock);
703 __amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
704 spin_unlock_bh(&amt->lock);
/* Gateway side: build and transmit an AMT Request message to the relay
 * (amt->remote_ip).  Mirrors amt_send_discovery() — route, hand-built
 * UDP/IPv4 headers, checksummed, sent via ip_local_out() — but carries an
 * amt_header_request; the v6 flag presumably selects the requested address
 * family in a field set on an omitted line (TODO confirm).
 */
709 static void amt_send_request(struct amt_dev *amt, bool v6)
711 struct amt_header_request *amtrh;
712 int hlen, tlen, offset;
723 sock = rcu_dereference(amt->sock);
727 if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
730 rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
731 amt->remote_ip, amt->local_ip,
732 amt->gw_port, amt->relay_port,
734 amt->stream_dev->ifindex);
736 amt->dev->stats.tx_errors++;
740 hlen = LL_RESERVED_SPACE(amt->dev);
741 tlen = amt->dev->needed_tailroom;
742 len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
743 skb = netdev_alloc_skb_ip_align(amt->dev, len);
746 amt->dev->stats.tx_errors++;
750 skb->priority = TC_PRIO_CONTROL;
751 skb_dst_set(skb, &rt->dst);
753 len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
754 skb_reset_network_header(skb);
756 amtrh = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
758 amtrh->type = AMT_MSG_REQUEST;
759 amtrh->reserved1 = 0;
761 amtrh->reserved2 = 0;
762 amtrh->nonce = amt->nonce;
763 skb_push(skb, sizeof(*udph));
764 skb_reset_transport_header(skb);
766 udph->source = amt->gw_port;
767 udph->dest = amt->relay_port;
768 udph->len = htons(sizeof(*amtrh) + sizeof(*udph));
770 offset = skb_transport_offset(skb);
771 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
772 udph->check = csum_tcpudp_magic(amt->local_ip, amt->remote_ip,
773 sizeof(*udph) + sizeof(*amtrh),
774 IPPROTO_UDP, skb->csum);
776 skb_push(skb, sizeof(*iph));
779 iph->ihl = (sizeof(struct iphdr)) >> 2;
782 iph->ttl = ip4_dst_hoplimit(&rt->dst);
783 iph->daddr = amt->remote_ip;
784 iph->saddr = amt->local_ip;
785 iph->protocol = IPPROTO_UDP;
786 iph->tot_len = htons(len);
788 skb->ip_summed = CHECKSUM_NONE;
789 ip_select_ident(amt->net, skb, NULL);
791 err = ip_local_out(amt->net, sock->sk, skb);
792 if (unlikely(net_xmit_eval(err)))
793 amt->dev->stats.tx_errors++;
/* Relay side: build an IGMPv3 general query and tag its cb with the target
 * tunnel before injection (the actual send/loopback call is on an omitted
 * line).
 */
799 static void amt_send_igmp_gq(struct amt_dev *amt,
800 struct amt_tunnel_list *tunnel)
804 skb = amt_build_igmp_gq(amt);
808 amt_skb_cb(skb)->tunnel = tunnel;
812 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of amt_build_igmp_gq(): build an MLDv2 general query to
 * ff02::1 with a hop-by-hop Router Alert header, source address selected
 * via ipv6_dev_get_saddr(), and the ICMPv6 checksum computed with
 * csum_ipv6_magic().  Compiled only with CONFIG_IPV6.
 */
813 static struct sk_buff *amt_build_mld_gq(struct amt_dev *amt)
815 u8 ra[AMT_IP6HDR_OPTS] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
816 2, 0, 0, IPV6_TLV_PAD1, IPV6_TLV_PAD1 };
817 int hlen = LL_RESERVED_SPACE(amt->dev);
818 int tlen = amt->dev->needed_tailroom;
819 struct mld2_query *mld2q;
820 void *csum_start = NULL;
821 struct ipv6hdr *ip6h;
826 len = hlen + tlen + sizeof(*ip6h) + sizeof(ra) + sizeof(*mld2q);
827 skb = netdev_alloc_skb_ip_align(amt->dev, len);
831 skb_reserve(skb, hlen);
832 skb_push(skb, sizeof(*eth));
833 skb_reset_mac_header(skb);
835 skb->priority = TC_PRIO_CONTROL;
836 skb->protocol = htons(ETH_P_IPV6);
837 skb_put_zero(skb, sizeof(*ip6h));
838 skb_put_data(skb, ra, sizeof(ra));
839 skb_put_zero(skb, sizeof(*mld2q));
840 skb_pull(skb, sizeof(*eth));
841 skb_reset_network_header(skb);
842 ip6h = ipv6_hdr(skb);
843 ip6h->payload_len = htons(sizeof(ra) + sizeof(*mld2q));
844 ip6h->nexthdr = NEXTHDR_HOP;
846 ip6h->daddr = mld2_all_node;
847 ip6_flow_hdr(ip6h, 0, 0);
/* Pick a source address for the all-nodes destination; on failure the
 * error path (omitted lines) counts a tx_error.
 */
849 if (ipv6_dev_get_saddr(amt->net, amt->dev, &ip6h->daddr, 0,
851 amt->dev->stats.tx_errors++;
856 eth->h_proto = htons(ETH_P_IPV6);
857 ether_addr_copy(eth->h_source, amt->dev->dev_addr);
858 ipv6_eth_mc_map(&mld2_all_node, eth->h_dest);
860 skb_pull(skb, sizeof(*ip6h) + sizeof(ra));
861 skb_reset_transport_header(skb);
862 mld2q = (struct mld2_query *)icmp6_hdr(skb);
863 mld2q->mld2q_mrc = htons(1);
864 mld2q->mld2q_type = ICMPV6_MGM_QUERY;
865 mld2q->mld2q_code = 0;
866 mld2q->mld2q_cksum = 0;
867 mld2q->mld2q_resv1 = 0;
868 mld2q->mld2q_resv2 = 0;
869 mld2q->mld2q_suppress = 0;
870 mld2q->mld2q_qrv = amt->qrv;
871 mld2q->mld2q_nsrcs = 0;
872 mld2q->mld2q_qqic = amt->qi;
873 csum_start = (void *)mld2q;
874 mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
877 csum_partial(csum_start,
880 skb->ip_summed = CHECKSUM_NONE;
881 skb_push(skb, sizeof(*eth) + sizeof(*ip6h) + sizeof(ra));
/* Relay side: build an MLDv2 general query and tag its cb with the target
 * tunnel (send call on an omitted line).  A second, empty stub definition
 * follows for !CONFIG_IPV6 builds (the #else is on an omitted line).
 */
885 static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
889 skb = amt_build_mld_gq(amt);
893 amt_skb_cb(skb)->tunnel = tunnel;
897 static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
/* Periodically regenerate the device's siphash key (used for response-MAC
 * computation elsewhere) under amt->lock, then re-arm for the next
 * AMT_SECRET_TIMEOUT interval.
 */
902 static void amt_secret_work(struct work_struct *work)
904 struct amt_dev *amt = container_of(to_delayed_work(work),
908 spin_lock_bh(&amt->lock);
909 get_random_bytes(&amt->key, sizeof(siphash_key_t));
910 spin_unlock_bh(&amt->lock);
911 mod_delayed_work(amt_wq, &amt->secret_wq,
912 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
/* Discovery retry worker: while the gateway has not progressed past
 * SENT_DISCOVERY, generate a fresh nonce, send a Discovery message
 * (lock dropped around the send), and re-arm after AMT_DISCOVERY_TIMEOUT.
 */
915 static void amt_discovery_work(struct work_struct *work)
917 struct amt_dev *amt = container_of(to_delayed_work(work),
921 spin_lock_bh(&amt->lock);
922 if (amt->status > AMT_STATUS_SENT_DISCOVERY)
924 get_random_bytes(&amt->nonce, sizeof(__be32));
925 spin_unlock_bh(&amt->lock);
927 amt_send_discovery(amt);
928 spin_lock_bh(&amt->lock);
930 mod_delayed_work(amt_wq, &amt->discovery_wq,
931 msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
932 spin_unlock_bh(&amt->lock);
/* Request retry worker with exponential backoff.  Requires an
 * advertisement to have been received first; after AMT_MAX_REQ_COUNT
 * failed attempts the gateway state machine is reset to INIT.  Otherwise
 * sends both an IPv4 and an IPv6 Request (lock dropped around the sends),
 * marks SENT_REQUEST, and re-arms with delay 2^req_cnt seconds capped at
 * AMT_MAX_REQ_TIMEOUT.
 */
935 static void amt_req_work(struct work_struct *work)
937 struct amt_dev *amt = container_of(to_delayed_work(work),
942 spin_lock_bh(&amt->lock);
943 if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
946 if (amt->req_cnt > AMT_MAX_REQ_COUNT) {
947 netdev_dbg(amt->dev, "Gateway is not ready");
948 amt->qi = AMT_INIT_REQ_TIMEOUT;
952 __amt_update_gw_status(amt, AMT_STATUS_INIT, false);
956 spin_unlock_bh(&amt->lock);
958 amt_send_request(amt, false);
959 amt_send_request(amt, true);
960 spin_lock_bh(&amt->lock);
961 __amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
964 exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
965 mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
966 spin_unlock_bh(&amt->lock);
/* Gateway side: wrap a locally generated IGMP/MLD report skb in an AMT
 * Membership Update header (carrying the current nonce and the relay's
 * response MAC) and tunnel it to the relay with udp_tunnel_xmit_skb().
 * Returns a bool — presumably true on failure paths (omitted lines) so the
 * caller can free the skb; TODO confirm against the error labels.
 */
969 static bool amt_send_membership_update(struct amt_dev *amt,
973 struct amt_header_membership_update *amtmu;
980 sock = rcu_dereference_bh(amt->sock);
/* Ensure headroom for the AMT + outer IPv4/UDP headers. */
984 err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmu) +
985 sizeof(*iph) + sizeof(struct udphdr));
989 skb_reset_inner_headers(skb);
990 memset(&fl4, 0, sizeof(struct flowi4));
991 fl4.flowi4_oif = amt->stream_dev->ifindex;
992 fl4.daddr = amt->remote_ip;
993 fl4.saddr = amt->local_ip;
994 fl4.flowi4_tos = AMT_TOS;
995 fl4.flowi4_proto = IPPROTO_UDP;
996 rt = ip_route_output_key(amt->net, &fl4);
998 netdev_dbg(amt->dev, "no route to %pI4\n", &amt->remote_ip);
1002 amtmu = skb_push(skb, sizeof(*amtmu));
1004 amtmu->type = AMT_MSG_MEMBERSHIP_UPDATE;
1005 amtmu->reserved = 0;
1006 amtmu->nonce = amt->nonce;
1007 amtmu->response_mac = amt->mac;
1010 skb_set_inner_protocol(skb, htons(ETH_P_IP));
1012 skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
1013 udp_tunnel_xmit_skb(rt, sock->sk, skb,
1017 ip4_dst_hoplimit(&rt->dst),
1023 amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
/* Relay side: copy a multicast data packet (with headroom for the AMT +
 * outer headers), prepend an AMT Multicast Data header, and tunnel it to
 * the given gateway tunnel endpoint via udp_tunnel_xmit_skb().  The
 * original skb is left untouched (const).
 */
1027 static void amt_send_multicast_data(struct amt_dev *amt,
1028 const struct sk_buff *oskb,
1029 struct amt_tunnel_list *tunnel,
1032 struct amt_header_mcast_data *amtmd;
1033 struct socket *sock;
1034 struct sk_buff *skb;
1039 sock = rcu_dereference_bh(amt->sock);
1043 skb = skb_copy_expand(oskb, sizeof(*amtmd) + sizeof(*iph) +
1044 sizeof(struct udphdr), 0, GFP_ATOMIC);
1048 skb_reset_inner_headers(skb);
1049 memset(&fl4, 0, sizeof(struct flowi4));
1050 fl4.flowi4_oif = amt->stream_dev->ifindex;
1051 fl4.daddr = tunnel->ip4;
1052 fl4.saddr = amt->local_ip;
1053 fl4.flowi4_proto = IPPROTO_UDP;
1054 rt = ip_route_output_key(amt->net, &fl4);
1056 netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
1061 amtmd = skb_push(skb, sizeof(*amtmd));
1063 amtmd->reserved = 0;
1064 amtmd->type = AMT_MSG_MULTICAST_DATA;
1067 skb_set_inner_protocol(skb, htons(ETH_P_IP));
1069 skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
1070 udp_tunnel_xmit_skb(rt, sock->sk, skb,
1074 ip4_dst_hoplimit(&rt->dst),
1077 tunnel->source_port,
/* Relay side: wrap a general-query skb in an AMT Membership Query header
 * (tunnel nonce + response MAC) and tunnel it to the gateway endpoint; on
 * success advances the tunnel status to SENT_QUERY.  Returns a bool —
 * presumably true on the (omitted) failure paths; TODO confirm.
 */
1082 static bool amt_send_membership_query(struct amt_dev *amt,
1083 struct sk_buff *skb,
1084 struct amt_tunnel_list *tunnel,
1087 struct amt_header_membership_query *amtmq;
1088 struct socket *sock;
1093 sock = rcu_dereference_bh(amt->sock);
1097 err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmq) +
1098 sizeof(struct iphdr) + sizeof(struct udphdr));
1102 skb_reset_inner_headers(skb);
1103 memset(&fl4, 0, sizeof(struct flowi4));
1104 fl4.flowi4_oif = amt->stream_dev->ifindex;
1105 fl4.daddr = tunnel->ip4;
1106 fl4.saddr = amt->local_ip;
1107 fl4.flowi4_tos = AMT_TOS;
1108 fl4.flowi4_proto = IPPROTO_UDP;
1109 rt = ip_route_output_key(amt->net, &fl4);
1111 netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
1115 amtmq = skb_push(skb, sizeof(*amtmq));
1117 amtmq->type = AMT_MSG_MEMBERSHIP_QUERY;
1118 amtmq->reserved = 0;
1121 amtmq->nonce = tunnel->nonce;
1122 amtmq->response_mac = tunnel->mac;
1125 skb_set_inner_protocol(skb, htons(ETH_P_IP));
1127 skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
1128 udp_tunnel_xmit_skb(rt, sock->sk, skb,
1132 ip4_dst_hoplimit(&rt->dst),
1135 tunnel->source_port,
1138 amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true);
/* ndo_start_xmit for the AMT virtual device.  Classifies the outgoing
 * packet (IGMP/MLD report vs. query vs. plain multicast data), then:
 * gateway mode forwards only reports as Membership Updates (and only once
 * the relevant family is "ready"); relay mode forwards queries back to the
 * originating tunnel (taken from the skb cb) and replicates multicast data
 * to every tunnel whose group table contains the destination group.
 * Always returns NETDEV_TX_OK, counting tx_errors/tx_dropped on failure.
 * NOTE(review): several branch bodies, 'v6' assignments and goto labels are
 * on lines omitted from this excerpt.
 */
1142 static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
1144 struct amt_dev *amt = netdev_priv(dev);
1145 struct amt_tunnel_list *tunnel;
1146 struct amt_group_node *gnode;
1147 union amt_addr group = {0,};
1148 #if IS_ENABLED(CONFIG_IPV6)
1149 struct ipv6hdr *ip6h;
1150 struct mld_msg *mld;
1152 bool report = false;
1161 if (iph->version == 4) {
1162 if (!ipv4_is_multicast(iph->daddr))
/* Valid IGMP: reports are forwarded (gateway), queries handled below. */
1165 if (!ip_mc_check_igmp(skb)) {
1168 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1169 case IGMP_HOST_MEMBERSHIP_REPORT:
1172 case IGMP_HOST_MEMBERSHIP_QUERY:
1182 group.ip4 = iph->daddr;
1183 #if IS_ENABLED(CONFIG_IPV6)
1184 } else if (iph->version == 6) {
1185 ip6h = ipv6_hdr(skb);
1186 if (!ipv6_addr_is_multicast(&ip6h->daddr))
1189 if (!ipv6_mc_check_mld(skb)) {
1190 mld = (struct mld_msg *)skb_transport_header(skb);
1191 switch (mld->mld_type) {
1192 case ICMPV6_MGM_REPORT:
1193 case ICMPV6_MLD2_REPORT:
1196 case ICMPV6_MGM_QUERY:
1206 group.ip6 = ip6h->daddr;
1209 dev->stats.tx_errors++;
1213 if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
1216 skb_pull(skb, sizeof(struct ethhdr));
1218 if (amt->mode == AMT_MODE_GATEWAY) {
1219 /* Gateway only passes IGMP/MLD packets */
1222 if ((!v6 && !amt->ready4) || (v6 && !amt->ready6))
1224 if (amt_send_membership_update(amt, skb, v6))
1227 } else if (amt->mode == AMT_MODE_RELAY) {
1229 tunnel = amt_skb_cb(skb)->tunnel;
1235 /* Do not forward unexpected query */
1236 if (amt_send_membership_query(amt, skb, tunnel, v6))
/* Relay data path: replicate to every tunnel that joined this group. */
1243 list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
1244 hash = amt_group_hash(tunnel, &group);
1245 hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash],
1248 if (gnode->group_addr.ip4 == iph->daddr)
1250 #if IS_ENABLED(CONFIG_IPV6)
1252 if (ipv6_addr_equal(&gnode->group_addr.ip6,
1260 amt_send_multicast_data(amt, skb, tunnel, v6);
1265 return NETDEV_TX_OK;
1269 dev->stats.tx_dropped++;
1270 return NETDEV_TX_OK;
/* Validate the AMT header that follows the UDP header of a received
 * packet: version must be 0 and the type must be a known nonzero value.
 * Returns the message type, or an error value, on lines omitted from this
 * excerpt.
 */
1273 static int amt_parse_type(struct sk_buff *skb)
1275 struct amt_header *amth;
1277 if (!pskb_may_pull(skb, sizeof(struct udphdr) +
1278 sizeof(struct amt_header)))
1281 amth = (struct amt_header *)(udp_hdr(skb) + 1);
1283 if (amth->version != 0)
1286 if (amth->type >= __AMT_MSG_MAX || !amth->type)
/* Delete every group on a tunnel, walking all hash buckets under the
 * tunnel lock (amt_del_group() expects that lock held).
 */
1291 static void amt_clear_groups(struct amt_tunnel_list *tunnel)
1293 struct amt_dev *amt = tunnel->amt;
1294 struct amt_group_node *gnode;
1295 struct hlist_node *t;
1298 spin_lock_bh(&tunnel->lock);
1300 for (i = 0; i < amt->hash_buckets; i++)
1301 hlist_for_each_entry_safe(gnode, t, &tunnel->groups[i], node)
1302 amt_del_group(amt, gnode);
1304 spin_unlock_bh(&tunnel->lock);
/* Tunnel keepalive timeout: unlink the tunnel from the device's list under
 * amt->lock, tear down all of its groups, and free it after an RCU grace
 * period.
 */
1307 static void amt_tunnel_expire(struct work_struct *work)
1309 struct amt_tunnel_list *tunnel = container_of(to_delayed_work(work),
1310 struct amt_tunnel_list,
1312 struct amt_dev *amt = tunnel->amt;
1314 spin_lock_bh(&amt->lock);
1316 list_del_rcu(&tunnel->list);
1318 amt_clear_groups(tunnel);
1320 spin_unlock_bh(&amt->lock);
1321 kfree_rcu(tunnel, rcu);
/* Post-report cleanup of a group's source table: first destroy every
 * record still flagged OLD (i.e. not refreshed by the report just
 * processed), then demote all surviving NEW records to OLD so the next
 * report starts from a clean generation.
 */
1324 static void amt_cleanup_srcs(struct amt_dev *amt,
1325 struct amt_tunnel_list *tunnel,
1326 struct amt_group_node *gnode)
1328 struct amt_source_node *snode;
1329 struct hlist_node *t;
1332 /* Delete old sources */
1333 for (i = 0; i < amt->hash_buckets; i++) {
1334 hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) {
1335 if (snode->flags == AMT_SOURCE_OLD)
1336 amt_destroy_source(snode);
1340 /* switch from new to old */
1341 for (i = 0; i < amt->hash_buckets; i++) {
1342 hlist_for_each_entry_rcu(snode, &gnode->sources[i], node) {
1343 snode->flags = AMT_SOURCE_OLD;
1345 netdev_dbg(snode->gnode->amt->dev,
1346 "Add source as OLD %pI4 from %pI4\n",
1347 &snode->source_addr.ip4,
1348 &gnode->group_addr.ip4);
1349 #if IS_ENABLED(CONFIG_IPV6)
1351 netdev_dbg(snode->gnode->amt->dev,
1352 "Add source as OLD %pI6 from %pI6\n",
1353 &snode->source_addr.ip6,
1354 &gnode->group_addr.ip6);
/* Parse the source list of an IGMPv3/MLDv2 group record and create a NEW
 * source node for each address not already present in the group (any
 * filter class), respecting the per-tunnel max_sources cap.  grec is cast
 * per address family; nsrcs comes from the record's grec_nsrcs field.
 * NOTE(review): the v6 branch conditions and allocation-failure handling
 * are on lines omitted from this excerpt.
 */
1360 static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel,
1361 struct amt_group_node *gnode, void *grec,
1364 struct igmpv3_grec *igmp_grec;
1365 struct amt_source_node *snode;
1366 #if IS_ENABLED(CONFIG_IPV6)
1367 struct mld2_grec *mld_grec;
1369 union amt_addr src = {0,};
1375 igmp_grec = (struct igmpv3_grec *)grec;
1376 nsrcs = ntohs(igmp_grec->grec_nsrcs);
1378 #if IS_ENABLED(CONFIG_IPV6)
1379 mld_grec = (struct mld2_grec *)grec;
1380 nsrcs = ntohs(mld_grec->grec_nsrcs);
1385 for (i = 0; i < nsrcs; i++) {
1386 if (tunnel->nr_sources >= amt->max_sources)
1389 src.ip4 = igmp_grec->grec_src[i];
1390 #if IS_ENABLED(CONFIG_IPV6)
1392 memcpy(&src.ip6, &mld_grec->grec_src[i],
1393 sizeof(struct in6_addr));
/* Skip addresses the group already tracks. */
1395 if (amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL, &src))
1398 snode = amt_alloc_snode(gnode, &src);
1400 hash = amt_source_hash(tunnel, &snode->source_addr);
1401 hlist_add_head_rcu(&snode->node, &gnode->sources[hash]);
1402 tunnel->nr_sources++;
1403 gnode->nr_sources++;
1406 netdev_dbg(snode->gnode->amt->dev,
1407 "Add source as NEW %pI4 from %pI4\n",
1408 &snode->source_addr.ip4,
1409 &gnode->group_addr.ip4);
1410 #if IS_ENABLED(CONFIG_IPV6)
1412 netdev_dbg(snode->gnode->amt->dev,
1413 "Add source as NEW %pI6 from %pI6\n",
1414 &snode->source_addr.ip6,
1415 &gnode->group_addr.ip6);
1421 /* Router State Report Rec'd New Router State
1422 * ------------ ------------ ----------------
1423 * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A)
1425 * -----------+-----------+-----------+
1427 * -----------+-----------+-----------+
1429 * -----------+-----------+-----------+
1431 * -----------+-----------+-----------+
1433 * -----------+-----------+-----------+
1435 * a) Received sources are NONE/NEW
1436 * b) All NONE will be deleted by amt_cleanup_srcs().
1437 * c) All OLD will be deleted by amt_cleanup_srcs().
1438 * d) After delete, NEW source will be switched to OLD.
/* Core set-algebra helper for record processing (see table above): apply
 * action 'act' to sources selected by combining the record's source list
 * with the group's stored sources under filter 'filter', according to the
 * ops mode — union (act on record sources found in the table), full-table
 * sweep plus record pass, subtraction (table sources NOT in the record),
 * and reverse subtraction.  grec is cast per address family, as in
 * amt_add_srcs().
 * NOTE(review): the switch on 'ops', several case labels, v6 branch
 * conditions and break statements are on lines omitted from this excerpt.
 */
1440 static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel,
1441 struct amt_group_node *gnode,
1444 enum amt_filter filter,
1448 struct amt_dev *amt = tunnel->amt;
1449 struct amt_source_node *snode;
1450 struct igmpv3_grec *igmp_grec;
1451 #if IS_ENABLED(CONFIG_IPV6)
1452 struct mld2_grec *mld_grec;
1454 union amt_addr src = {0,};
1455 struct hlist_node *t;
1460 igmp_grec = (struct igmpv3_grec *)grec;
1461 nsrcs = ntohs(igmp_grec->grec_nsrcs);
1463 #if IS_ENABLED(CONFIG_IPV6)
1464 mld_grec = (struct mld2_grec *)grec;
1465 nsrcs = ntohs(mld_grec->grec_nsrcs);
1471 memset(&src, 0, sizeof(union amt_addr));
/* Union: act on each record source that matches 'filter' in the table. */
1475 for (i = 0; i < nsrcs; i++) {
1477 src.ip4 = igmp_grec->grec_src[i];
1478 #if IS_ENABLED(CONFIG_IPV6)
1480 memcpy(&src.ip6, &mld_grec->grec_src[i],
1481 sizeof(struct in6_addr));
1483 snode = amt_lookup_src(tunnel, gnode, filter, &src);
1486 amt_act_src(tunnel, gnode, snode, act);
/* Sweep every stored source matching 'filter', then the record pass. */
1491 for (i = 0; i < amt->hash_buckets; i++) {
1492 hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
1494 if (amt_status_filter(snode, filter))
1495 amt_act_src(tunnel, gnode, snode, act);
1498 for (i = 0; i < nsrcs; i++) {
1500 src.ip4 = igmp_grec->grec_src[i];
1501 #if IS_ENABLED(CONFIG_IPV6)
1503 memcpy(&src.ip6, &mld_grec->grec_src[i],
1504 sizeof(struct in6_addr));
1506 snode = amt_lookup_src(tunnel, gnode, filter, &src);
1509 amt_act_src(tunnel, gnode, snode, act);
/* Subtraction: act on stored sources absent from the record list. */
1514 for (i = 0; i < amt->hash_buckets; i++) {
1515 hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
1517 if (!amt_status_filter(snode, filter))
1519 for (j = 0; j < nsrcs; j++) {
1521 src.ip4 = igmp_grec->grec_src[j];
1522 #if IS_ENABLED(CONFIG_IPV6)
1525 &mld_grec->grec_src[j],
1526 sizeof(struct in6_addr));
1528 if (amt_addr_equal(&snode->source_addr,
1532 amt_act_src(tunnel, gnode, snode, act);
1538 case AMT_OPS_SUB_REV:
/* Reverse subtraction: record sources present overall but not
 * matching 'filter'.
 */
1540 for (i = 0; i < nsrcs; i++) {
1542 src.ip4 = igmp_grec->grec_src[i];
1543 #if IS_ENABLED(CONFIG_IPV6)
1545 memcpy(&src.ip6, &mld_grec->grec_src[i],
1546 sizeof(struct in6_addr));
1548 snode = amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL,
1551 snode = amt_lookup_src(tunnel, gnode,
1554 amt_act_src(tunnel, gnode, snode, act);
1559 netdev_dbg(amt->dev, "Invalid type\n");
/* Handle a MODE_IS_INCLUDE group record (IGMPv3/MLDv2) by driving the
 * RFC 3376 6.4.1 / RFC 3810 router-state transitions reproduced in the
 * tables below, expressed as amt_lookup_act_srcs() set operations.
 */
1564 static void amt_mcast_is_in_handler(struct amt_dev *amt,
1565 				    struct amt_tunnel_list *tunnel,
1566 				    struct amt_group_node *gnode,
1567 				    void *grec, void *zero_grec, bool v6)
1569 	if (gnode->filter_mode == MCAST_INCLUDE) {
1570 /* Router State   Report Rec'd New Router State        Actions
1571  * ------------   ------------ ----------------        -------
1572  * INCLUDE (A)    IS_IN (B)    INCLUDE (A+B)           (B)=GMI
1574 		/* Update IS_IN (B) as FWD/NEW */
1575 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1576 				    AMT_FILTER_NONE_NEW,
1577 				    AMT_ACT_STATUS_FWD_NEW,
1579 		/* Update INCLUDE (A) as NEW */
1580 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1582 				    AMT_ACT_STATUS_FWD_NEW,
1585 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1591  * ------------   ------------ ----------------        -------
1592  * EXCLUDE (X,Y)  IS_IN (A)    EXCLUDE (X+A,Y-A)       (A)=GMI
1594 		/* Update (A) in (X, Y) as NONE/NEW */
1595 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1597 				    AMT_ACT_STATUS_NONE_NEW,
1599 		/* Update FWD/OLD as FWD/NEW */
1600 		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1602 				    AMT_ACT_STATUS_FWD_NEW,
1604 		/* Update IS_IN (A) as FWD/NEW */
1605 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1606 				    AMT_FILTER_NONE_NEW,
1607 				    AMT_ACT_STATUS_FWD_NEW,
1609 		/* Update EXCLUDE (, Y-A) as D_FWD_NEW */
1610 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
1612 				    AMT_ACT_STATUS_D_FWD_NEW,
/* Handle a MODE_IS_EXCLUDE group record: may flip the group from
 * INCLUDE to EXCLUDE filter mode and re-arms the group timer to GMI.
 */
1617 static void amt_mcast_is_ex_handler(struct amt_dev *amt,
1618 				    struct amt_tunnel_list *tunnel,
1619 				    struct amt_group_node *gnode,
1620 				    void *grec, void *zero_grec, bool v6)
1622 	if (gnode->filter_mode == MCAST_INCLUDE) {
1623 /* Router State   Report Rec'd New Router State        Actions
1624  * ------------   ------------ ----------------        -------
1625  * INCLUDE (A)    IS_EX (B)    EXCLUDE (A*B,B-A)       (B-A)=0
1629 		/* EXCLUDE(A*B, ) */
1630 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1632 				    AMT_ACT_STATUS_FWD_NEW,
1634 		/* EXCLUDE(, B-A) */
1635 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1637 				    AMT_ACT_STATUS_D_FWD_NEW,
1640 		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1641 				    AMT_FILTER_D_FWD_NEW,
1644 		/* Group Timer=GMI */
1645 		if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1646 				      msecs_to_jiffies(amt_gmi(amt))))
1648 		gnode->filter_mode = MCAST_EXCLUDE;
1649 		/* Delete (A-B) will be worked by amt_cleanup_srcs(). */
1651 /* Router State   Report Rec'd New Router State        Actions
1652  * ------------   ------------ ----------------        -------
1653  * EXCLUDE (X,Y)  IS_EX (A)    EXCLUDE (A-Y,Y*A)       (A-X-Y)=GMI
1658 		/* EXCLUDE (A-Y, ) */
1659 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1661 				    AMT_ACT_STATUS_FWD_NEW,
1663 		/* EXCLUDE (, Y*A ) */
1664 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1666 				    AMT_ACT_STATUS_D_FWD_NEW,
1669 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1670 				    AMT_FILTER_BOTH_NEW,
1673 		/* Group Timer=GMI */
1674 		if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1675 				      msecs_to_jiffies(amt_gmi(amt))))
1677 		/* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */
/* Handle a CHANGE_TO_INCLUDE group record per the RFC state tables in
 * the inline comments; filter mode itself is not changed here.
 */
1681 static void amt_mcast_to_in_handler(struct amt_dev *amt,
1682 				    struct amt_tunnel_list *tunnel,
1683 				    struct amt_group_node *gnode,
1684 				    void *grec, void *zero_grec, bool v6)
1686 	if (gnode->filter_mode == MCAST_INCLUDE) {
1687 /* Router State   Report Rec'd New Router State        Actions
1688  * ------------   ------------ ----------------        -------
1689  * INCLUDE (A)    TO_IN (B)    INCLUDE (A+B)           (B)=GMI
1692 		/* Update TO_IN (B) sources as FWD/NEW */
1693 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1694 				    AMT_FILTER_NONE_NEW,
1695 				    AMT_ACT_STATUS_FWD_NEW,
1697 		/* Update INCLUDE (A) sources as NEW */
1698 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1700 				    AMT_ACT_STATUS_FWD_NEW,
1703 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1708 /* Router State   Report Rec'd New Router State        Actions
1709  * ------------   ------------ ----------------        -------
1710  * EXCLUDE (X,Y)  TO_IN (A)    EXCLUDE (X+A,Y-A)       (A)=GMI
1714 		/* Update TO_IN (A) sources as FWD/NEW */
1715 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1716 				    AMT_FILTER_NONE_NEW,
1717 				    AMT_ACT_STATUS_FWD_NEW,
1719 		/* Update EXCLUDE(X,) sources as FWD/NEW */
1720 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1722 				    AMT_ACT_STATUS_FWD_NEW,
1725 		 * (A) are already switched to FWD_NEW.
1726 		 * So, D_FWD/OLD -> D_FWD/NEW is okay.
1728 		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1730 				    AMT_ACT_STATUS_D_FWD_NEW,
1733 		 * Only FWD_NEW will have (A) sources.
1735 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
/* Handle a CHANGE_TO_EXCLUDE group record: may flip the group from
 * INCLUDE to EXCLUDE filter mode and re-arms the group timer to GMI.
 */
1742 static void amt_mcast_to_ex_handler(struct amt_dev *amt,
1743 				    struct amt_tunnel_list *tunnel,
1744 				    struct amt_group_node *gnode,
1745 				    void *grec, void *zero_grec, bool v6)
1747 	if (gnode->filter_mode == MCAST_INCLUDE) {
1748 /* Router State   Report Rec'd New Router State        Actions
1749  * ------------   ------------ ----------------        -------
1750  * INCLUDE (A)    TO_EX (B)    EXCLUDE (A*B,B-A)       (B-A)=0
1755 		/* EXCLUDE (A*B, ) */
1756 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1758 				    AMT_ACT_STATUS_FWD_NEW,
1760 		/* EXCLUDE (, B-A) */
1761 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1763 				    AMT_ACT_STATUS_D_FWD_NEW,
1766 		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1767 				    AMT_FILTER_D_FWD_NEW,
1770 		/* Group Timer=GMI */
1771 		if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1772 				      msecs_to_jiffies(amt_gmi(amt))))
1774 		gnode->filter_mode = MCAST_EXCLUDE;
1775 		/* Delete (A-B) will be worked by amt_cleanup_srcs(). */
1777 /* Router State   Report Rec'd New Router State        Actions
1778  * ------------   ------------ ----------------        -------
1779  * EXCLUDE (X,Y)  TO_EX (A)    EXCLUDE (A-Y,Y*A)       (A-X-Y)=Group Timer
1785 		/* Update (A-X-Y) as NONE/OLD */
1786 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1790 		/* EXCLUDE (A-Y, ) */
1791 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1793 				    AMT_ACT_STATUS_FWD_NEW,
1795 		/* EXCLUDE (, Y*A) */
1796 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1798 				    AMT_ACT_STATUS_D_FWD_NEW,
1800 		/* Group Timer=GMI */
1801 		if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1802 				      msecs_to_jiffies(amt_gmi(amt))))
1804 		/* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */
/* Handle an ALLOW_NEW_SOURCES group record: adds the reported sources
 * to the forwarding set without changing the group's filter mode.
 */
1808 static void amt_mcast_allow_handler(struct amt_dev *amt,
1809 				    struct amt_tunnel_list *tunnel,
1810 				    struct amt_group_node *gnode,
1811 				    void *grec, void *zero_grec, bool v6)
1813 	if (gnode->filter_mode == MCAST_INCLUDE) {
1814 /* Router State   Report Rec'd New Router State        Actions
1815  * ------------   ------------ ----------------        -------
1816  * INCLUDE (A)    ALLOW (B)    INCLUDE (A+B)           (B)=GMI
1819 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1821 				    AMT_ACT_STATUS_FWD_NEW,
1824 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1829 /* Router State   Report Rec'd New Router State        Actions
1830  * ------------   ------------ ----------------        -------
1831  * EXCLUDE (X,Y)  ALLOW (A)    EXCLUDE (X+A,Y-A)       (A)=GMI
1833 		/* EXCLUDE (X+A, ) */
1834 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1836 				    AMT_ACT_STATUS_FWD_NEW,
1838 		/* EXCLUDE (, Y-A) */
1839 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
1841 				    AMT_ACT_STATUS_D_FWD_NEW,
1844 		 * All (A) source are now FWD/NEW status.
1846 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
/* Handle a BLOCK_OLD_SOURCES group record.  In INCLUDE mode the router
 * state is unchanged (the RFC only sends a source query); in EXCLUDE
 * mode blocked sources are moved to the do-not-forward set.
 */
1853 static void amt_mcast_block_handler(struct amt_dev *amt,
1854 				    struct amt_tunnel_list *tunnel,
1855 				    struct amt_group_node *gnode,
1856 				    void *grec, void *zero_grec, bool v6)
1858 	if (gnode->filter_mode == MCAST_INCLUDE) {
1859 /* Router State   Report Rec'd New Router State        Actions
1860  * ------------   ------------ ----------------        -------
1861  * INCLUDE (A)    BLOCK (B)    INCLUDE (A)             Send Q(G,A*B)
1864 		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1866 				    AMT_ACT_STATUS_FWD_NEW,
1869 /* Router State   Report Rec'd New Router State        Actions
1870  * ------------   ------------ ----------------        -------
1871  * EXCLUDE (X,Y)  BLOCK (A)    EXCLUDE (X+(A-Y),Y)     (A-X-Y)=Group Timer
1874 		/* (A-X-Y)=Group Timer */
1875 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1880 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1882 				    AMT_ACT_STATUS_FWD_NEW,
1884 		/* EXCLUDE (X+(A-Y) */
1885 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1887 				    AMT_ACT_STATUS_FWD_NEW,
1890 		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1892 				    AMT_ACT_STATUS_D_FWD_NEW,
1898 * 7.3.2. In the Presence of Older Version Group Members
1900 * When Group Compatibility Mode is IGMPv2, a router internally
1901 * translates the following IGMPv2 messages for that group to their
1902 * IGMPv3 equivalents:
1904 * IGMPv2 Message IGMPv3 Equivalent
1905 * -------------- -----------------
1906 * Report IS_EX( {} )
/* Translate an IGMPv2 membership report into its IGMPv3 equivalent
 * (IS_EX({})): look up or create the group node, force EXCLUDE filter
 * mode and arm the group timer to GMI.
 */
1909 static void amt_igmpv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
1910 				      struct amt_tunnel_list *tunnel)
1912 	struct igmphdr *ih = igmp_hdr(skb);
1913 	struct iphdr *iph = ip_hdr(skb);
1914 	struct amt_group_node *gnode;
1915 	union amt_addr group, host;
1917 	memset(&group, 0, sizeof(union amt_addr));
1918 	group.ip4 = ih->group;
1919 	memset(&host, 0, sizeof(union amt_addr));
1920 	host.ip4 = iph->saddr;
1922 	gnode = amt_lookup_group(tunnel, &group, &host, false);
1924 		gnode = amt_add_group(amt, tunnel, &group, &host, false);
1925 		if (!IS_ERR(gnode)) {
1926 			gnode->filter_mode = MCAST_EXCLUDE;
1927 			if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1928 					      msecs_to_jiffies(amt_gmi(amt))))
1935 * 7.3.2. In the Presence of Older Version Group Members
1937 * When Group Compatibility Mode is IGMPv2, a router internally
1938 * translates the following IGMPv2 messages for that group to their
1939 * IGMPv3 equivalents:
1941 * IGMPv2 Message IGMPv3 Equivalent
1942 * -------------- -----------------
1943  * Leave                 TO_IN( {} )
/* Translate an IGMPv2 Leave into its IGMPv3 equivalent (TO_IN({})):
 * drop the whole group node for this (group, host) if it exists.
 */
1946 static void amt_igmpv2_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
1947 				     struct amt_tunnel_list *tunnel)
1949 	struct igmphdr *ih = igmp_hdr(skb);
1950 	struct iphdr *iph = ip_hdr(skb);
1951 	struct amt_group_node *gnode;
1952 	union amt_addr group, host;
1954 	memset(&group, 0, sizeof(union amt_addr));
1955 	group.ip4 = ih->group;
1956 	memset(&host, 0, sizeof(union amt_addr));
1957 	host.ip4 = iph->saddr;
1959 	gnode = amt_lookup_group(tunnel, &group, &host, false);
1961 		amt_del_group(amt, gnode);
/* Walk the group records of an IGMPv3 report.  Each iteration bounds-
 * checks the record header and its source list with ip_mc_may_pull()
 * before touching them, looks up/creates the group node, merges the
 * reported sources and dispatches on grec_type to the per-record-type
 * state handlers, then prunes stale sources via amt_cleanup_srcs().
 */
1964 static void amt_igmpv3_report_handler(struct amt_dev *amt, struct sk_buff *skb,
1965 				      struct amt_tunnel_list *tunnel)
1967 	struct igmpv3_report *ihrv3 = igmpv3_report_hdr(skb);
1968 	int len = skb_transport_offset(skb) + sizeof(*ihrv3);
1969 	void *zero_grec = (void *)&igmpv3_zero_grec;
1970 	struct iphdr *iph = ip_hdr(skb);
1971 	struct amt_group_node *gnode;
1972 	union amt_addr group, host;
1973 	struct igmpv3_grec *grec;
1977 	for (i = 0; i < ntohs(ihrv3->ngrec); i++) {
1978 		len += sizeof(*grec);
1979 		if (!ip_mc_may_pull(skb, len))
1982 		grec = (void *)(skb->data + len - sizeof(*grec));
1983 		nsrcs = ntohs(grec->grec_nsrcs);
/* Account for the variable-length IPv4 source array before pulling. */
1985 		len += nsrcs * sizeof(__be32);
1986 		if (!ip_mc_may_pull(skb, len))
1989 		memset(&group, 0, sizeof(union amt_addr));
1990 		group.ip4 = grec->grec_mca;
1991 		memset(&host, 0, sizeof(union amt_addr));
1992 		host.ip4 = iph->saddr;
1993 		gnode = amt_lookup_group(tunnel, &group, &host, false);
1995 			gnode = amt_add_group(amt, tunnel, &group, &host,
2001 		amt_add_srcs(amt, tunnel, gnode, grec, false);
2002 		switch (grec->grec_type) {
2003 		case IGMPV3_MODE_IS_INCLUDE:
2004 			amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
2007 		case IGMPV3_MODE_IS_EXCLUDE:
2008 			amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
2011 		case IGMPV3_CHANGE_TO_INCLUDE:
2012 			amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
2015 		case IGMPV3_CHANGE_TO_EXCLUDE:
2016 			amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
2019 		case IGMPV3_ALLOW_NEW_SOURCES:
2020 			amt_mcast_allow_handler(amt, tunnel, gnode, grec,
2023 		case IGMPV3_BLOCK_OLD_SOURCES:
2024 			amt_mcast_block_handler(amt, tunnel, gnode, grec,
2030 		amt_cleanup_srcs(amt, tunnel, gnode);
2034 /* caller held tunnel->lock */
/* Dispatch an IGMP packet to the v3 report, v2 report or v2 leave
 * handler based on its IGMP type field.
 */
2035 static void amt_igmp_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2036 				    struct amt_tunnel_list *tunnel)
2038 	struct igmphdr *ih = igmp_hdr(skb);
2041 	case IGMPV3_HOST_MEMBERSHIP_REPORT:
2042 		amt_igmpv3_report_handler(amt, skb, tunnel);
2044 	case IGMPV2_HOST_MEMBERSHIP_REPORT:
2045 		amt_igmpv2_report_handler(amt, skb, tunnel);
2047 	case IGMP_HOST_LEAVE_MESSAGE:
2048 		amt_igmpv2_leave_handler(amt, skb, tunnel);
2055 #if IS_ENABLED(CONFIG_IPV6)
2057 * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
2059 * When Multicast Address Compatibility Mode is MLDv2, a router acts
2060 * using the MLDv2 protocol for that multicast address. When Multicast
2061 * Address Compatibility Mode is MLDv1, a router internally translates
2062 * the following MLDv1 messages for that multicast address to their
2063 * MLDv2 equivalents:
2065 * MLDv1 Message MLDv2 Equivalent
2066 * -------------- -----------------
2067 * Report IS_EX( {} )
/* Translate an MLDv1 report into its MLDv2 equivalent (IS_EX({})):
 * look up or create the IPv6 group node, force EXCLUDE filter mode and
 * arm the group timer to GMI.
 */
2070 static void amt_mldv1_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2071 				     struct amt_tunnel_list *tunnel)
2073 	struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
2074 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
2075 	struct amt_group_node *gnode;
2076 	union amt_addr group, host;
2078 	memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
2079 	memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));
2081 	gnode = amt_lookup_group(tunnel, &group, &host, true);
2083 		gnode = amt_add_group(amt, tunnel, &group, &host, true);
2084 		if (!IS_ERR(gnode)) {
2085 			gnode->filter_mode = MCAST_EXCLUDE;
2086 			if (!mod_delayed_work(amt_wq, &gnode->group_timer,
2087 					      msecs_to_jiffies(amt_gmi(amt))))
2094 * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
2096 * When Multicast Address Compatibility Mode is MLDv2, a router acts
2097 * using the MLDv2 protocol for that multicast address. When Multicast
2098 * Address Compatibility Mode is MLDv1, a router internally translates
2099 * the following MLDv1 messages for that multicast address to their
2100 * MLDv2 equivalents:
2102 * MLDv1 Message MLDv2 Equivalent
2103 * -------------- -----------------
2104  * Done                  TO_IN( {} )
/* Translate an MLDv1 Done into its MLDv2 equivalent (TO_IN({})):
 * drop the whole group node for this (group, host) if it exists.
 * NOTE(review): this reads the IPv4 header (ip_hdr/iph->saddr) to build
 * @host while parsing an MLD (IPv6) message, unlike the sibling
 * amt_mldv1_report_handler which copies ip6h->saddr — verify this is
 * intentional.
 */
2107 static void amt_mldv1_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
2108 				    struct amt_tunnel_list *tunnel)
2110 	struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
2111 	struct iphdr *iph = ip_hdr(skb);
2112 	struct amt_group_node *gnode;
2113 	union amt_addr group, host;
2115 	memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
2116 	memset(&host, 0, sizeof(union amt_addr));
2117 	host.ip4 = iph->saddr;
2119 	gnode = amt_lookup_group(tunnel, &group, &host, true);
2121 		amt_del_group(amt, gnode);
/* Walk the group records of an MLDv2 report.  Mirrors
 * amt_igmpv3_report_handler(): bounds-check each record and its IPv6
 * source list with ipv6_mc_may_pull(), look up/create the group node,
 * merge sources, dispatch on grec_type, then amt_cleanup_srcs().
 */
2126 static void amt_mldv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2127 				     struct amt_tunnel_list *tunnel)
2129 	struct mld2_report *mld2r = (struct mld2_report *)icmp6_hdr(skb);
2130 	int len = skb_transport_offset(skb) + sizeof(*mld2r);
2131 	void *zero_grec = (void *)&mldv2_zero_grec;
2132 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
2133 	struct amt_group_node *gnode;
2134 	union amt_addr group, host;
2135 	struct mld2_grec *grec;
2139 	for (i = 0; i < ntohs(mld2r->mld2r_ngrec); i++) {
2140 		len += sizeof(*grec);
2141 		if (!ipv6_mc_may_pull(skb, len))
2144 		grec = (void *)(skb->data + len - sizeof(*grec));
2145 		nsrcs = ntohs(grec->grec_nsrcs);
/* Account for the variable-length IPv6 source array before pulling. */
2147 		len += nsrcs * sizeof(struct in6_addr);
2148 		if (!ipv6_mc_may_pull(skb, len))
2151 		memset(&group, 0, sizeof(union amt_addr));
2152 		group.ip6 = grec->grec_mca;
2153 		memset(&host, 0, sizeof(union amt_addr));
2154 		host.ip6 = ip6h->saddr;
2155 		gnode = amt_lookup_group(tunnel, &group, &host, true);
2157 			gnode = amt_add_group(amt, tunnel, &group, &host,
2163 		amt_add_srcs(amt, tunnel, gnode, grec, true);
2164 		switch (grec->grec_type) {
2165 		case MLD2_MODE_IS_INCLUDE:
2166 			amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
2169 		case MLD2_MODE_IS_EXCLUDE:
2170 			amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
2173 		case MLD2_CHANGE_TO_INCLUDE:
2174 			amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
2177 		case MLD2_CHANGE_TO_EXCLUDE:
2178 			amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
2181 		case MLD2_ALLOW_NEW_SOURCES:
2182 			amt_mcast_allow_handler(amt, tunnel, gnode, grec,
2185 		case MLD2_BLOCK_OLD_SOURCES:
2186 			amt_mcast_block_handler(amt, tunnel, gnode, grec,
2192 		amt_cleanup_srcs(amt, tunnel, gnode);
2196 /* caller held tunnel->lock */
/* Dispatch an MLD packet to the v1 report, v2 report or v1 done
 * handler based on its ICMPv6 type field.
 */
2197 static void amt_mld_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2198 				   struct amt_tunnel_list *tunnel)
2200 	struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
2202 	switch (mld->mld_type) {
2203 	case ICMPV6_MGM_REPORT:
2204 		amt_mldv1_report_handler(amt, skb, tunnel);
2206 	case ICMPV6_MLD2_REPORT:
2207 		amt_mldv2_report_handler(amt, skb, tunnel);
2209 	case ICMPV6_MGM_REDUCTION:
2210 		amt_mldv1_leave_handler(amt, skb, tunnel);
/* Gateway side: handle an AMT Advertisement.  Validates header length,
 * reserved/version bits and the advertised relay IPv4 address
 * (rejecting loopback/multicast/zeronet), records the relay as
 * remote_ip and immediately schedules a Request.
 */
2218 static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
2220 	struct amt_header_advertisement *amta;
2223 	hdr_size = sizeof(*amta) + sizeof(struct udphdr);
2224 	if (!pskb_may_pull(skb, hdr_size))
2227 	amta = (struct amt_header_advertisement *)(udp_hdr(skb) + 1);
2231 	if (amta->reserved || amta->version)
2234 	if (ipv4_is_loopback(amta->ip4) || ipv4_is_multicast(amta->ip4) ||
2235 	    ipv4_is_zeronet(amta->ip4))
2238 	amt->remote_ip = amta->ip4;
2239 	netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
2240 	mod_delayed_work(amt_wq, &amt->req_wq, 0);
2242 	amt_update_gw_status(amt, AMT_STATUS_RECEIVED_ADVERTISEMENT, true);
/* Gateway side: handle an AMT Multicast Data message.  Strips the AMT
 * encapsulation, synthesizes an Ethernet header with a multicast MAC
 * mapped from the inner IPv4/IPv6 destination, and hands the inner
 * packet to GRO.
 */
2246 static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
2248 	struct amt_header_mcast_data *amtmd;
2249 	int hdr_size, len, err;
2253 	hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
2254 	if (!pskb_may_pull(skb, hdr_size))
2257 	amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
2258 	if (amtmd->reserved || amtmd->version)
2261 	if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
/* Reserve room for a synthetic Ethernet header in front of the inner
 * packet.
 */
2264 	skb_reset_network_header(skb);
2265 	skb_push(skb, sizeof(*eth));
2266 	skb_reset_mac_header(skb);
2267 	skb_pull(skb, sizeof(*eth));
2270 	if (!pskb_may_pull(skb, sizeof(*iph)))
2274 	if (iph->version == 4) {
2275 		if (!ipv4_is_multicast(iph->daddr))
2277 		skb->protocol = htons(ETH_P_IP);
2278 		eth->h_proto = htons(ETH_P_IP);
2279 		ip_eth_mc_map(iph->daddr, eth->h_dest);
2280 #if IS_ENABLED(CONFIG_IPV6)
2281 	} else if (iph->version == 6) {
2282 		struct ipv6hdr *ip6h;
2284 		if (!pskb_may_pull(skb, sizeof(*ip6h)))
2287 		ip6h = ipv6_hdr(skb);
2288 		if (!ipv6_addr_is_multicast(&ip6h->daddr))
2290 		skb->protocol = htons(ETH_P_IPV6);
2291 		eth->h_proto = htons(ETH_P_IPV6);
2292 		ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2298 	skb->pkt_type = PACKET_MULTICAST;
2299 	skb->ip_summed = CHECKSUM_NONE;
2301 	err = gro_cells_receive(&amt->gro_cells, skb);
2302 	if (likely(err == NET_RX_SUCCESS))
2303 		dev_sw_netstats_rx_add(amt->dev, len);
2305 		amt->dev->stats.rx_dropped++;
/* Gateway side: handle an AMT Membership Query.  Validates and strips
 * the AMT/UDP encapsulation, records the relay's response MAC and the
 * QQIC interval under amt->lock, maps the inner IGMPv3/MLDv2 query to
 * a multicast Ethernet frame and injects it via __netif_rx() so the
 * local IGMP/MLD stack answers it.
 */
2310 static bool amt_membership_query_handler(struct amt_dev *amt,
2311 					 struct sk_buff *skb)
2313 	struct amt_header_membership_query *amtmq;
2314 	struct igmpv3_query *ihv3;
2315 	struct ethhdr *eth, *oeth;
2319 	hdr_size = sizeof(*amtmq) + sizeof(struct udphdr);
2320 	if (!pskb_may_pull(skb, hdr_size))
2323 	amtmq = (struct amt_header_membership_query *)(udp_hdr(skb) + 1);
2324 	if (amtmq->reserved || amtmq->version)
2327 	hdr_size -= sizeof(*eth);
2328 	if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
2331 	oeth = eth_hdr(skb);
2332 	skb_reset_mac_header(skb);
2333 	skb_pull(skb, sizeof(*eth));
2334 	skb_reset_network_header(skb);
2336 	if (!pskb_may_pull(skb, sizeof(*iph)))
2340 	if (iph->version == 4) {
2341 		if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
2345 		if (!ipv4_is_multicast(iph->daddr))
2348 		ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
2349 		skb_reset_transport_header(skb);
2350 		skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
/* Cache the relay's response MAC and query interval for later
 * Membership Update messages.
 */
2351 		spin_lock_bh(&amt->lock);
2353 		amt->mac = amtmq->response_mac;
2355 		amt->qi = ihv3->qqic;
2356 		spin_unlock_bh(&amt->lock);
2357 		skb->protocol = htons(ETH_P_IP);
2358 		eth->h_proto = htons(ETH_P_IP);
2359 		ip_eth_mc_map(iph->daddr, eth->h_dest);
2360 #if IS_ENABLED(CONFIG_IPV6)
2361 	} else if (iph->version == 6) {
2362 		struct mld2_query *mld2q;
2363 		struct ipv6hdr *ip6h;
2365 		if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
2369 		ip6h = ipv6_hdr(skb);
2370 		if (!ipv6_addr_is_multicast(&ip6h->daddr))
2373 		mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
2374 		skb_reset_transport_header(skb);
2375 		skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
2376 		spin_lock_bh(&amt->lock);
2378 		amt->mac = amtmq->response_mac;
2380 		amt->qi = mld2q->mld2q_qqic;
2381 		spin_unlock_bh(&amt->lock);
2382 		skb->protocol = htons(ETH_P_IPV6);
2383 		eth->h_proto = htons(ETH_P_IPV6);
2384 		ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2390 	ether_addr_copy(eth->h_source, oeth->h_source);
2391 	skb->pkt_type = PACKET_MULTICAST;
2392 	skb->ip_summed = CHECKSUM_NONE;
2394 	if (__netif_rx(skb) == NET_RX_SUCCESS) {
2395 		amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
2396 		dev_sw_netstats_rx_add(amt->dev, len);
2398 		amt->dev->stats.rx_dropped++;
/* Relay side: handle an AMT Membership Update.  Finds the tunnel whose
 * endpoint matches the outer source IP, authenticates the message by
 * nonce + response MAC (re-arming the tunnel GC timer on success),
 * then validates and processes the inner IGMP/MLD report under
 * tunnel->lock and loops the report back up via __netif_rx().
 */
2404 static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
2406 	struct amt_header_membership_update *amtmu;
2407 	struct amt_tunnel_list *tunnel;
2414 	hdr_size = sizeof(*amtmu) + sizeof(struct udphdr);
2415 	if (!pskb_may_pull(skb, hdr_size))
2418 	amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1);
2419 	if (amtmu->reserved || amtmu->version)
2422 	if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false))
2425 	skb_reset_network_header(skb);
2427 	list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
2428 		if (tunnel->ip4 == iph->saddr) {
/* Both the nonce and the siphash-derived response MAC must match the
 * values recorded when the tunnel's Request was handled.
 */
2429 			if ((amtmu->nonce == tunnel->nonce &&
2430 			     amtmu->response_mac == tunnel->mac)) {
2431 				mod_delayed_work(amt_wq, &tunnel->gc_wq,
2432 						 msecs_to_jiffies(amt_gmi(amt))
2436 				netdev_dbg(amt->dev, "Invalid MAC\n");
2445 	if (!pskb_may_pull(skb, sizeof(*iph)))
2449 	if (iph->version == 4) {
2450 		if (ip_mc_check_igmp(skb)) {
2451 			netdev_dbg(amt->dev, "Invalid IGMP\n");
2455 		spin_lock_bh(&tunnel->lock);
2456 		amt_igmp_report_handler(amt, skb, tunnel);
2457 		spin_unlock_bh(&tunnel->lock);
2459 		skb_push(skb, sizeof(struct ethhdr));
2460 		skb_reset_mac_header(skb);
2462 		skb->protocol = htons(ETH_P_IP);
2463 		eth->h_proto = htons(ETH_P_IP);
2464 		ip_eth_mc_map(iph->daddr, eth->h_dest);
2465 #if IS_ENABLED(CONFIG_IPV6)
2466 	} else if (iph->version == 6) {
2467 		struct ipv6hdr *ip6h = ipv6_hdr(skb);
2469 		if (ipv6_mc_check_mld(skb)) {
2470 			netdev_dbg(amt->dev, "Invalid MLD\n");
2474 		spin_lock_bh(&tunnel->lock);
2475 		amt_mld_report_handler(amt, skb, tunnel);
2476 		spin_unlock_bh(&tunnel->lock);
2478 		skb_push(skb, sizeof(struct ethhdr));
2479 		skb_reset_mac_header(skb);
2481 		skb->protocol = htons(ETH_P_IPV6);
2482 		eth->h_proto = htons(ETH_P_IPV6);
2483 		ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2486 		netdev_dbg(amt->dev, "Unsupported Protocol\n");
2490 	skb_pull(skb, sizeof(struct ethhdr));
2491 	skb->pkt_type = PACKET_MULTICAST;
2492 	skb->ip_summed = CHECKSUM_NONE;
2494 	if (__netif_rx(skb) == NET_RX_SUCCESS) {
2495 		amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
2497 		dev_sw_netstats_rx_add(amt->dev, len);
2499 		amt->dev->stats.rx_dropped++;
/* Relay side: build and transmit an AMT Advertisement (echoing @nonce,
 * advertising amt->local_ip) to @daddr:@dport.  Constructs the full
 * IPv4 + UDP + AMT packet by hand, computes the UDP checksum over the
 * payload, and sends it with ip_local_out(); tx_errors is bumped on
 * any failure.
 */
2505 static void amt_send_advertisement(struct amt_dev *amt, __be32 nonce,
2506 				   __be32 daddr, __be16 dport)
2508 	struct amt_header_advertisement *amta;
2509 	int hlen, tlen, offset;
2510 	struct socket *sock;
2511 	struct udphdr *udph;
2512 	struct sk_buff *skb;
2520 	sock = rcu_dereference(amt->sock);
2524 	if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
2527 	rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
2528 				   daddr, amt->local_ip,
2529 				   dport, amt->relay_port,
2531 				   amt->stream_dev->ifindex);
2533 		amt->dev->stats.tx_errors++;
/* Size the skb for link-layer headroom/tailroom plus the full
 * IP + UDP + AMT advertisement.
 */
2537 	hlen = LL_RESERVED_SPACE(amt->dev);
2538 	tlen = amt->dev->needed_tailroom;
2539 	len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
2540 	skb = netdev_alloc_skb_ip_align(amt->dev, len);
2543 		amt->dev->stats.tx_errors++;
2547 	skb->priority = TC_PRIO_CONTROL;
2548 	skb_dst_set(skb, &rt->dst);
2550 	len = sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
2551 	skb_reset_network_header(skb);
2553 	amta = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
2555 	amta->type = AMT_MSG_ADVERTISEMENT;
2557 	amta->nonce = nonce;
2558 	amta->ip4 = amt->local_ip;
2559 	skb_push(skb, sizeof(*udph));
2560 	skb_reset_transport_header(skb);
2561 	udph = udp_hdr(skb);
2562 	udph->source = amt->relay_port;
2564 	udph->len = htons(sizeof(*amta) + sizeof(*udph));
2566 	offset = skb_transport_offset(skb);
2567 	skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
2568 	udph->check = csum_tcpudp_magic(amt->local_ip, daddr,
2569 					sizeof(*udph) + sizeof(*amta),
2570 					IPPROTO_UDP, skb->csum);
2572 	skb_push(skb, sizeof(*iph));
2575 	iph->ihl	= (sizeof(struct iphdr)) >> 2;
2578 	iph->ttl	= ip4_dst_hoplimit(&rt->dst);
2580 	iph->saddr	= amt->local_ip;
2581 	iph->protocol	= IPPROTO_UDP;
2582 	iph->tot_len	= htons(len);
2584 	skb->ip_summed = CHECKSUM_NONE;
2585 	ip_select_ident(amt->net, skb, NULL);
2587 	err = ip_local_out(amt->net, sock->sk, skb);
2588 	if (unlikely(net_xmit_eval(err)))
2589 		amt->dev->stats.tx_errors++;
/* Relay side: handle an AMT Discovery — validate the header and answer
 * with an Advertisement echoing the gateway's nonce back to the outer
 * source address/port.
 */
2595 static bool amt_discovery_handler(struct amt_dev *amt, struct sk_buff *skb)
2597 	struct amt_header_discovery *amtd;
2598 	struct udphdr *udph;
2601 	if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtd)))
2605 	udph = udp_hdr(skb);
2606 	amtd = (struct amt_header_discovery *)(udp_hdr(skb) + 1);
2608 	if (amtd->reserved || amtd->version)
2611 	amt_send_advertisement(amt, amtd->nonce, iph->saddr, udph->source);
/* Relay side: handle an AMT Request.  Reuses an existing tunnel entry
 * matching the outer source IP or allocates a new one (bounded by
 * max_tunnels, rejected with ICMP host-unreachable when full),
 * derives the response MAC via siphash over (ip, port, nonce), and
 * answers with an IGMP/MLD general query.
 */
2616 static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
2618 	struct amt_header_request *amtrh;
2619 	struct amt_tunnel_list *tunnel;
2620 	unsigned long long key;
2621 	struct udphdr *udph;
2626 	if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtrh)))
2630 	udph = udp_hdr(skb);
2631 	amtrh = (struct amt_header_request *)(udp_hdr(skb) + 1);
2633 	if (amtrh->reserved1 || amtrh->reserved2 || amtrh->version)
2636 	list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list)
2637 		if (tunnel->ip4 == iph->saddr)
2640 	if (amt->nr_tunnels >= amt->max_tunnels) {
2641 		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
/* Tunnel struct carries a trailing array of per-tunnel group hash
 * buckets.
 */
2645 	tunnel = kzalloc(sizeof(*tunnel) +
2646 			 (sizeof(struct hlist_head) * amt->hash_buckets),
2651 	tunnel->source_port = udph->source;
2652 	tunnel->ip4 = iph->saddr;
2654 	memcpy(&key, &tunnel->key, sizeof(unsigned long long));
2656 	spin_lock_init(&tunnel->lock);
2657 	for (i = 0; i < amt->hash_buckets; i++)
2658 		INIT_HLIST_HEAD(&tunnel->groups[i]);
2660 	INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);
2662 	spin_lock_bh(&amt->lock);
2663 	list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
2664 	tunnel->key = amt->key;
2665 	amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
2667 	mod_delayed_work(amt_wq, &tunnel->gc_wq,
2668 			 msecs_to_jiffies(amt_gmi(amt)));
2669 	spin_unlock_bh(&amt->lock);
2672 	tunnel->nonce = amtrh->nonce;
/* Response MAC = upper bits of siphash(ip, source port, nonce). */
2673 	mac = siphash_3u32((__force u32)tunnel->ip4,
2674 			   (__force u32)tunnel->source_port,
2675 			   (__force u32)tunnel->nonce,
2677 	tunnel->mac = mac >> 16;
2679 	if (!netif_running(amt->dev) || !netif_running(amt->stream_dev))
2683 		amt_send_igmp_gq(amt, tunnel);
2685 		amt_send_mld_gq(amt, tunnel);
/* UDP encap_rcv callback: entry point for all AMT messages on the
 * tunnel socket.  Recovers the amt device from sk_user_data, parses
 * the AMT message type, and dispatches according to the device mode —
 * gateway accepts Advertisement/Multicast Data/Membership Query
 * (source-checked against the known relay), relay accepts
 * Discovery/Request/Membership Update.  Dropped packets bump
 * rx_dropped.
 */
2690 static int amt_rcv(struct sock *sk, struct sk_buff *skb)
2692 	struct amt_dev *amt;
2698 	amt = rcu_dereference_sk_user_data(sk);
2705 	skb->dev = amt->dev;
2707 	type = amt_parse_type(skb);
2713 	if (amt->mode == AMT_MODE_GATEWAY) {
2715 		case AMT_MSG_ADVERTISEMENT:
/* Advertisements may only come from the configured discovery relay. */
2716 			if (iph->saddr != amt->discovery_ip) {
2717 				netdev_dbg(amt->dev, "Invalid Relay IP\n");
2721 			err = amt_advertisement_handler(amt, skb);
2723 		case AMT_MSG_MULTICAST_DATA:
2724 			if (iph->saddr != amt->remote_ip) {
2725 				netdev_dbg(amt->dev, "Invalid Relay IP\n");
2729 			err = amt_multicast_data_handler(amt, skb);
2734 		case AMT_MSG_MEMBERSHIP_QUERY:
2735 			if (iph->saddr != amt->remote_ip) {
2736 				netdev_dbg(amt->dev, "Invalid Relay IP\n");
2740 			err = amt_membership_query_handler(amt, skb);
2747 			netdev_dbg(amt->dev, "Invalid type of Gateway\n");
2752 		case AMT_MSG_DISCOVERY:
2753 			err = amt_discovery_handler(amt, skb);
2755 		case AMT_MSG_REQUEST:
2756 			err = amt_request_handler(amt, skb);
2758 		case AMT_MSG_MEMBERSHIP_UPDATE:
2759 			err = amt_update_handler(amt, skb);
2766 			netdev_dbg(amt->dev, "Invalid type of relay\n");
2772 	amt->dev->stats.rx_dropped++;
2778 	rcu_read_unlock_bh();
/* UDP encap_err_lookup callback: invoked for ICMP errors on the tunnel
 * socket.  In gateway mode, an unreachable reported for a sent
 * Request/Membership Update restarts the request work so the gateway
 * re-establishes contact with the relay.
 */
2782 static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
2784 	struct amt_dev *amt;
2788 	amt = rcu_dereference_sk_user_data(sk);
2792 	if (amt->mode != AMT_MODE_GATEWAY)
2795 	type = amt_parse_type(skb);
2799 	netdev_dbg(amt->dev, "Received IGMP Unreachable of %s\n",
2802 	case AMT_MSG_DISCOVERY:
2804 	case AMT_MSG_REQUEST:
2805 	case AMT_MSG_MEMBERSHIP_UPDATE:
2806 		if (amt->status >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
2807 			mod_delayed_work(amt_wq, &amt->req_wq, 0);
2813 	rcu_read_unlock_bh();
2816 	rcu_read_unlock_bh();
2817 	amt->dev->stats.rx_dropped++;
/* Create the kernel UDP socket (IPv4, wildcard address) bound to
 * @port for AMT tunnel traffic.  Returns the socket or an ERR_PTR.
 */
2821 static struct socket *amt_create_sock(struct net *net, __be16 port)
2823 	struct udp_port_cfg udp_conf;
2824 	struct socket *sock;
2827 	memset(&udp_conf, 0, sizeof(udp_conf));
2828 	udp_conf.family = AF_INET;
2829 	udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
2831 	udp_conf.local_udp_port = port;
2833 	err = udp_sock_create(net, &udp_conf, &sock);
2835 		return ERR_PTR(err);
/* Create the tunnel UDP socket and register it as an encapsulation
 * socket with amt_rcv()/amt_err_lookup() as callbacks, then publish it
 * via RCU in amt->sock.
 */
2840 static int amt_socket_create(struct amt_dev *amt)
2842 	struct udp_tunnel_sock_cfg tunnel_cfg;
2843 	struct socket *sock;
2845 	sock = amt_create_sock(amt->net, amt->relay_port);
2847 		return PTR_ERR(sock);
2849 	/* Mark socket as an encapsulation socket */
2850 	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
2851 	tunnel_cfg.sk_user_data = amt;
2852 	tunnel_cfg.encap_type = 1;
2853 	tunnel_cfg.encap_rcv = amt_rcv;
2854 	tunnel_cfg.encap_err_lookup = amt_err_lookup;
2855 	tunnel_cfg.encap_destroy = NULL;
2856 	setup_udp_tunnel_sock(amt->net, sock, &tunnel_cfg);
2858 	rcu_assign_pointer(amt->sock, sock);
/* ndo_open: create the tunnel socket, generate a fresh siphash key,
 * reset status, and kick off the mode-specific periodic work —
 * discovery+request for gateways, secret rotation for relays.
 */
2862 static int amt_dev_open(struct net_device *dev)
2864 	struct amt_dev *amt = netdev_priv(dev);
2867 	amt->ready4 = false;
2868 	amt->ready6 = false;
2870 	err = amt_socket_create(amt);
2876 	get_random_bytes(&amt->key, sizeof(siphash_key_t));
2878 	amt->status = AMT_STATUS_INIT;
2879 	if (amt->mode == AMT_MODE_GATEWAY) {
2880 		mod_delayed_work(amt_wq, &amt->discovery_wq, 0);
2881 		mod_delayed_work(amt_wq, &amt->req_wq, 0);
2882 	} else if (amt->mode == AMT_MODE_RELAY) {
2883 		mod_delayed_work(amt_wq, &amt->secret_wq,
2884 				 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
/* ndo_stop: cancel all delayed work, detach and release the tunnel
 * socket, then tear down every tunnel entry (RCU-deferred free) after
 * cancelling its GC work and clearing its group state.
 */
2889 static int amt_dev_stop(struct net_device *dev)
2891 	struct amt_dev *amt = netdev_priv(dev);
2892 	struct amt_tunnel_list *tunnel, *tmp;
2893 	struct socket *sock;
2895 	cancel_delayed_work_sync(&amt->req_wq);
2896 	cancel_delayed_work_sync(&amt->discovery_wq);
2897 	cancel_delayed_work_sync(&amt->secret_wq);
2900 	sock = rtnl_dereference(amt->sock);
2901 	RCU_INIT_POINTER(amt->sock, NULL);
2904 		udp_tunnel_sock_release(sock);
2906 	amt->ready4 = false;
2907 	amt->ready6 = false;
2911 	list_for_each_entry_safe(tunnel, tmp, &amt->tunnel_list, list) {
2912 		list_del_rcu(&tunnel->list);
2914 		cancel_delayed_work_sync(&tunnel->gc_wq);
2915 		amt_clear_groups(tunnel);
2916 		kfree_rcu(tunnel, rcu);
2922 static const struct device_type amt_type = {
2926 static int amt_dev_init(struct net_device *dev)
2928 struct amt_dev *amt = netdev_priv(dev);
2932 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2936 err = gro_cells_init(&amt->gro_cells, dev);
2938 free_percpu(dev->tstats);
2945 static void amt_dev_uninit(struct net_device *dev)
2947 struct amt_dev *amt = netdev_priv(dev);
2949 gro_cells_destroy(&amt->gro_cells);
2950 free_percpu(dev->tstats);
2953 static const struct net_device_ops amt_netdev_ops = {
2954 .ndo_init = amt_dev_init,
2955 .ndo_uninit = amt_dev_uninit,
2956 .ndo_open = amt_dev_open,
2957 .ndo_stop = amt_dev_stop,
2958 .ndo_start_xmit = amt_dev_xmit,
2959 .ndo_get_stats64 = dev_get_tstats64,
2962 static void amt_link_setup(struct net_device *dev)
2964 dev->netdev_ops = &amt_netdev_ops;
2965 dev->needs_free_netdev = true;
2966 SET_NETDEV_DEVTYPE(dev, &amt_type);
2967 dev->min_mtu = ETH_MIN_MTU;
2968 dev->max_mtu = ETH_MAX_MTU;
2969 dev->type = ARPHRD_NONE;
2970 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
2971 dev->hard_header_len = 0;
2973 dev->priv_flags |= IFF_NO_QUEUE;
2974 dev->features |= NETIF_F_LLTX;
2975 dev->features |= NETIF_F_GSO_SOFTWARE;
2976 dev->features |= NETIF_F_NETNS_LOCAL;
2977 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2978 dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
2979 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2980 eth_hw_addr_random(dev);
2981 eth_zero_addr(dev->broadcast);
2985 static const struct nla_policy amt_policy[IFLA_AMT_MAX + 1] = {
2986 [IFLA_AMT_MODE] = { .type = NLA_U32 },
2987 [IFLA_AMT_RELAY_PORT] = { .type = NLA_U16 },
2988 [IFLA_AMT_GATEWAY_PORT] = { .type = NLA_U16 },
2989 [IFLA_AMT_LINK] = { .type = NLA_U32 },
2990 [IFLA_AMT_LOCAL_IP] = { .len = sizeof_field(struct iphdr, daddr) },
2991 [IFLA_AMT_REMOTE_IP] = { .len = sizeof_field(struct iphdr, daddr) },
2992 [IFLA_AMT_DISCOVERY_IP] = { .len = sizeof_field(struct iphdr, daddr) },
2993 [IFLA_AMT_MAX_TUNNELS] = { .type = NLA_U32 },
2996 static int amt_validate(struct nlattr *tb[], struct nlattr *data[],
2997 struct netlink_ext_ack *extack)
3002 if (!data[IFLA_AMT_LINK]) {
3003 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LINK],
3004 "Link attribute is required");
3008 if (!data[IFLA_AMT_MODE]) {
3009 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
3010 "Mode attribute is required");
3014 if (nla_get_u32(data[IFLA_AMT_MODE]) > AMT_MODE_MAX) {
3015 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
3016 "Mode attribute is not valid");
3020 if (!data[IFLA_AMT_LOCAL_IP]) {
3021 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_DISCOVERY_IP],
3022 "Local attribute is required");
3026 if (!data[IFLA_AMT_DISCOVERY_IP] &&
3027 nla_get_u32(data[IFLA_AMT_MODE]) == AMT_MODE_GATEWAY) {
3028 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LOCAL_IP],
3029 "Discovery attribute is required");
3036 static int amt_newlink(struct net *net, struct net_device *dev,
3037 struct nlattr *tb[], struct nlattr *data[],
3038 struct netlink_ext_ack *extack)
3040 struct amt_dev *amt = netdev_priv(dev);
3044 amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);
3046 if (data[IFLA_AMT_MAX_TUNNELS] &&
3047 nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]))
3048 amt->max_tunnels = nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]);
3050 amt->max_tunnels = AMT_MAX_TUNNELS;
3052 spin_lock_init(&amt->lock);
3053 amt->max_groups = AMT_MAX_GROUP;
3054 amt->max_sources = AMT_MAX_SOURCE;
3055 amt->hash_buckets = AMT_HSIZE;
3056 amt->nr_tunnels = 0;
3057 get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
3058 amt->stream_dev = dev_get_by_index(net,
3059 nla_get_u32(data[IFLA_AMT_LINK]));
3060 if (!amt->stream_dev) {
3061 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
3062 "Can't find stream device");
3066 if (amt->stream_dev->type != ARPHRD_ETHER) {
3067 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
3068 "Invalid stream device type");
3072 amt->local_ip = nla_get_in_addr(data[IFLA_AMT_LOCAL_IP]);
3073 if (ipv4_is_loopback(amt->local_ip) ||
3074 ipv4_is_zeronet(amt->local_ip) ||
3075 ipv4_is_multicast(amt->local_ip)) {
3076 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LOCAL_IP],
3077 "Invalid Local address");
3081 if (data[IFLA_AMT_RELAY_PORT])
3082 amt->relay_port = nla_get_be16(data[IFLA_AMT_RELAY_PORT]);
3084 amt->relay_port = htons(IANA_AMT_UDP_PORT);
3086 if (data[IFLA_AMT_GATEWAY_PORT])
3087 amt->gw_port = nla_get_be16(data[IFLA_AMT_GATEWAY_PORT]);
3089 amt->gw_port = htons(IANA_AMT_UDP_PORT);
3091 if (!amt->relay_port) {
3092 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
3093 "relay port must not be 0");
3096 if (amt->mode == AMT_MODE_RELAY) {
3097 amt->qrv = amt->net->ipv4.sysctl_igmp_qrv;
3099 dev->needed_headroom = amt->stream_dev->needed_headroom +
3101 dev->mtu = amt->stream_dev->mtu - AMT_RELAY_HLEN;
3102 dev->max_mtu = dev->mtu;
3103 dev->min_mtu = ETH_MIN_MTU + AMT_RELAY_HLEN;
3105 if (!data[IFLA_AMT_DISCOVERY_IP]) {
3106 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
3107 "discovery must be set in gateway mode");
3110 if (!amt->gw_port) {
3111 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
3112 "gateway port must not be 0");
3116 amt->discovery_ip = nla_get_in_addr(data[IFLA_AMT_DISCOVERY_IP]);
3117 if (ipv4_is_loopback(amt->discovery_ip) ||
3118 ipv4_is_zeronet(amt->discovery_ip) ||
3119 ipv4_is_multicast(amt->discovery_ip)) {
3120 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
3121 "discovery must be unicast");
3125 dev->needed_headroom = amt->stream_dev->needed_headroom +
3127 dev->mtu = amt->stream_dev->mtu - AMT_GW_HLEN;
3128 dev->max_mtu = dev->mtu;
3129 dev->min_mtu = ETH_MIN_MTU + AMT_GW_HLEN;
3131 amt->qi = AMT_INIT_QUERY_INTERVAL;
3133 err = register_netdevice(dev);
3135 netdev_dbg(dev, "failed to register new netdev %d\n", err);
3139 err = netdev_upper_dev_link(amt->stream_dev, dev, extack);
3141 unregister_netdevice(dev);
3145 INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
3146 INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
3147 INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
3148 INIT_LIST_HEAD(&amt->tunnel_list);
3152 dev_put(amt->stream_dev);
3156 static void amt_dellink(struct net_device *dev, struct list_head *head)
3158 struct amt_dev *amt = netdev_priv(dev);
3160 unregister_netdevice_queue(dev, head);
3161 netdev_upper_dev_unlink(amt->stream_dev, dev);
3162 dev_put(amt->stream_dev);
3165 static size_t amt_get_size(const struct net_device *dev)
3167 return nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MODE */
3168 nla_total_size(sizeof(__u16)) + /* IFLA_AMT_RELAY_PORT */
3169 nla_total_size(sizeof(__u16)) + /* IFLA_AMT_GATEWAY_PORT */
3170 nla_total_size(sizeof(__u32)) + /* IFLA_AMT_LINK */
3171 nla_total_size(sizeof(__u32)) + /* IFLA_MAX_TUNNELS */
3172 nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_DISCOVERY_IP */
3173 nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_REMOTE_IP */
3174 nla_total_size(sizeof(struct iphdr)); /* IFLA_AMT_LOCAL_IP */
3177 static int amt_fill_info(struct sk_buff *skb, const struct net_device *dev)
3179 struct amt_dev *amt = netdev_priv(dev);
3181 if (nla_put_u32(skb, IFLA_AMT_MODE, amt->mode))
3182 goto nla_put_failure;
3183 if (nla_put_be16(skb, IFLA_AMT_RELAY_PORT, amt->relay_port))
3184 goto nla_put_failure;
3185 if (nla_put_be16(skb, IFLA_AMT_GATEWAY_PORT, amt->gw_port))
3186 goto nla_put_failure;
3187 if (nla_put_u32(skb, IFLA_AMT_LINK, amt->stream_dev->ifindex))
3188 goto nla_put_failure;
3189 if (nla_put_in_addr(skb, IFLA_AMT_LOCAL_IP, amt->local_ip))
3190 goto nla_put_failure;
3191 if (nla_put_in_addr(skb, IFLA_AMT_DISCOVERY_IP, amt->discovery_ip))
3192 goto nla_put_failure;
3194 if (nla_put_in_addr(skb, IFLA_AMT_REMOTE_IP, amt->remote_ip))
3195 goto nla_put_failure;
3196 if (nla_put_u32(skb, IFLA_AMT_MAX_TUNNELS, amt->max_tunnels))
3197 goto nla_put_failure;
3205 static struct rtnl_link_ops amt_link_ops __read_mostly = {
3207 .maxtype = IFLA_AMT_MAX,
3208 .policy = amt_policy,
3209 .priv_size = sizeof(struct amt_dev),
3210 .setup = amt_link_setup,
3211 .validate = amt_validate,
3212 .newlink = amt_newlink,
3213 .dellink = amt_dellink,
3214 .get_size = amt_get_size,
3215 .fill_info = amt_fill_info,
3218 static struct net_device *amt_lookup_upper_dev(struct net_device *dev)
3220 struct net_device *upper_dev;
3221 struct amt_dev *amt;
3223 for_each_netdev(dev_net(dev), upper_dev) {
3224 if (netif_is_amt(upper_dev)) {
3225 amt = netdev_priv(upper_dev);
3226 if (amt->stream_dev == dev)
3234 static int amt_device_event(struct notifier_block *unused,
3235 unsigned long event, void *ptr)
3237 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3238 struct net_device *upper_dev;
3239 struct amt_dev *amt;
3243 upper_dev = amt_lookup_upper_dev(dev);
3246 amt = netdev_priv(upper_dev);
3249 case NETDEV_UNREGISTER:
3250 amt_dellink(amt->dev, &list);
3251 unregister_netdevice_many(&list);
3253 case NETDEV_CHANGEMTU:
3254 if (amt->mode == AMT_MODE_RELAY)
3255 new_mtu = dev->mtu - AMT_RELAY_HLEN;
3257 new_mtu = dev->mtu - AMT_GW_HLEN;
3259 dev_set_mtu(amt->dev, new_mtu);
3266 static struct notifier_block amt_notifier_block __read_mostly = {
3267 .notifier_call = amt_device_event,
3270 static int __init amt_init(void)
3274 err = register_netdevice_notifier(&amt_notifier_block);
3278 err = rtnl_link_register(&amt_link_ops);
3280 goto unregister_notifier;
3282 amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1);
3285 goto rtnl_unregister;
3288 spin_lock_init(&source_gc_lock);
3289 spin_lock_bh(&source_gc_lock);
3290 INIT_DELAYED_WORK(&source_gc_wq, amt_source_gc_work);
3291 mod_delayed_work(amt_wq, &source_gc_wq,
3292 msecs_to_jiffies(AMT_GC_INTERVAL));
3293 spin_unlock_bh(&source_gc_lock);
3298 rtnl_link_unregister(&amt_link_ops);
3299 unregister_notifier:
3300 unregister_netdevice_notifier(&amt_notifier_block);
3302 pr_err("error loading AMT module loaded\n");
3305 late_initcall(amt_init);
3307 static void __exit amt_fini(void)
3309 rtnl_link_unregister(&amt_link_ops);
3310 unregister_netdevice_notifier(&amt_notifier_block);
3311 cancel_delayed_work_sync(&source_gc_wq);
3312 __amt_source_gc_work();
3313 destroy_workqueue(amt_wq);
3315 module_exit(amt_fini);
3317 MODULE_LICENSE("GPL");
3318 MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
3319 MODULE_ALIAS_RTNL_LINK("amt");