2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Routing netlink socket interface: protocol independent part.
8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
16 * Vitaly E. Lavrov RTA_OK arithmetic was wrong.
19 #include <linux/bitops.h>
20 #include <linux/errno.h>
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/socket.h>
24 #include <linux/kernel.h>
25 #include <linux/timer.h>
26 #include <linux/string.h>
27 #include <linux/sockios.h>
28 #include <linux/net.h>
29 #include <linux/fcntl.h>
31 #include <linux/slab.h>
32 #include <linux/interrupt.h>
33 #include <linux/capability.h>
34 #include <linux/skbuff.h>
35 #include <linux/init.h>
36 #include <linux/security.h>
37 #include <linux/mutex.h>
38 #include <linux/if_addr.h>
39 #include <linux/if_bridge.h>
40 #include <linux/if_vlan.h>
41 #include <linux/pci.h>
42 #include <linux/etherdevice.h>
43 #include <linux/bpf.h>
45 #include <linux/uaccess.h>
47 #include <linux/inet.h>
48 #include <linux/netdevice.h>
49 #include <net/switchdev.h>
51 #include <net/protocol.h>
53 #include <net/route.h>
57 #include <net/pkt_sched.h>
58 #include <net/fib_rules.h>
59 #include <net/rtnetlink.h>
60 #include <net/net_namespace.h>
64 rtnl_dumpit_func dumpit;
68 static DEFINE_MUTEX(rtnl_mutex);
72 mutex_lock(&rtnl_mutex);
74 EXPORT_SYMBOL(rtnl_lock);
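/* skbs handed to rtnl_kfree_skbs() while the RTNL mutex is held are
 * chained onto this list and only freed by __rtnl_unlock() after the
 * mutex has been dropped, so long free lists never run inside the
 * locked section.
 */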
76 static struct sk_buff *defer_kfree_skb_list;
77 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
80 tail->next = defer_kfree_skb_list;
81 defer_kfree_skb_list = head;
84 EXPORT_SYMBOL(rtnl_kfree_skbs);
86 void __rtnl_unlock(void)
88 struct sk_buff *head = defer_kfree_skb_list;
90 defer_kfree_skb_list = NULL;
92 mutex_unlock(&rtnl_mutex);
95 struct sk_buff *next = head->next;
103 void rtnl_unlock(void)
105 /* This fellow will unlock it for us. */
108 EXPORT_SYMBOL(rtnl_unlock);
110 int rtnl_trylock(void)
112 return mutex_trylock(&rtnl_mutex);
114 EXPORT_SYMBOL(rtnl_trylock);
116 int rtnl_is_locked(void)
118 return mutex_is_locked(&rtnl_mutex);
120 EXPORT_SYMBOL(rtnl_is_locked);
122 #ifdef CONFIG_PROVE_LOCKING
123 bool lockdep_rtnl_is_held(void)
125 return lockdep_is_held(&rtnl_mutex);
127 EXPORT_SYMBOL(lockdep_rtnl_is_held);
128 #endif /* #ifdef CONFIG_PROVE_LOCKING */
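/* Per protocol family tables of rtnetlink message handlers, indexed by
 * rtm_msgindex(msgtype). Lookups dereference the tables under RCU;
 * updates go through __rtnl_register()/rtnl_unregister(), and
 * rtnl_unregister_all() drops a whole table and waits on the matching
 * refcount for in-flight users before it can be freed.
 */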
130 static struct rtnl_link __rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
131 static refcount_t rtnl_msg_handlers_ref[RTNL_FAMILY_MAX + 1];
133 static inline int rtm_msgindex(int msgtype)
135 int msgindex = msgtype - RTM_BASE;
138 * msgindex < 0 implies someone tried to register a netlink
139 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
140 * the message type has not been added to linux/rtnetlink.h
142 BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
148 * __rtnl_register - Register a rtnetlink message type
149 * @protocol: Protocol family or PF_UNSPEC
150 * @msgtype: rtnetlink message type
151 * @doit: Function pointer called for each request message
152 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
153 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
155 * Registers the specified function pointers (at least one of them has
156 * to be non-NULL) to be called whenever a request message for the
157 * specified protocol family and message type is received.
159 * The special protocol family PF_UNSPEC may be used to define fallback
160 * function pointers for the case when no entry for the specific protocol family exists.
163 * Returns 0 on success or a negative error code.
165 int __rtnl_register(int protocol, int msgtype,
166 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
169 struct rtnl_link *tab;
172 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
173 msgindex = rtm_msgindex(msgtype);
175 tab = rcu_dereference_raw(rtnl_msg_handlers[protocol]);
177 tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
181 rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
185 tab[msgindex].doit = doit;
187 tab[msgindex].dumpit = dumpit;
188 tab[msgindex].flags |= flags;
192 EXPORT_SYMBOL_GPL(__rtnl_register);
195 * rtnl_register - Register a rtnetlink message type
197 * Identical to __rtnl_register() but panics on failure. This is useful
198 * as failure of this function is very unlikely; it can only happen due
199 * to lack of memory when allocating the chain to store all message
200 * handlers for a protocol. Meant for use in init functions where lack
201 * of memory implies no sense in continuing.
203 void rtnl_register(int protocol, int msgtype,
204 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
207 if (__rtnl_register(protocol, msgtype, doit, dumpit, flags) < 0)
208 panic("Unable to register rtnetlink message handler, "
209 "protocol = %d, message type = %d\n",
212 EXPORT_SYMBOL_GPL(rtnl_register);
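/* Illustrative registration from an __init path; my_doit and my_dumpit
 * are hypothetical handlers, shown only as a usage sketch:
 *
 *	rtnl_register(PF_UNSPEC, RTM_GETLINK, my_doit, my_dumpit, 0);
 *
 * At least one of the two callbacks must be non-NULL, and @flags may
 * carry rtnl_link_flags such as RTNL_FLAG_DOIT_UNLOCKED for doit
 * handlers that do not need the RTNL mutex.
 */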
215 * rtnl_unregister - Unregister a rtnetlink message type
216 * @protocol: Protocol family or PF_UNSPEC
217 * @msgtype: rtnetlink message type
219 * Returns 0 on success or a negative error code.
221 int rtnl_unregister(int protocol, int msgtype)
223 struct rtnl_link *handlers;
226 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
227 msgindex = rtm_msgindex(msgtype);
230 handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
236 handlers[msgindex].doit = NULL;
237 handlers[msgindex].dumpit = NULL;
238 handlers[msgindex].flags = 0;
243 EXPORT_SYMBOL_GPL(rtnl_unregister);
246 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
247 * @protocol : Protocol family or PF_UNSPEC
249 * Identical to calling rtnl_unregister() for all registered message types
250 * of a certain protocol family.
252 void rtnl_unregister_all(int protocol)
254 struct rtnl_link *handlers;
256 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
259 handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
260 RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
265 while (refcount_read(&rtnl_msg_handlers_ref[protocol]) > 1)
269 EXPORT_SYMBOL_GPL(rtnl_unregister_all);
271 static LIST_HEAD(link_ops);
273 static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
275 const struct rtnl_link_ops *ops;
277 list_for_each_entry(ops, &link_ops, list) {
278 if (!strcmp(ops->kind, kind))
285 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
286 * @ops: struct rtnl_link_ops * to register
288 * The caller must hold the rtnl_mutex. This function should be used
289 * by drivers that create devices during module initialization. It
290 * must be called before registering the devices.
292 * Returns 0 on success or a negative error code.
294 int __rtnl_link_register(struct rtnl_link_ops *ops)
296 if (rtnl_link_ops_get(ops->kind))
299 /* The check for setup is here because if ops
300 * does not have it filled in, the ops cannot be used for
301 * creating a device. In that case do not fill in dellink
302 * either; that disables rtnl_dellink.
304 if (ops->setup && !ops->dellink)
305 ops->dellink = unregister_netdevice_queue;
307 list_add_tail(&ops->list, &link_ops);
310 EXPORT_SYMBOL_GPL(__rtnl_link_register);
313 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
314 * @ops: struct rtnl_link_ops * to register
316 * Returns 0 on success or a negative error code.
318 int rtnl_link_register(struct rtnl_link_ops *ops)
323 err = __rtnl_link_register(ops);
327 EXPORT_SYMBOL_GPL(rtnl_link_register);
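/* A minimal sketch of how a virtual-device driver hooks in here; every
 * name below is hypothetical:
 *
 *	static struct rtnl_link_ops my_link_ops __read_mostly = {
 *		.kind		= "mydev",
 *		.priv_size	= sizeof(struct my_priv),
 *		.setup		= my_setup,
 *		.newlink	= my_newlink,
 *	};
 *
 * rtnl_link_register(&my_link_ops) from module init is paired with
 * rtnl_link_unregister(&my_link_ops) on exit. Because .setup is set
 * and .dellink is not, __rtnl_link_register() falls back to
 * unregister_netdevice_queue for dellink.
 */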
329 static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
331 struct net_device *dev;
332 LIST_HEAD(list_kill);
334 for_each_netdev(net, dev) {
335 if (dev->rtnl_link_ops == ops)
336 ops->dellink(dev, &list_kill);
338 unregister_netdevice_many(&list_kill);
342 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
343 * @ops: struct rtnl_link_ops * to unregister
345 * The caller must hold the rtnl_mutex.
347 void __rtnl_link_unregister(struct rtnl_link_ops *ops)
352 __rtnl_kill_links(net, ops);
354 list_del(&ops->list);
356 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
358 /* Return with the rtnl_lock held when there are no network
359 * devices unregistering in any network namespace.
361 static void rtnl_lock_unregistering_all(void)
365 DEFINE_WAIT_FUNC(wait, woken_wake_function);
367 add_wait_queue(&netdev_unregistering_wq, &wait);
369 unregistering = false;
372 if (net->dev_unreg_count > 0) {
373 unregistering = true;
381 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
383 remove_wait_queue(&netdev_unregistering_wq, &wait);
387 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
388 * @ops: struct rtnl_link_ops * to unregister
390 void rtnl_link_unregister(struct rtnl_link_ops *ops)
392 /* Close the race with cleanup_net() */
393 mutex_lock(&net_mutex);
394 rtnl_lock_unregistering_all();
395 __rtnl_link_unregister(ops);
397 mutex_unlock(&net_mutex);
399 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
401 static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
403 struct net_device *master_dev;
404 const struct rtnl_link_ops *ops;
409 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
413 ops = master_dev->rtnl_link_ops;
414 if (!ops || !ops->get_slave_size)
416 /* IFLA_INFO_SLAVE_DATA + nested data */
417 size = nla_total_size(sizeof(struct nlattr)) +
418 ops->get_slave_size(master_dev, dev);
425 static size_t rtnl_link_get_size(const struct net_device *dev)
427 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
433 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
434 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
437 /* IFLA_INFO_DATA + nested data */
438 size += nla_total_size(sizeof(struct nlattr)) +
441 if (ops->get_xstats_size)
442 /* IFLA_INFO_XSTATS */
443 size += nla_total_size(ops->get_xstats_size(dev));
445 size += rtnl_link_get_slave_info_data_size(dev);
450 static LIST_HEAD(rtnl_af_ops);
452 static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
454 const struct rtnl_af_ops *ops;
456 list_for_each_entry(ops, &rtnl_af_ops, list) {
457 if (ops->family == family)
465 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
466 * @ops: struct rtnl_af_ops * to register
468 * Returns 0 on success or a negative error code.
470 void rtnl_af_register(struct rtnl_af_ops *ops)
473 list_add_tail(&ops->list, &rtnl_af_ops);
476 EXPORT_SYMBOL_GPL(rtnl_af_register);
479 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
480 * @ops: struct rtnl_af_ops * to unregister
482 void rtnl_af_unregister(struct rtnl_af_ops *ops)
485 list_del(&ops->list);
488 EXPORT_SYMBOL_GPL(rtnl_af_unregister);
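/* Address family modules plug in here with an rtnl_af_ops providing at
 * least .family plus the fill/size callbacks used by rtnl_fill_ifinfo()
 * and rtnl_link_get_af_size(); a hypothetical sketch:
 *
 *	static struct rtnl_af_ops my_af_ops = {
 *		.family		  = AF_INET,
 *		.fill_link_af	  = my_fill_link_af,
 *		.get_link_af_size = my_get_link_af_size,
 *	};
 *	rtnl_af_register(&my_af_ops);
 */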
490 static size_t rtnl_link_get_af_size(const struct net_device *dev,
493 struct rtnl_af_ops *af_ops;
497 size = nla_total_size(sizeof(struct nlattr));
499 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
500 if (af_ops->get_link_af_size) {
501 /* AF_* + nested data */
502 size += nla_total_size(sizeof(struct nlattr)) +
503 af_ops->get_link_af_size(dev, ext_filter_mask);
510 static bool rtnl_have_link_slave_info(const struct net_device *dev)
512 struct net_device *master_dev;
517 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
518 if (master_dev && master_dev->rtnl_link_ops)
524 static int rtnl_link_slave_info_fill(struct sk_buff *skb,
525 const struct net_device *dev)
527 struct net_device *master_dev;
528 const struct rtnl_link_ops *ops;
529 struct nlattr *slave_data;
532 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
535 ops = master_dev->rtnl_link_ops;
538 if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
540 if (ops->fill_slave_info) {
541 slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
544 err = ops->fill_slave_info(skb, master_dev, dev);
546 goto err_cancel_slave_data;
547 nla_nest_end(skb, slave_data);
551 err_cancel_slave_data:
552 nla_nest_cancel(skb, slave_data);
556 static int rtnl_link_info_fill(struct sk_buff *skb,
557 const struct net_device *dev)
559 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
565 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
567 if (ops->fill_xstats) {
568 err = ops->fill_xstats(skb, dev);
572 if (ops->fill_info) {
573 data = nla_nest_start(skb, IFLA_INFO_DATA);
576 err = ops->fill_info(skb, dev);
578 goto err_cancel_data;
579 nla_nest_end(skb, data);
584 nla_nest_cancel(skb, data);
588 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
590 struct nlattr *linkinfo;
593 linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
594 if (linkinfo == NULL)
597 err = rtnl_link_info_fill(skb, dev);
599 goto err_cancel_link;
601 err = rtnl_link_slave_info_fill(skb, dev);
603 goto err_cancel_link;
605 nla_nest_end(skb, linkinfo);
609 nla_nest_cancel(skb, linkinfo);
614 int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
616 struct sock *rtnl = net->rtnl;
619 NETLINK_CB(skb).dst_group = group;
621 refcount_inc(&skb->users);
622 netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
624 err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
628 int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
630 struct sock *rtnl = net->rtnl;
632 return nlmsg_unicast(rtnl, skb, pid);
634 EXPORT_SYMBOL(rtnl_unicast);
636 void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
637 struct nlmsghdr *nlh, gfp_t flags)
639 struct sock *rtnl = net->rtnl;
643 report = nlmsg_report(nlh);
645 nlmsg_notify(rtnl, skb, pid, group, report, flags);
647 EXPORT_SYMBOL(rtnl_notify);
649 void rtnl_set_sk_err(struct net *net, u32 group, int error)
651 struct sock *rtnl = net->rtnl;
653 netlink_set_err(rtnl, 0, group, error);
655 EXPORT_SYMBOL(rtnl_set_sk_err);
657 int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
662 mx = nla_nest_start(skb, RTA_METRICS);
666 for (i = 0; i < RTAX_MAX; i++) {
668 if (i == RTAX_CC_ALGO - 1) {
669 char tmp[TCP_CA_NAME_MAX], *name;
671 name = tcp_ca_get_name_by_key(metrics[i], tmp);
674 if (nla_put_string(skb, i + 1, name))
675 goto nla_put_failure;
676 } else if (i == RTAX_FEATURES - 1) {
677 u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
681 BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
682 if (nla_put_u32(skb, i + 1, user_features))
683 goto nla_put_failure;
685 if (nla_put_u32(skb, i + 1, metrics[i]))
686 goto nla_put_failure;
693 nla_nest_cancel(skb, mx);
697 return nla_nest_end(skb, mx);
700 nla_nest_cancel(skb, mx);
703 EXPORT_SYMBOL(rtnetlink_put_metrics);
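/* The result is a single RTA_METRICS nest whose members are keyed by
 * the RTAX_* index (attribute type i + 1 for metrics[i]): plain u32
 * values for most metrics, a congestion-control name string for
 * RTAX_CC_ALGO, and a u32 limited to RTAX_FEATURE_MASK for
 * RTAX_FEATURES.
 */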
705 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
706 long expires, u32 error)
708 struct rta_cacheinfo ci = {
709 .rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
710 .rta_used = dst->__use,
711 .rta_clntref = atomic_read(&(dst->__refcnt)),
719 clock = jiffies_to_clock_t(abs(expires));
720 clock = min_t(unsigned long, clock, INT_MAX);
721 ci.rta_expires = (expires > 0) ? clock : -clock;
723 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
725 EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
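/* rta_expires is a signed clock_t: an expiry in the future is reported
 * as a positive tick count, one already in the past as the negated
 * value. For example, expires = -5 * HZ encodes as -5 * USER_HZ ticks.
 */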
727 static void set_operstate(struct net_device *dev, unsigned char transition)
729 unsigned char operstate = dev->operstate;
731 switch (transition) {
733 if ((operstate == IF_OPER_DORMANT ||
734 operstate == IF_OPER_UNKNOWN) &&
736 operstate = IF_OPER_UP;
739 case IF_OPER_DORMANT:
740 if (operstate == IF_OPER_UP ||
741 operstate == IF_OPER_UNKNOWN)
742 operstate = IF_OPER_DORMANT;
746 if (dev->operstate != operstate) {
747 write_lock_bh(&dev_base_lock);
748 dev->operstate = operstate;
749 write_unlock_bh(&dev_base_lock);
750 netdev_state_change(dev);
754 static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
756 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
757 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
760 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
761 const struct ifinfomsg *ifm)
763 unsigned int flags = ifm->ifi_flags;
765 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
767 flags = (flags & ifm->ifi_change) |
768 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
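/* Worked example: a request carrying ifi_flags = IFF_UP and
 * ifi_change = IFF_UP updates only the UP bit and keeps every other
 * flag from rtnl_dev_get_flags(); ifi_change == 0 keeps the historic
 * behaviour of applying ifi_flags wholesale, as the comment above
 * notes.
 */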
773 static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
774 const struct rtnl_link_stats64 *b)
776 a->rx_packets = b->rx_packets;
777 a->tx_packets = b->tx_packets;
778 a->rx_bytes = b->rx_bytes;
779 a->tx_bytes = b->tx_bytes;
780 a->rx_errors = b->rx_errors;
781 a->tx_errors = b->tx_errors;
782 a->rx_dropped = b->rx_dropped;
783 a->tx_dropped = b->tx_dropped;
785 a->multicast = b->multicast;
786 a->collisions = b->collisions;
788 a->rx_length_errors = b->rx_length_errors;
789 a->rx_over_errors = b->rx_over_errors;
790 a->rx_crc_errors = b->rx_crc_errors;
791 a->rx_frame_errors = b->rx_frame_errors;
792 a->rx_fifo_errors = b->rx_fifo_errors;
793 a->rx_missed_errors = b->rx_missed_errors;
795 a->tx_aborted_errors = b->tx_aborted_errors;
796 a->tx_carrier_errors = b->tx_carrier_errors;
797 a->tx_fifo_errors = b->tx_fifo_errors;
798 a->tx_heartbeat_errors = b->tx_heartbeat_errors;
799 a->tx_window_errors = b->tx_window_errors;
801 a->rx_compressed = b->rx_compressed;
802 a->tx_compressed = b->tx_compressed;
804 a->rx_nohandler = b->rx_nohandler;
808 static inline int rtnl_vfinfo_size(const struct net_device *dev,
811 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
812 int num_vfs = dev_num_vf(dev->dev.parent);
813 size_t size = nla_total_size(0);
816 nla_total_size(sizeof(struct ifla_vf_mac)) +
817 nla_total_size(sizeof(struct ifla_vf_vlan)) +
818 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
819 nla_total_size(MAX_VLAN_LIST_LEN *
820 sizeof(struct ifla_vf_vlan_info)) +
821 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
822 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
823 nla_total_size(sizeof(struct ifla_vf_rate)) +
824 nla_total_size(sizeof(struct ifla_vf_link_state)) +
825 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
826 nla_total_size(0) + /* nest IFLA_VF_STATS */
827 /* IFLA_VF_STATS_RX_PACKETS */
828 nla_total_size_64bit(sizeof(__u64)) +
829 /* IFLA_VF_STATS_TX_PACKETS */
830 nla_total_size_64bit(sizeof(__u64)) +
831 /* IFLA_VF_STATS_RX_BYTES */
832 nla_total_size_64bit(sizeof(__u64)) +
833 /* IFLA_VF_STATS_TX_BYTES */
834 nla_total_size_64bit(sizeof(__u64)) +
835 /* IFLA_VF_STATS_BROADCAST */
836 nla_total_size_64bit(sizeof(__u64)) +
837 /* IFLA_VF_STATS_MULTICAST */
838 nla_total_size_64bit(sizeof(__u64)) +
839 nla_total_size(sizeof(struct ifla_vf_trust)));
845 static size_t rtnl_port_size(const struct net_device *dev,
848 size_t port_size = nla_total_size(4) /* PORT_VF */
849 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
850 + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
851 + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
852 + nla_total_size(1) /* PROT_VDP_REQUEST */
853 + nla_total_size(2); /* PORT_VDP_RESPONSE */
854 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
855 size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
857 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
860 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
861 !(ext_filter_mask & RTEXT_FILTER_VF))
863 if (dev_num_vf(dev->dev.parent))
864 return port_self_size + vf_ports_size +
865 vf_port_size * dev_num_vf(dev->dev.parent);
867 return port_self_size;
870 static size_t rtnl_xdp_size(void)
872 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
873 nla_total_size(1) + /* XDP_ATTACHED */
874 nla_total_size(4); /* XDP_PROG_ID */
879 static noinline size_t if_nlmsg_size(const struct net_device *dev,
882 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
883 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
884 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
885 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
886 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
887 + nla_total_size(sizeof(struct rtnl_link_stats))
888 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
889 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
890 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
891 + nla_total_size(4) /* IFLA_TXQLEN */
892 + nla_total_size(4) /* IFLA_WEIGHT */
893 + nla_total_size(4) /* IFLA_MTU */
894 + nla_total_size(4) /* IFLA_LINK */
895 + nla_total_size(4) /* IFLA_MASTER */
896 + nla_total_size(1) /* IFLA_CARRIER */
897 + nla_total_size(4) /* IFLA_PROMISCUITY */
898 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
899 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
900 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
901 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
902 + nla_total_size(1) /* IFLA_OPERSTATE */
903 + nla_total_size(1) /* IFLA_LINKMODE */
904 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
905 + nla_total_size(4) /* IFLA_LINK_NETNSID */
906 + nla_total_size(4) /* IFLA_GROUP */
907 + nla_total_size(ext_filter_mask
908 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
909 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
910 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
911 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
912 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
913 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
914 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
915 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
916 + rtnl_xdp_size() /* IFLA_XDP */
917 + nla_total_size(4) /* IFLA_EVENT */
918 + nla_total_size(4) /* IFLA_NEW_NETNSID */
919 + nla_total_size(1); /* IFLA_PROTO_DOWN */
923 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
925 struct nlattr *vf_ports;
926 struct nlattr *vf_port;
930 vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
934 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
935 vf_port = nla_nest_start(skb, IFLA_VF_PORT);
937 goto nla_put_failure;
938 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
939 goto nla_put_failure;
940 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
941 if (err == -EMSGSIZE)
942 goto nla_put_failure;
944 nla_nest_cancel(skb, vf_port);
947 nla_nest_end(skb, vf_port);
950 nla_nest_end(skb, vf_ports);
955 nla_nest_cancel(skb, vf_ports);
959 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
961 struct nlattr *port_self;
964 port_self = nla_nest_start(skb, IFLA_PORT_SELF);
968 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
970 nla_nest_cancel(skb, port_self);
971 return (err == -EMSGSIZE) ? err : 0;
974 nla_nest_end(skb, port_self);
979 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
984 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
985 !(ext_filter_mask & RTEXT_FILTER_VF))
988 err = rtnl_port_self_fill(skb, dev);
992 if (dev_num_vf(dev->dev.parent)) {
993 err = rtnl_vf_ports_fill(skb, dev);
1001 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1004 struct netdev_phys_item_id ppid;
1006 err = dev_get_phys_port_id(dev, &ppid);
1008 if (err == -EOPNOTSUPP)
1013 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1019 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1021 char name[IFNAMSIZ];
1024 err = dev_get_phys_port_name(dev, name, sizeof(name));
1026 if (err == -EOPNOTSUPP)
1031 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
1037 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1040 struct switchdev_attr attr = {
1042 .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
1043 .flags = SWITCHDEV_F_NO_RECURSE,
1046 err = switchdev_port_attr_get(dev, &attr);
1048 if (err == -EOPNOTSUPP)
1053 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
1060 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1061 struct net_device *dev)
1063 struct rtnl_link_stats64 *sp;
1064 struct nlattr *attr;
1066 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1067 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1071 sp = nla_data(attr);
1072 dev_get_stats(dev, sp);
1074 attr = nla_reserve(skb, IFLA_STATS,
1075 sizeof(struct rtnl_link_stats));
1079 copy_rtnl_link_stats(nla_data(attr), sp);
1084 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1085 struct net_device *dev,
1087 struct nlattr *vfinfo)
1089 struct ifla_vf_rss_query_en vf_rss_query_en;
1090 struct nlattr *vf, *vfstats, *vfvlanlist;
1091 struct ifla_vf_link_state vf_linkstate;
1092 struct ifla_vf_vlan_info vf_vlan_info;
1093 struct ifla_vf_spoofchk vf_spoofchk;
1094 struct ifla_vf_tx_rate vf_tx_rate;
1095 struct ifla_vf_stats vf_stats;
1096 struct ifla_vf_trust vf_trust;
1097 struct ifla_vf_vlan vf_vlan;
1098 struct ifla_vf_rate vf_rate;
1099 struct ifla_vf_mac vf_mac;
1100 struct ifla_vf_info ivi;
1102 memset(&ivi, 0, sizeof(ivi));
1104 /* Not all SR-IOV capable drivers support the
1105 * spoofcheck and "RSS query enable" query. Preset to
1106 * -1 so the user space tool can detect that the driver
1107 * didn't report anything.
1110 ivi.rss_query_en = -1;
1112 /* The default value for VF link state is "auto",
1113 * IFLA_VF_LINK_STATE_AUTO, which equals zero
1116 /* VLAN Protocol by default is 802.1Q */
1117 ivi.vlan_proto = htons(ETH_P_8021Q);
1118 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1121 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
1130 vf_rss_query_en.vf =
1131 vf_trust.vf = ivi.vf;
1133 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1134 vf_vlan.vlan = ivi.vlan;
1135 vf_vlan.qos = ivi.qos;
1136 vf_vlan_info.vlan = ivi.vlan;
1137 vf_vlan_info.qos = ivi.qos;
1138 vf_vlan_info.vlan_proto = ivi.vlan_proto;
1139 vf_tx_rate.rate = ivi.max_tx_rate;
1140 vf_rate.min_tx_rate = ivi.min_tx_rate;
1141 vf_rate.max_tx_rate = ivi.max_tx_rate;
1142 vf_spoofchk.setting = ivi.spoofchk;
1143 vf_linkstate.link_state = ivi.linkstate;
1144 vf_rss_query_en.setting = ivi.rss_query_en;
1145 vf_trust.setting = ivi.trusted;
1146 vf = nla_nest_start(skb, IFLA_VF_INFO);
1148 goto nla_put_vfinfo_failure;
1149 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1150 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1151 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1153 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1155 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1157 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1159 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1160 sizeof(vf_rss_query_en),
1161 &vf_rss_query_en) ||
1162 nla_put(skb, IFLA_VF_TRUST,
1163 sizeof(vf_trust), &vf_trust))
1164 goto nla_put_vf_failure;
1165 vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
1167 goto nla_put_vf_failure;
1168 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1170 nla_nest_cancel(skb, vfvlanlist);
1171 goto nla_put_vf_failure;
1173 nla_nest_end(skb, vfvlanlist);
1174 memset(&vf_stats, 0, sizeof(vf_stats));
1175 if (dev->netdev_ops->ndo_get_vf_stats)
1176 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1178 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1180 goto nla_put_vf_failure;
1181 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1182 vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1183 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1184 vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1185 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1186 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1187 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1188 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1189 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1190 vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1191 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1192 vf_stats.multicast, IFLA_VF_STATS_PAD)) {
1193 nla_nest_cancel(skb, vfstats);
1194 goto nla_put_vf_failure;
1196 nla_nest_end(skb, vfstats);
1197 nla_nest_end(skb, vf);
1201 nla_nest_cancel(skb, vf);
1202 nla_put_vfinfo_failure:
1203 nla_nest_cancel(skb, vfinfo);
1207 static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
1208 struct net_device *dev,
1209 u32 ext_filter_mask)
1211 struct nlattr *vfinfo;
1214 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1217 num_vfs = dev_num_vf(dev->dev.parent);
1218 if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
1221 if (!dev->netdev_ops->ndo_get_vf_config)
1224 vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
1228 for (i = 0; i < num_vfs; i++) {
1229 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
1233 nla_nest_end(skb, vfinfo);
1237 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1239 struct rtnl_link_ifmap map;
1241 memset(&map, 0, sizeof(map));
1242 map.mem_start = dev->mem_start;
1243 map.mem_end = dev->mem_end;
1244 map.base_addr = dev->base_addr;
1247 map.port = dev->if_port;
1249 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1255 static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
1257 const struct net_device_ops *ops = dev->netdev_ops;
1258 const struct bpf_prog *generic_xdp_prog;
1263 generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
1264 if (generic_xdp_prog) {
1265 *prog_id = generic_xdp_prog->aux->id;
1266 return XDP_ATTACHED_SKB;
1269 return XDP_ATTACHED_NONE;
1271 return __dev_xdp_attached(dev, ops->ndo_xdp, prog_id);
1274 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1280 xdp = nla_nest_start(skb, IFLA_XDP);
1284 err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
1285 rtnl_xdp_attached_mode(dev, &prog_id));
1290 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1295 nla_nest_end(skb, xdp);
1299 nla_nest_cancel(skb, xdp);
1303 static u32 rtnl_get_event(unsigned long event)
1305 u32 rtnl_event_type = IFLA_EVENT_NONE;
1309 rtnl_event_type = IFLA_EVENT_REBOOT;
1311 case NETDEV_FEAT_CHANGE:
1312 rtnl_event_type = IFLA_EVENT_FEATURES;
1314 case NETDEV_BONDING_FAILOVER:
1315 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1317 case NETDEV_NOTIFY_PEERS:
1318 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1320 case NETDEV_RESEND_IGMP:
1321 rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1323 case NETDEV_CHANGEINFODATA:
1324 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1330 return rtnl_event_type;
1333 static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1335 const struct net_device *upper_dev;
1340 upper_dev = netdev_master_upper_dev_get_rcu(dev);
1342 ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);
1348 static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev)
1350 int ifindex = dev_get_iflink(dev);
1352 if (dev->ifindex == ifindex)
1355 return nla_put_u32(skb, IFLA_LINK, ifindex);
1358 static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1359 struct net_device *dev)
1364 ret = dev_get_alias(dev, buf, sizeof(buf));
1365 return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
1368 static int rtnl_fill_link_netnsid(struct sk_buff *skb,
1369 const struct net_device *dev)
1371 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1372 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1374 if (!net_eq(dev_net(dev), link_net)) {
1375 int id = peernet2id_alloc(dev_net(dev), link_net);
1377 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1385 static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1386 int type, u32 pid, u32 seq, u32 change,
1387 unsigned int flags, u32 ext_filter_mask,
1388 u32 event, int *new_nsid)
1390 struct ifinfomsg *ifm;
1391 struct nlmsghdr *nlh;
1392 struct nlattr *af_spec;
1393 struct rtnl_af_ops *af_ops;
1396 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1400 ifm = nlmsg_data(nlh);
1401 ifm->ifi_family = AF_UNSPEC;
1403 ifm->ifi_type = dev->type;
1404 ifm->ifi_index = dev->ifindex;
1405 ifm->ifi_flags = dev_get_flags(dev);
1406 ifm->ifi_change = change;
1408 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1409 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1410 nla_put_u8(skb, IFLA_OPERSTATE,
1411 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1412 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1413 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1414 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1415 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1416 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1417 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1418 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1420 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1422 nla_put_iflink(skb, dev) ||
1423 put_master_ifindex(skb, dev) ||
1424 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1426 nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
1427 nla_put_ifalias(skb, dev) ||
1428 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
1429 atomic_read(&dev->carrier_changes)) ||
1430 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1431 goto nla_put_failure;
1433 if (event != IFLA_EVENT_NONE) {
1434 if (nla_put_u32(skb, IFLA_EVENT, event))
1435 goto nla_put_failure;
1438 if (rtnl_fill_link_ifmap(skb, dev))
1439 goto nla_put_failure;
1441 if (dev->addr_len) {
1442 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1443 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1444 goto nla_put_failure;
1447 if (rtnl_phys_port_id_fill(skb, dev))
1448 goto nla_put_failure;
1450 if (rtnl_phys_port_name_fill(skb, dev))
1451 goto nla_put_failure;
1453 if (rtnl_phys_switch_id_fill(skb, dev))
1454 goto nla_put_failure;
1456 if (rtnl_fill_stats(skb, dev))
1457 goto nla_put_failure;
1459 if (rtnl_fill_vf(skb, dev, ext_filter_mask))
1460 goto nla_put_failure;
1462 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1463 goto nla_put_failure;
1465 if (rtnl_xdp_fill(skb, dev))
1466 goto nla_put_failure;
1468 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1469 if (rtnl_link_fill(skb, dev) < 0)
1470 goto nla_put_failure;
1473 if (rtnl_fill_link_netnsid(skb, dev))
1474 goto nla_put_failure;
1477 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
1478 goto nla_put_failure;
1480 if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
1481 goto nla_put_failure;
1483 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
1484 if (af_ops->fill_link_af) {
1488 if (!(af = nla_nest_start(skb, af_ops->family)))
1489 goto nla_put_failure;
1491 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1494 * Caller may return ENODATA to indicate that there
1495 * was no data to be dumped. This is not an error; it
1496 * means we should trim the attribute header and continue.
1499 if (err == -ENODATA)
1500 nla_nest_cancel(skb, af);
1502 goto nla_put_failure;
1504 nla_nest_end(skb, af);
1508 nla_nest_end(skb, af_spec);
1510 nlmsg_end(skb, nlh);
1514 nlmsg_cancel(skb, nlh);
1518 static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1519 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1520 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1521 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1522 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
1523 [IFLA_MTU] = { .type = NLA_U32 },
1524 [IFLA_LINK] = { .type = NLA_U32 },
1525 [IFLA_MASTER] = { .type = NLA_U32 },
1526 [IFLA_CARRIER] = { .type = NLA_U8 },
1527 [IFLA_TXQLEN] = { .type = NLA_U32 },
1528 [IFLA_WEIGHT] = { .type = NLA_U32 },
1529 [IFLA_OPERSTATE] = { .type = NLA_U8 },
1530 [IFLA_LINKMODE] = { .type = NLA_U8 },
1531 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1532 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1533 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1534 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
1535 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
1536 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1537 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
1538 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
1539 [IFLA_EXT_MASK] = { .type = NLA_U32 },
1540 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1541 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
1542 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
1543 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1544 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
1545 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1546 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
1547 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
1548 [IFLA_XDP] = { .type = NLA_NESTED },
1549 [IFLA_EVENT] = { .type = NLA_U32 },
1550 [IFLA_GROUP] = { .type = NLA_U32 },
1553 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1554 [IFLA_INFO_KIND] = { .type = NLA_STRING },
1555 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
1556 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
1557 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
1560 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1561 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
1562 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
1563 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
1564 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
1565 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
1566 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
1567 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
1568 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
1569 [IFLA_VF_STATS] = { .type = NLA_NESTED },
1570 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
1571 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1572 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1575 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
1576 [IFLA_PORT_VF] = { .type = NLA_U32 },
1577 [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
1578 .len = PORT_PROFILE_MAX },
1579 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
1580 .len = PORT_UUID_MAX },
1581 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
1582 .len = PORT_UUID_MAX },
1583 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
1584 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
1586 /* Unused, but we need to keep it here since user space could
1587 * fill it. It's also broken with regard to NLA_BINARY use in
1588 * combination with structs.
1590 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
1591 .len = sizeof(struct ifla_port_vsi) },
1594 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
1595 [IFLA_XDP_FD] = { .type = NLA_S32 },
1596 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
1597 [IFLA_XDP_FLAGS] = { .type = NLA_U32 },
1598 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
1601 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
1603 const struct rtnl_link_ops *ops = NULL;
1604 struct nlattr *linfo[IFLA_INFO_MAX + 1];
1606 if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla,
1607 ifla_info_policy, NULL) < 0)
1610 if (linfo[IFLA_INFO_KIND]) {
1611 char kind[MODULE_NAME_LEN];
1613 nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
1614 ops = rtnl_link_ops_get(kind);
1620 static bool link_master_filtered(struct net_device *dev, int master_idx)
1622 struct net_device *master;
1627 master = netdev_master_upper_dev_get(dev);
1628 if (!master || master->ifindex != master_idx)
1634 static bool link_kind_filtered(const struct net_device *dev,
1635 const struct rtnl_link_ops *kind_ops)
1637 if (kind_ops && dev->rtnl_link_ops != kind_ops)
1643 static bool link_dump_filtered(struct net_device *dev,
1645 const struct rtnl_link_ops *kind_ops)
1647 if (link_master_filtered(dev, master_idx) ||
1648 link_kind_filtered(dev, kind_ops))
1654 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1656 struct net *net = sock_net(skb->sk);
1659 struct net_device *dev;
1660 struct hlist_head *head;
1661 struct nlattr *tb[IFLA_MAX+1];
1662 u32 ext_filter_mask = 0;
1663 const struct rtnl_link_ops *kind_ops = NULL;
1664 unsigned int flags = NLM_F_MULTI;
1670 s_idx = cb->args[1];
1672 /* A hack to preserve kernel<->userspace interface.
1673 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
1674 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
1675 * what iproute2 < v3.9.0 used.
1676 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
1677 * attribute, its netlink message is shorter than struct ifinfomsg.
1679 hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
1680 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
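/* Concretely: struct rtgenmsg is a single byte (padded to 4) while
 * struct ifinfomsg is 16 bytes, so even an old-style request that adds
 * an IFLA_EXT_MASK attribute (4 byte header + 4 byte u32) stays at 12
 * bytes of payload and is classified as rtgenmsg here.
 */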
1682 if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX,
1683 ifla_policy, NULL) >= 0) {
1684 if (tb[IFLA_EXT_MASK])
1685 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
1687 if (tb[IFLA_MASTER])
1688 master_idx = nla_get_u32(tb[IFLA_MASTER]);
1690 if (tb[IFLA_LINKINFO])
1691 kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);
1693 if (master_idx || kind_ops)
1694 flags |= NLM_F_DUMP_FILTERED;
1697 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1699 head = &net->dev_index_head[h];
1700 hlist_for_each_entry(dev, head, index_hlist) {
1701 if (link_dump_filtered(dev, master_idx, kind_ops))
1705 err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
1706 NETLINK_CB(cb->skb).portid,
1707 cb->nlh->nlmsg_seq, 0,
1709 ext_filter_mask, 0, NULL);
1712 if (likely(skb->len))
1726 cb->seq = net->dev_base_seq;
1727 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1732 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
1733 struct netlink_ext_ack *exterr)
1735 return nla_parse(tb, IFLA_MAX, head, len, ifla_policy, exterr);
1737 EXPORT_SYMBOL(rtnl_nla_parse_ifla);
1739 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
1742 /* Examine the link attributes and figure out which
1743 * network namespace we are talking about.
1745 if (tb[IFLA_NET_NS_PID])
1746 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
1747 else if (tb[IFLA_NET_NS_FD])
1748 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
1750 net = get_net(src_net);
1753 EXPORT_SYMBOL(rtnl_link_get_net);
1755 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
1758 if (tb[IFLA_ADDRESS] &&
1759 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
1762 if (tb[IFLA_BROADCAST] &&
1763 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
1767 if (tb[IFLA_AF_SPEC]) {
1771 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
1772 const struct rtnl_af_ops *af_ops;
1774 if (!(af_ops = rtnl_af_lookup(nla_type(af))))
1775 return -EAFNOSUPPORT;
1777 if (!af_ops->set_link_af)
1780 if (af_ops->validate_link_af) {
1781 err = af_ops->validate_link_af(dev, af);
1791 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
1794 const struct net_device_ops *ops = dev->netdev_ops;
1796 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
1799 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
1801 if (dev->type != ARPHRD_INFINIBAND)
1804 return handle_infiniband_guid(dev, ivt, guid_type);
1807 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
1809 const struct net_device_ops *ops = dev->netdev_ops;
1812 if (tb[IFLA_VF_MAC]) {
1813 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
1816 if (ops->ndo_set_vf_mac)
1817 err = ops->ndo_set_vf_mac(dev, ivm->vf,
1823 if (tb[IFLA_VF_VLAN]) {
1824 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
1827 if (ops->ndo_set_vf_vlan)
1828 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
1830 htons(ETH_P_8021Q));
1835 if (tb[IFLA_VF_VLAN_LIST]) {
1836 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
1837 struct nlattr *attr;
1841 if (!ops->ndo_set_vf_vlan)
1844 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
1845 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
1846 nla_len(attr) < NLA_HDRLEN) {
1849 if (len >= MAX_VLAN_LIST_LEN)
1851 ivvl[len] = nla_data(attr);
1858 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
1859 ivvl[0]->qos, ivvl[0]->vlan_proto);
1864 if (tb[IFLA_VF_TX_RATE]) {
1865 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
1866 struct ifla_vf_info ivf;
1869 if (ops->ndo_get_vf_config)
1870 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
1875 if (ops->ndo_set_vf_rate)
1876 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1883 if (tb[IFLA_VF_RATE]) {
1884 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
1887 if (ops->ndo_set_vf_rate)
1888 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1895 if (tb[IFLA_VF_SPOOFCHK]) {
1896 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
1899 if (ops->ndo_set_vf_spoofchk)
1900 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
1906 if (tb[IFLA_VF_LINK_STATE]) {
1907 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
1910 if (ops->ndo_set_vf_link_state)
1911 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
1917 if (tb[IFLA_VF_RSS_QUERY_EN]) {
1918 struct ifla_vf_rss_query_en *ivrssq_en;
1921 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
1922 if (ops->ndo_set_vf_rss_query_en)
1923 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
1924 ivrssq_en->setting);
1929 if (tb[IFLA_VF_TRUST]) {
1930 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
1933 if (ops->ndo_set_vf_trust)
1934 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
1939 if (tb[IFLA_VF_IB_NODE_GUID]) {
1940 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
1942 if (!ops->ndo_set_vf_guid)
1945 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
1948 if (tb[IFLA_VF_IB_PORT_GUID]) {
1949 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
1951 if (!ops->ndo_set_vf_guid)
1954 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
1960 static int do_set_master(struct net_device *dev, int ifindex,
1961 struct netlink_ext_ack *extack)
1963 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
1964 const struct net_device_ops *ops;
1968 if (upper_dev->ifindex == ifindex)
1970 ops = upper_dev->netdev_ops;
1971 if (ops->ndo_del_slave) {
1972 err = ops->ndo_del_slave(upper_dev, dev);
1981 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
1984 ops = upper_dev->netdev_ops;
1985 if (ops->ndo_add_slave) {
1986 err = ops->ndo_add_slave(upper_dev, dev, extack);
1996 #define DO_SETLINK_MODIFIED 0x01
1997 /* notify flag means notify + modified. */
1998 #define DO_SETLINK_NOTIFY 0x03
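/* DO_SETLINK_NOTIFY (0x03) includes the MODIFIED bit, so the final
 * "status & DO_SETLINK_MODIFIED" test in do_setlink() fires for both,
 * and the NOTIFY bit additionally triggers netdev_state_change().
 */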
1999 static int do_setlink(const struct sk_buff *skb,
2000 struct net_device *dev, struct ifinfomsg *ifm,
2001 struct netlink_ext_ack *extack,
2002 struct nlattr **tb, char *ifname, int status)
2004 const struct net_device_ops *ops = dev->netdev_ops;
2007 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
2008 struct net *net = rtnl_link_get_net(dev_net(dev), tb);
2013 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
2018 err = dev_change_net_namespace(dev, net, ifname);
2022 status |= DO_SETLINK_MODIFIED;
2026 struct rtnl_link_ifmap *u_map;
2029 if (!ops->ndo_set_config) {
2034 if (!netif_device_present(dev)) {
2039 u_map = nla_data(tb[IFLA_MAP]);
2040 k_map.mem_start = (unsigned long) u_map->mem_start;
2041 k_map.mem_end = (unsigned long) u_map->mem_end;
2042 k_map.base_addr = (unsigned short) u_map->base_addr;
2043 k_map.irq = (unsigned char) u_map->irq;
2044 k_map.dma = (unsigned char) u_map->dma;
2045 k_map.port = (unsigned char) u_map->port;
2047 err = ops->ndo_set_config(dev, &k_map);
2051 status |= DO_SETLINK_NOTIFY;
2054 if (tb[IFLA_ADDRESS]) {
2055 struct sockaddr *sa;
2058 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2060 sa = kmalloc(len, GFP_KERNEL);
2065 sa->sa_family = dev->type;
2066 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2068 err = dev_set_mac_address(dev, sa);
2072 status |= DO_SETLINK_MODIFIED;
2076 err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
2079 status |= DO_SETLINK_MODIFIED;
2082 if (tb[IFLA_GROUP]) {
2083 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2084 status |= DO_SETLINK_NOTIFY;
2088 * Interface selected by interface index but interface
2089 * name provided implies that a name change has been requested.
2092 if (ifm->ifi_index > 0 && ifname[0]) {
2093 err = dev_change_name(dev, ifname);
2096 status |= DO_SETLINK_MODIFIED;
2099 if (tb[IFLA_IFALIAS]) {
2100 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2101 nla_len(tb[IFLA_IFALIAS]));
2104 status |= DO_SETLINK_NOTIFY;
2107 if (tb[IFLA_BROADCAST]) {
2108 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2109 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2112 if (ifm->ifi_flags || ifm->ifi_change) {
2113 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
2118 if (tb[IFLA_MASTER]) {
2119 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2122 status |= DO_SETLINK_MODIFIED;
2125 if (tb[IFLA_CARRIER]) {
2126 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2129 status |= DO_SETLINK_MODIFIED;
2132 if (tb[IFLA_TXQLEN]) {
2133 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2134 unsigned int orig_len = dev->tx_queue_len;
2136 if (dev->tx_queue_len ^ value) {
2137 dev->tx_queue_len = value;
2138 err = call_netdevice_notifiers(
2139 NETDEV_CHANGE_TX_QUEUE_LEN, dev);
2140 err = notifier_to_errno(err);
2142 dev->tx_queue_len = orig_len;
2145 status |= DO_SETLINK_NOTIFY;
2149 if (tb[IFLA_OPERSTATE])
2150 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2152 if (tb[IFLA_LINKMODE]) {
2153 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2155 write_lock_bh(&dev_base_lock);
2156 if (dev->link_mode ^ value)
2157 status |= DO_SETLINK_NOTIFY;
2158 dev->link_mode = value;
2159 write_unlock_bh(&dev_base_lock);
2162 if (tb[IFLA_VFINFO_LIST]) {
2163 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2164 struct nlattr *attr;
2167 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2168 if (nla_type(attr) != IFLA_VF_INFO ||
2169 nla_len(attr) < NLA_HDRLEN) {
2173 err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
2174 ifla_vf_policy, NULL);
2177 err = do_setvfinfo(dev, vfinfo);
2180 status |= DO_SETLINK_NOTIFY;
2185 if (tb[IFLA_VF_PORTS]) {
2186 struct nlattr *port[IFLA_PORT_MAX+1];
2187 struct nlattr *attr;
2192 if (!ops->ndo_set_vf_port)
2195 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2196 if (nla_type(attr) != IFLA_VF_PORT ||
2197 nla_len(attr) < NLA_HDRLEN) {
2201 err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
2202 ifla_port_policy, NULL);
2205 if (!port[IFLA_PORT_VF]) {
2209 vf = nla_get_u32(port[IFLA_PORT_VF]);
2210 err = ops->ndo_set_vf_port(dev, vf, port);
2213 status |= DO_SETLINK_NOTIFY;
2218 if (tb[IFLA_PORT_SELF]) {
2219 struct nlattr *port[IFLA_PORT_MAX+1];
2221 err = nla_parse_nested(port, IFLA_PORT_MAX,
2222 tb[IFLA_PORT_SELF], ifla_port_policy,
2228 if (ops->ndo_set_vf_port)
2229 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
2232 status |= DO_SETLINK_NOTIFY;
2235 if (tb[IFLA_AF_SPEC]) {
2239 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2240 const struct rtnl_af_ops *af_ops;
2242 if (!(af_ops = rtnl_af_lookup(nla_type(af))))
2245 err = af_ops->set_link_af(dev, af);
2249 status |= DO_SETLINK_NOTIFY;
2254 if (tb[IFLA_PROTO_DOWN]) {
2255 err = dev_change_proto_down(dev,
2256 nla_get_u8(tb[IFLA_PROTO_DOWN]));
2259 status |= DO_SETLINK_NOTIFY;
2263 struct nlattr *xdp[IFLA_XDP_MAX + 1];
2266 err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
2267 ifla_xdp_policy, NULL);
2271 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
2276 if (xdp[IFLA_XDP_FLAGS]) {
2277 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
2278 if (xdp_flags & ~XDP_FLAGS_MASK) {
2282 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
2288 if (xdp[IFLA_XDP_FD]) {
2289 err = dev_change_xdp_fd(dev, extack,
2290 nla_get_s32(xdp[IFLA_XDP_FD]),
2294 status |= DO_SETLINK_NOTIFY;
2299 if (status & DO_SETLINK_MODIFIED) {
2300 if (status & DO_SETLINK_NOTIFY)
2301 netdev_state_change(dev);
2304 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
2311 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2312 struct netlink_ext_ack *extack)
2314 struct net *net = sock_net(skb->sk);
2315 struct ifinfomsg *ifm;
2316 struct net_device *dev;
2318 struct nlattr *tb[IFLA_MAX+1];
2319 char ifname[IFNAMSIZ];
2321 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
2326 if (tb[IFLA_IFNAME])
2327 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2332 ifm = nlmsg_data(nlh);
2333 if (ifm->ifi_index > 0)
2334 dev = __dev_get_by_index(net, ifm->ifi_index);
2335 else if (tb[IFLA_IFNAME])
2336 dev = __dev_get_by_name(net, ifname);
2345 err = validate_linkmsg(dev, tb);
2349 err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
2354 static int rtnl_group_dellink(const struct net *net, int group)
2356 struct net_device *dev, *aux;
2357 LIST_HEAD(list_kill);
2363 for_each_netdev(net, dev) {
2364 if (dev->group == group) {
2365 const struct rtnl_link_ops *ops;
2368 ops = dev->rtnl_link_ops;
2369 if (!ops || !ops->dellink)
2377 for_each_netdev_safe(net, dev, aux) {
2378 if (dev->group == group) {
2379 const struct rtnl_link_ops *ops;
2381 ops = dev->rtnl_link_ops;
2382 ops->dellink(dev, &list_kill);
2385 unregister_netdevice_many(&list_kill);
2390 int rtnl_delete_link(struct net_device *dev)
2392 const struct rtnl_link_ops *ops;
2393 LIST_HEAD(list_kill);
2395 ops = dev->rtnl_link_ops;
2396 if (!ops || !ops->dellink)
2399 ops->dellink(dev, &list_kill);
2400 unregister_netdevice_many(&list_kill);
2404 EXPORT_SYMBOL_GPL(rtnl_delete_link);
2406 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
2407 struct netlink_ext_ack *extack)
2409 struct net *net = sock_net(skb->sk);
2410 struct net_device *dev;
2411 struct ifinfomsg *ifm;
2412 char ifname[IFNAMSIZ];
2413 struct nlattr *tb[IFLA_MAX+1];
2416 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
2420 if (tb[IFLA_IFNAME])
2421 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2423 ifm = nlmsg_data(nlh);
2424 if (ifm->ifi_index > 0)
2425 dev = __dev_get_by_index(net, ifm->ifi_index);
2426 else if (tb[IFLA_IFNAME])
2427 dev = __dev_get_by_name(net, ifname);
2428 else if (tb[IFLA_GROUP])
2429 return rtnl_group_dellink(net, nla_get_u32(tb[IFLA_GROUP]));
2436 return rtnl_delete_link(dev);
2439 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
2441 unsigned int old_flags;
2444 old_flags = dev->flags;
2445 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
2446 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
2451 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
2453 __dev_notify_flags(dev, old_flags, ~0U);
2456 EXPORT_SYMBOL(rtnl_configure_link);
2458 struct net_device *rtnl_create_link(struct net *net,
2459 const char *ifname, unsigned char name_assign_type,
2460 const struct rtnl_link_ops *ops, struct nlattr *tb[])
2462 struct net_device *dev;
2463 unsigned int num_tx_queues = 1;
2464 unsigned int num_rx_queues = 1;
2466 if (tb[IFLA_NUM_TX_QUEUES])
2467 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
2468 else if (ops->get_num_tx_queues)
2469 num_tx_queues = ops->get_num_tx_queues();
2471 if (tb[IFLA_NUM_RX_QUEUES])
2472 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
2473 else if (ops->get_num_rx_queues)
2474 num_rx_queues = ops->get_num_rx_queues();
2476 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
2477 ops->setup, num_tx_queues, num_rx_queues);
2479 return ERR_PTR(-ENOMEM);
2481 dev_net_set(dev, net);
2482 dev->rtnl_link_ops = ops;
2483 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
2486 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
2487 if (tb[IFLA_ADDRESS]) {
2488 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
2489 nla_len(tb[IFLA_ADDRESS]));
2490 dev->addr_assign_type = NET_ADDR_SET;
2492 if (tb[IFLA_BROADCAST])
2493 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
2494 nla_len(tb[IFLA_BROADCAST]));
2495 if (tb[IFLA_TXQLEN])
2496 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
2497 if (tb[IFLA_OPERSTATE])
2498 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2499 if (tb[IFLA_LINKMODE])
2500 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
2502 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2506 EXPORT_SYMBOL(rtnl_create_link);
2508 static int rtnl_group_changelink(const struct sk_buff *skb,
2509 struct net *net, int group,
2510 struct ifinfomsg *ifm,
2511 struct netlink_ext_ack *extack,
2514 struct net_device *dev, *aux;
2517 for_each_netdev_safe(net, dev, aux) {
2518 if (dev->group == group) {
2519 err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
2528 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2529 struct netlink_ext_ack *extack)
2531 struct net *net = sock_net(skb->sk);
2532 const struct rtnl_link_ops *ops;
2533 const struct rtnl_link_ops *m_ops = NULL;
2534 struct net_device *dev;
2535 struct net_device *master_dev = NULL;
2536 struct ifinfomsg *ifm;
2537 char kind[MODULE_NAME_LEN];
2538 char ifname[IFNAMSIZ];
2539 struct nlattr *tb[IFLA_MAX+1];
2540 struct nlattr *linkinfo[IFLA_INFO_MAX+1];
2541 unsigned char name_assign_type = NET_NAME_USER;
2544 #ifdef CONFIG_MODULES
2547 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
2551 if (tb[IFLA_IFNAME])
2552 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2556 ifm = nlmsg_data(nlh);
2557 if (ifm->ifi_index > 0)
2558 dev = __dev_get_by_index(net, ifm->ifi_index);
2561 dev = __dev_get_by_name(net, ifname);
2567 master_dev = netdev_master_upper_dev_get(dev);
2569 m_ops = master_dev->rtnl_link_ops;
2572 err = validate_linkmsg(dev, tb);
2576 if (tb[IFLA_LINKINFO]) {
2577 err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
2578 tb[IFLA_LINKINFO], ifla_info_policy,
2583 memset(linkinfo, 0, sizeof(linkinfo));
2585 if (linkinfo[IFLA_INFO_KIND]) {
2586 nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
2587 ops = rtnl_link_ops_get(kind);
2594 struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
2595 struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
2596 struct nlattr **data = NULL;
2597 struct nlattr **slave_data = NULL;
2598 struct net *dest_net, *link_net = NULL;
2601 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
2602 err = nla_parse_nested(attr, ops->maxtype,
2603 linkinfo[IFLA_INFO_DATA],
2609 if (ops->validate) {
2610 err = ops->validate(tb, data, extack);
2617 if (m_ops->slave_maxtype &&
2618 linkinfo[IFLA_INFO_SLAVE_DATA]) {
2619 err = nla_parse_nested(slave_attr,
2620 m_ops->slave_maxtype,
2621 linkinfo[IFLA_INFO_SLAVE_DATA],
2622 m_ops->slave_policy,
2626 slave_data = slave_attr;
2633 if (nlh->nlmsg_flags & NLM_F_EXCL)
2635 if (nlh->nlmsg_flags & NLM_F_REPLACE)
2638 if (linkinfo[IFLA_INFO_DATA]) {
2639 if (!ops || ops != dev->rtnl_link_ops ||
2643 err = ops->changelink(dev, tb, data, extack);
2646 status |= DO_SETLINK_NOTIFY;
2649 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
2650 if (!m_ops || !m_ops->slave_changelink)
2653 err = m_ops->slave_changelink(master_dev, dev,
2658 status |= DO_SETLINK_NOTIFY;
2661 return do_setlink(skb, dev, ifm, extack, tb, ifname,
2665 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2666 if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
2667 return rtnl_group_changelink(skb, net,
2668 nla_get_u32(tb[IFLA_GROUP]),
2673 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
2677 #ifdef CONFIG_MODULES
2680 request_module("rtnl-link-%s", kind);
2682 ops = rtnl_link_ops_get(kind);
2694 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
2695 name_assign_type = NET_NAME_ENUM;
2698 dest_net = rtnl_link_get_net(net, tb);
2699 if (IS_ERR(dest_net))
2700 return PTR_ERR(dest_net);
2703 if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
2706 if (tb[IFLA_LINK_NETNSID]) {
2707 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
2709 link_net = get_net_ns_by_id(dest_net, id);
2715 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
2719 dev = rtnl_create_link(link_net ? : dest_net, ifname,
2720 name_assign_type, ops, tb);
2726 dev->ifindex = ifm->ifi_index;
2729 err = ops->newlink(link_net ? : net, dev, tb, data,
2731 /* Drivers should call free_netdev() in ->destructor
2732 * and unregister it on failure after registration
2733 * so that the device can be finally freed in rtnl_unlock. */
2736 /* If device is not registered at all, free it now */
2737 if (dev->reg_state == NETREG_UNINITIALIZED)
2742 err = register_netdevice(dev);
2748 err = rtnl_configure_link(dev, ifm);
2750 goto out_unregister;
2752 err = dev_change_net_namespace(dev, dest_net, ifname);
2754 goto out_unregister;
2756 if (tb[IFLA_MASTER]) {
2757 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]),
2760 goto out_unregister;
2769 LIST_HEAD(list_kill);
2771 ops->dellink(dev, &list_kill);
2772 unregister_netdevice_many(&list_kill);
2774 unregister_netdevice(dev);
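/* RTM_GETLINK handler: look the device up by ifindex or name and unicast
 * a freshly built RTM_NEWLINK message back to the requesting socket.
 */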
2780 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2781 struct netlink_ext_ack *extack)
2783 struct net *net = sock_net(skb->sk);
2784 struct ifinfomsg *ifm;
2785 char ifname[IFNAMSIZ];
2786 struct nlattr *tb[IFLA_MAX+1];
2787 struct net_device *dev = NULL;
2788 struct sk_buff *nskb;
2790 u32 ext_filter_mask = 0;
2792 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
2796 if (tb[IFLA_IFNAME])
2797 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2799 if (tb[IFLA_EXT_MASK])
2800 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
2802 ifm = nlmsg_data(nlh);
2803 if (ifm->ifi_index > 0)
2804 dev = __dev_get_by_index(net, ifm->ifi_index);
2805 else if (tb[IFLA_IFNAME])
2806 dev = __dev_get_by_name(net, ifname);
2813 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
2817 err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid,
2818 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 0, NULL);
2820 /* -EMSGSIZE implies BUG in if_nlmsg_size */
2821 WARN_ON(err == -EMSGSIZE);
2824 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
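/* Work out the minimum dump buffer size for RTM_GETLINK dumps.  Without
 * an ext filter mask the default NLMSG_GOODSIZE is used; otherwise the
 * largest per-device message size found is taken as the lower bound.
 */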
2829 static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
2831 struct net *net = sock_net(skb->sk);
2832 struct net_device *dev;
2833 struct nlattr *tb[IFLA_MAX+1];
2834 u32 ext_filter_mask = 0;
2835 u16 min_ifinfo_dump_size = 0;
2838 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
2839 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2840 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2842 if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
2843 if (tb[IFLA_EXT_MASK])
2844 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
2847 if (!ext_filter_mask)
2848 return NLMSG_GOODSIZE;
2850 * traverse the list of net devices and compute the minimum
2851 * buffer size based upon the filter mask.
2854 for_each_netdev_rcu(net, dev) {
2855 min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
2861 return nlmsg_total_size(min_ifinfo_dump_size);
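/* Fallback dump handler: walk the registered protocol families and call
 * the dumpit handler each of them registered for this message type,
 * skipping families below the resume point kept in cb->family.
 */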
2864 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
2867 int s_idx = cb->family;
2872 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
2873 int type = cb->nlh->nlmsg_type - RTM_BASE;
2874 struct rtnl_link *handlers;
2875 rtnl_dumpit_func dumpit;
2877 if (idx < s_idx || idx == PF_PACKET)
2880 handlers = rtnl_dereference(rtnl_msg_handlers[idx]);
2884 dumpit = READ_ONCE(handlers[type].dumpit);
2889 memset(&cb->args[0], 0, sizeof(cb->args));
2893 if (dumpit(skb, cb))
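/* Build an rtnetlink link notification skb of the given message type for
 * @dev; allocation or fill errors are reported to listeners through
 * rtnl_set_sk_err().
 */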
2901 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
2902 unsigned int change,
2903 u32 event, gfp_t flags, int *new_nsid)
2905 struct net *net = dev_net(dev);
2906 struct sk_buff *skb;
2908 size_t if_info_size;
2910 skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
2914 err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0, event,
2917 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
2918 WARN_ON(err == -EMSGSIZE);
2925 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
2929 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
2931 struct net *net = dev_net(dev);
2933 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
2936 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
2937 unsigned int change, u32 event,
2938 gfp_t flags, int *new_nsid)
2940 struct sk_buff *skb;
2942 if (dev->reg_state != NETREG_REGISTERED)
2945 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid);
2947 rtmsg_ifinfo_send(skb, dev, flags);
2950 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
2953 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, NULL);
2955 EXPORT_SYMBOL(rtmsg_ifinfo);
2957 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
2958 gfp_t flags, int *new_nsid)
2960 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
2964 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
2965 struct net_device *dev,
2966 u8 *addr, u16 vid, u32 pid, u32 seq,
2967 int type, unsigned int flags,
2968 int nlflags, u16 ndm_state)
2970 struct nlmsghdr *nlh;
2973 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
2977 ndm = nlmsg_data(nlh);
2978 ndm->ndm_family = AF_BRIDGE;
2981 ndm->ndm_flags = flags;
2983 ndm->ndm_ifindex = dev->ifindex;
2984 ndm->ndm_state = ndm_state;
2986 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
2987 goto nla_put_failure;
2989 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
2990 goto nla_put_failure;
2992 nlmsg_end(skb, nlh);
2996 nlmsg_cancel(skb, nlh);
3000 static inline size_t rtnl_fdb_nlmsg_size(void)
3002 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
3003 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
3004 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
3008 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
3011 struct net *net = dev_net(dev);
3012 struct sk_buff *skb;
3015 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
3019 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
3020 0, 0, type, NTF_SELF, 0, ndm_state);
3026 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3029 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3033 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
3035 int ndo_dflt_fdb_add(struct ndmsg *ndm,
3036 struct nlattr *tb[],
3037 struct net_device *dev,
3038 const unsigned char *addr, u16 vid,
3043 /* If aging addresses are supported, the device will need to
3044 * implement its own handler for this. */
3046 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
3047 pr_info("%s: FDB only supports static addresses\n", dev->name);
3052 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
3056 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3057 err = dev_uc_add_excl(dev, addr);
3058 else if (is_multicast_ether_addr(addr))
3059 err = dev_mc_add_excl(dev, addr);
3061 /* Only return duplicate errors if NLM_F_EXCL is set */
3062 if (err == -EEXIST && !(flags & NLM_F_EXCL))
3067 EXPORT_SYMBOL(ndo_dflt_fdb_add);
3069 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid)
3074 if (nla_len(vlan_attr) != sizeof(u16)) {
3075 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan\n");
3079 vid = nla_get_u16(vlan_attr);
3081 if (!vid || vid >= VLAN_VID_MASK) {
3082 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan id %d\n",
3091 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
3092 struct netlink_ext_ack *extack)
3094 struct net *net = sock_net(skb->sk);
3096 struct nlattr *tb[NDA_MAX+1];
3097 struct net_device *dev;
3102 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
3106 ndm = nlmsg_data(nlh);
3107 if (ndm->ndm_ifindex == 0) {
3108 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
3112 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
3114 pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
3118 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3119 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
3123 addr = nla_data(tb[NDA_LLADDR]);
3125 err = fdb_vid_parse(tb[NDA_VLAN], &vid);
3131 /* Support fdb on the master device (the net/bridge default case) */
3132 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
3133 (dev->priv_flags & IFF_BRIDGE_PORT)) {
3134 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3135 const struct net_device_ops *ops = br_dev->netdev_ops;
3137 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
3142 ndm->ndm_flags &= ~NTF_MASTER;
3145 /* Embedded bridge, macvlan, and any other device support */
3146 if ((ndm->ndm_flags & NTF_SELF)) {
3147 if (dev->netdev_ops->ndo_fdb_add)
3148 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
3152 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
3156 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
3158 ndm->ndm_flags &= ~NTF_SELF;
3166 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
3168 int ndo_dflt_fdb_del(struct ndmsg *ndm,
3169 struct nlattr *tb[],
3170 struct net_device *dev,
3171 const unsigned char *addr, u16 vid)
3175 /* If aging addresses are supported, the device will need to
3176 * implement its own handler for this. */
3178 if (!(ndm->ndm_state & NUD_PERMANENT)) {
3179 pr_info("%s: FDB only supports static addresses\n", dev->name);
3183 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3184 err = dev_uc_del(dev, addr);
3185 else if (is_multicast_ether_addr(addr))
3186 err = dev_mc_del(dev, addr);
3190 EXPORT_SYMBOL(ndo_dflt_fdb_del);
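/* RTM_DELNEIGH handler for PF_BRIDGE.  Requires CAP_NET_ADMIN; removes
 * the FDB entry on the master bridge (NTF_MASTER) and/or on the device
 * itself (NTF_SELF) and sends an RTM_DELNEIGH notification on success.
 */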
3192 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
3193 struct netlink_ext_ack *extack)
3195 struct net *net = sock_net(skb->sk);
3197 struct nlattr *tb[NDA_MAX+1];
3198 struct net_device *dev;
3203 if (!netlink_capable(skb, CAP_NET_ADMIN))
3206 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
3210 ndm = nlmsg_data(nlh);
3211 if (ndm->ndm_ifindex == 0) {
3212 pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
3216 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
3218 pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
3222 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3223 pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
3227 addr = nla_data(tb[NDA_LLADDR]);
3229 err = fdb_vid_parse(tb[NDA_VLAN], &vid);
3235 /* Support fdb on the master device (the net/bridge default case) */
3236 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
3237 (dev->priv_flags & IFF_BRIDGE_PORT)) {
3238 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3239 const struct net_device_ops *ops = br_dev->netdev_ops;
3241 if (ops->ndo_fdb_del)
3242 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
3247 ndm->ndm_flags &= ~NTF_MASTER;
3250 /* Embedded bridge, macvlan, and any other device support */
3251 if (ndm->ndm_flags & NTF_SELF) {
3252 if (dev->netdev_ops->ndo_fdb_del)
3253 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
3256 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
3259 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
3261 ndm->ndm_flags &= ~NTF_SELF;
3268 static int nlmsg_populate_fdb(struct sk_buff *skb,
3269 struct netlink_callback *cb,
3270 struct net_device *dev,
3272 struct netdev_hw_addr_list *list)
3274 struct netdev_hw_addr *ha;
3278 portid = NETLINK_CB(cb->skb).portid;
3279 seq = cb->nlh->nlmsg_seq;
3281 list_for_each_entry(ha, &list->list, list) {
3282 if (*idx < cb->args[2])
3285 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
3287 RTM_NEWNEIGH, NTF_SELF,
3288 NLM_F_MULTI, NUD_PERMANENT);
3298 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
3299 * @skb: socket buffer to store message in
3302 * Default netdevice operation to dump the existing unicast address list.
3303 * Returns number of addresses from list put in skb.
3305 int ndo_dflt_fdb_dump(struct sk_buff *skb,
3306 struct netlink_callback *cb,
3307 struct net_device *dev,
3308 struct net_device *filter_dev,
3313 netif_addr_lock_bh(dev);
3314 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
3317 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
3319 netif_addr_unlock_bh(dev);
3322 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
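/* Dump FDB entries for bridge ports in this namespace.  IFLA_MASTER
 * limits the dump to ports of one bridge and ifi_index to a single
 * port; the per-device offset kept in cb->args[2] is reset when moving
 * on to the next interface.
 */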
3324 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
3326 struct net_device *dev;
3327 struct nlattr *tb[IFLA_MAX+1];
3328 struct net_device *br_dev = NULL;
3329 const struct net_device_ops *ops = NULL;
3330 const struct net_device_ops *cops = NULL;
3331 struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
3332 struct net *net = sock_net(skb->sk);
3333 struct hlist_head *head;
3341 err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
3342 IFLA_MAX, ifla_policy, NULL);
3345 } else if (err == 0) {
3346 if (tb[IFLA_MASTER])
3347 br_idx = nla_get_u32(tb[IFLA_MASTER]);
3350 brport_idx = ifm->ifi_index;
3353 br_dev = __dev_get_by_index(net, br_idx);
3357 ops = br_dev->netdev_ops;
3361 s_idx = cb->args[1];
3363 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
3365 head = &net->dev_index_head[h];
3366 hlist_for_each_entry(dev, head, index_hlist) {
3368 if (brport_idx && (dev->ifindex != brport_idx))
3371 if (!br_idx) { /* user did not specify a specific bridge */
3372 if (dev->priv_flags & IFF_BRIDGE_PORT) {
3373 br_dev = netdev_master_upper_dev_get(dev);
3374 cops = br_dev->netdev_ops;
3377 if (dev != br_dev &&
3378 !(dev->priv_flags & IFF_BRIDGE_PORT))
3381 if (br_dev != netdev_master_upper_dev_get(dev) &&
3382 !(dev->priv_flags & IFF_EBRIDGE))
3390 if (dev->priv_flags & IFF_BRIDGE_PORT) {
3391 if (cops && cops->ndo_fdb_dump) {
3392 err = cops->ndo_fdb_dump(skb, cb,
3395 if (err == -EMSGSIZE)
3400 if (dev->netdev_ops->ndo_fdb_dump)
3401 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
3405 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
3407 if (err == -EMSGSIZE)
3412 /* reset fdb offset to 0 for rest of the interfaces */
3428 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
3429 unsigned int attrnum, unsigned int flag)
3432 return nla_put_u8(skb, attrnum, !!(flags & flag));
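/* Default ndo_bridge_getlink implementation: emit an AF_BRIDGE
 * RTM_NEWLINK message with the basic interface attributes, an
 * IFLA_AF_SPEC nest carrying the bridge flags/mode and optional VLAN
 * info, and an IFLA_PROTINFO nest with the bridge port flags selected
 * by @mask.
 */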
3436 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3437 struct net_device *dev, u16 mode,
3438 u32 flags, u32 mask, int nlflags,
3440 int (*vlan_fill)(struct sk_buff *skb,
3441 struct net_device *dev,
3444 struct nlmsghdr *nlh;
3445 struct ifinfomsg *ifm;
3446 struct nlattr *br_afspec;
3447 struct nlattr *protinfo;
3448 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
3449 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3452 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
3456 ifm = nlmsg_data(nlh);
3457 ifm->ifi_family = AF_BRIDGE;
3459 ifm->ifi_type = dev->type;
3460 ifm->ifi_index = dev->ifindex;
3461 ifm->ifi_flags = dev_get_flags(dev);
3462 ifm->ifi_change = 0;
3465 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
3466 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
3467 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
3469 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
3471 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
3472 (dev->ifindex != dev_get_iflink(dev) &&
3473 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
3474 goto nla_put_failure;
3476 br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
3478 goto nla_put_failure;
3480 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
3481 nla_nest_cancel(skb, br_afspec);
3482 goto nla_put_failure;
3485 if (mode != BRIDGE_MODE_UNDEF) {
3486 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
3487 nla_nest_cancel(skb, br_afspec);
3488 goto nla_put_failure;
3492 err = vlan_fill(skb, dev, filter_mask);
3494 nla_nest_cancel(skb, br_afspec);
3495 goto nla_put_failure;
3498 nla_nest_end(skb, br_afspec);
3500 protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
3502 goto nla_put_failure;
3504 if (brport_nla_put_flag(skb, flags, mask,
3505 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
3506 brport_nla_put_flag(skb, flags, mask,
3507 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
3508 brport_nla_put_flag(skb, flags, mask,
3509 IFLA_BRPORT_FAST_LEAVE,
3510 BR_MULTICAST_FAST_LEAVE) ||
3511 brport_nla_put_flag(skb, flags, mask,
3512 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
3513 brport_nla_put_flag(skb, flags, mask,
3514 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
3515 brport_nla_put_flag(skb, flags, mask,
3516 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
3517 brport_nla_put_flag(skb, flags, mask,
3518 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
3519 brport_nla_put_flag(skb, flags, mask,
3520 IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
3521 nla_nest_cancel(skb, protinfo);
3522 goto nla_put_failure;
3525 nla_nest_end(skb, protinfo);
3527 nlmsg_end(skb, nlh);
3530 nlmsg_cancel(skb, nlh);
3531 return err ? err : -EMSGSIZE;
3533 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
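/* Dump bridge information for every device in the namespace, querying
 * the master bridge first and then the port itself through
 * ndo_bridge_getlink, honouring an optional filter mask supplied in the
 * request.
 */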
3535 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
3537 struct net *net = sock_net(skb->sk);
3538 struct net_device *dev;
3540 u32 portid = NETLINK_CB(cb->skb).portid;
3541 u32 seq = cb->nlh->nlmsg_seq;
3542 u32 filter_mask = 0;
3545 if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
3546 struct nlattr *extfilt;
3548 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
3551 if (nla_len(extfilt) < sizeof(filter_mask))
3554 filter_mask = nla_get_u32(extfilt);
3559 for_each_netdev_rcu(net, dev) {
3560 const struct net_device_ops *ops = dev->netdev_ops;
3561 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3563 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
3564 if (idx >= cb->args[0]) {
3565 err = br_dev->netdev_ops->ndo_bridge_getlink(
3566 skb, portid, seq, dev,
3567 filter_mask, NLM_F_MULTI);
3568 if (err < 0 && err != -EOPNOTSUPP) {
3569 if (likely(skb->len))
3578 if (ops->ndo_bridge_getlink) {
3579 if (idx >= cb->args[0]) {
3580 err = ops->ndo_bridge_getlink(skb, portid,
3584 if (err < 0 && err != -EOPNOTSUPP) {
3585 if (likely(skb->len))
3602 static inline size_t bridge_nlmsg_size(void)
3604 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
3605 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
3606 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
3607 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
3608 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
3609 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
3610 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
3611 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
3612 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
3613 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
3614 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
3617 static int rtnl_bridge_notify(struct net_device *dev)
3619 struct net *net = dev_net(dev);
3620 struct sk_buff *skb;
3621 int err = -EOPNOTSUPP;
3623 if (!dev->netdev_ops->ndo_bridge_getlink)
3626 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
3632 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
3639 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
3642 WARN_ON(err == -EMSGSIZE);
3645 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
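/* RTM_SETLINK handler for PF_BRIDGE: forward the request to the master
 * bridge (BRIDGE_FLAGS_MASTER) and/or to the port device itself
 * (BRIDGE_FLAGS_SELF), then notify user space of the resulting change.
 */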
3649 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3650 struct netlink_ext_ack *extack)
3652 struct net *net = sock_net(skb->sk);
3653 struct ifinfomsg *ifm;
3654 struct net_device *dev;
3655 struct nlattr *br_spec, *attr = NULL;
3656 int rem, err = -EOPNOTSUPP;
3658 bool have_flags = false;
3660 if (nlmsg_len(nlh) < sizeof(*ifm))
3663 ifm = nlmsg_data(nlh);
3664 if (ifm->ifi_family != AF_BRIDGE)
3665 return -EPFNOSUPPORT;
3667 dev = __dev_get_by_index(net, ifm->ifi_index);
3669 pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
3673 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3675 nla_for_each_nested(attr, br_spec, rem) {
3676 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
3677 if (nla_len(attr) < sizeof(flags))
3681 flags = nla_get_u16(attr);
3687 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
3688 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3690 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
3695 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
3699 flags &= ~BRIDGE_FLAGS_MASTER;
3702 if ((flags & BRIDGE_FLAGS_SELF)) {
3703 if (!dev->netdev_ops->ndo_bridge_setlink)
3706 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
3709 flags &= ~BRIDGE_FLAGS_SELF;
3711 /* Generate event to notify upper layer of bridge change */
3714 err = rtnl_bridge_notify(dev);
3719 memcpy(nla_data(attr), &flags, sizeof(flags));
3724 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3725 struct netlink_ext_ack *extack)
3727 struct net *net = sock_net(skb->sk);
3728 struct ifinfomsg *ifm;
3729 struct net_device *dev;
3730 struct nlattr *br_spec, *attr = NULL;
3731 int rem, err = -EOPNOTSUPP;
3733 bool have_flags = false;
3735 if (nlmsg_len(nlh) < sizeof(*ifm))
3738 ifm = nlmsg_data(nlh);
3739 if (ifm->ifi_family != AF_BRIDGE)
3740 return -EPFNOSUPPORT;
3742 dev = __dev_get_by_index(net, ifm->ifi_index);
3744 pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
3748 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3750 nla_for_each_nested(attr, br_spec, rem) {
3751 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
3752 if (nla_len(attr) < sizeof(flags))
3756 flags = nla_get_u16(attr);
3762 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
3763 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3765 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
3770 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
3774 flags &= ~BRIDGE_FLAGS_MASTER;
3777 if ((flags & BRIDGE_FLAGS_SELF)) {
3778 if (!dev->netdev_ops->ndo_bridge_dellink)
3781 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
3785 flags &= ~BRIDGE_FLAGS_SELF;
3787 /* Generate event to notify upper layer of bridge change */
3790 err = rtnl_bridge_notify(dev);
3795 memcpy(nla_data(attr), &flags, sizeof(flags));
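/* A stats attribute is filled when its bit is set in the filter mask and
 * we are either not resuming or resuming at exactly this attribute.
 */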
3800 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
3802 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
3803 (!idxattr || idxattr == attrid);
3806 #define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
3807 static int rtnl_get_offload_stats_attr_size(int attr_id)
3810 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
3811 return sizeof(struct rtnl_link_stats64);
3817 static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
3820 struct nlattr *attr = NULL;
3825 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
3826 dev->netdev_ops->ndo_get_offload_stats))
3829 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
3830 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
3831 if (attr_id < *prividx)
3834 size = rtnl_get_offload_stats_attr_size(attr_id);
3838 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
3841 attr = nla_reserve_64bit(skb, attr_id, size,
3842 IFLA_OFFLOAD_XSTATS_UNSPEC);
3844 goto nla_put_failure;
3846 attr_data = nla_data(attr);
3847 memset(attr_data, 0, size);
3848 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
3851 goto get_offload_stats_failure;
3862 get_offload_stats_failure:
3867 static int rtnl_get_offload_stats_size(const struct net_device *dev)
3873 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
3874 dev->netdev_ops->ndo_get_offload_stats))
3877 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
3878 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
3879 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
3881 size = rtnl_get_offload_stats_attr_size(attr_id);
3882 nla_size += nla_total_size_64bit(size);
3886 nla_size += nla_total_size(0);
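/* Fill one RTM_NEWSTATS message for @dev.  Each statistics group selected
 * by @filter_mask is emitted in turn; *idxattr and *prividx record how
 * far a partially filled group got so that a later dump pass can resume
 * from that point.
 */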
3891 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
3892 int type, u32 pid, u32 seq, u32 change,
3893 unsigned int flags, unsigned int filter_mask,
3894 int *idxattr, int *prividx)
3896 struct if_stats_msg *ifsm;
3897 struct nlmsghdr *nlh;
3898 struct nlattr *attr;
3899 int s_prividx = *prividx;
3904 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
3908 ifsm = nlmsg_data(nlh);
3909 ifsm->family = PF_UNSPEC;
3912 ifsm->ifindex = dev->ifindex;
3913 ifsm->filter_mask = filter_mask;
3915 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
3916 struct rtnl_link_stats64 *sp;
3918 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
3919 sizeof(struct rtnl_link_stats64),
3922 goto nla_put_failure;
3924 sp = nla_data(attr);
3925 dev_get_stats(dev, sp);
3928 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
3929 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
3931 if (ops && ops->fill_linkxstats) {
3932 *idxattr = IFLA_STATS_LINK_XSTATS;
3933 attr = nla_nest_start(skb,
3934 IFLA_STATS_LINK_XSTATS);
3936 goto nla_put_failure;
3938 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
3939 nla_nest_end(skb, attr);
3941 goto nla_put_failure;
3946 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
3948 const struct rtnl_link_ops *ops = NULL;
3949 const struct net_device *master;
3951 master = netdev_master_upper_dev_get(dev);
3953 ops = master->rtnl_link_ops;
3954 if (ops && ops->fill_linkxstats) {
3955 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
3956 attr = nla_nest_start(skb,
3957 IFLA_STATS_LINK_XSTATS_SLAVE);
3959 goto nla_put_failure;
3961 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
3962 nla_nest_end(skb, attr);
3964 goto nla_put_failure;
3969 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
3971 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
3972 attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
3974 goto nla_put_failure;
3976 err = rtnl_get_offload_stats(skb, dev, prividx);
3977 if (err == -ENODATA)
3978 nla_nest_cancel(skb, attr);
3980 nla_nest_end(skb, attr);
3982 if (err && err != -ENODATA)
3983 goto nla_put_failure;
3987 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
3988 struct rtnl_af_ops *af_ops;
3990 *idxattr = IFLA_STATS_AF_SPEC;
3991 attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC);
3993 goto nla_put_failure;
3995 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
3996 if (af_ops->fill_stats_af) {
4000 af = nla_nest_start(skb, af_ops->family);
4002 goto nla_put_failure;
4004 err = af_ops->fill_stats_af(skb, dev);
4006 if (err == -ENODATA)
4007 nla_nest_cancel(skb, af);
4009 goto nla_put_failure;
4011 nla_nest_end(skb, af);
4015 nla_nest_end(skb, attr);
4020 nlmsg_end(skb, nlh);
4026 /* not a multipart message or no progress means a real error */
4026 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
4027 nlmsg_cancel(skb, nlh);
4029 nlmsg_end(skb, nlh);
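/* Estimate the skb size needed for one RTM_NEWSTATS message with the
 * given filter mask, mirroring the groups filled by rtnl_fill_statsinfo().
 */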
4034 static size_t if_nlmsg_stats_size(const struct net_device *dev,
4039 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
4040 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
4042 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
4043 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
4044 int attr = IFLA_STATS_LINK_XSTATS;
4046 if (ops && ops->get_linkxstats_size) {
4047 size += nla_total_size(ops->get_linkxstats_size(dev,
4049 /* for IFLA_STATS_LINK_XSTATS */
4050 size += nla_total_size(0);
4054 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
4055 struct net_device *_dev = (struct net_device *)dev;
4056 const struct rtnl_link_ops *ops = NULL;
4057 const struct net_device *master;
4059 /* netdev_master_upper_dev_get can't take const */
4060 master = netdev_master_upper_dev_get(_dev);
4062 ops = master->rtnl_link_ops;
4063 if (ops && ops->get_linkxstats_size) {
4064 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
4066 size += nla_total_size(ops->get_linkxstats_size(dev,
4068 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
4069 size += nla_total_size(0);
4073 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
4074 size += rtnl_get_offload_stats_size(dev);
4076 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
4077 struct rtnl_af_ops *af_ops;
4079 /* for IFLA_STATS_AF_SPEC */
4080 size += nla_total_size(0);
4082 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
4083 if (af_ops->get_stats_af_size) {
4084 size += nla_total_size(
4085 af_ops->get_stats_af_size(dev));
4088 size += nla_total_size(0);
4096 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
4097 struct netlink_ext_ack *extack)
4099 struct net *net = sock_net(skb->sk);
4100 struct net_device *dev = NULL;
4101 int idxattr = 0, prividx = 0;
4102 struct if_stats_msg *ifsm;
4103 struct sk_buff *nskb;
4107 if (nlmsg_len(nlh) < sizeof(*ifsm))
4110 ifsm = nlmsg_data(nlh);
4111 if (ifsm->ifindex > 0)
4112 dev = __dev_get_by_index(net, ifsm->ifindex);
4119 filter_mask = ifsm->filter_mask;
4123 nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
4127 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
4128 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
4129 0, filter_mask, &idxattr, &prividx);
4131 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
4132 WARN_ON(err == -EMSGSIZE);
4135 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
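/* Dump RTM_NEWSTATS messages for all devices, keeping the hash bucket,
 * device index, attribute index and private index in cb->args[] so the
 * dump can resume where it left off.
 */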
4141 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
4143 int h, s_h, err, s_idx, s_idxattr, s_prividx;
4144 struct net *net = sock_net(skb->sk);
4145 unsigned int flags = NLM_F_MULTI;
4146 struct if_stats_msg *ifsm;
4147 struct hlist_head *head;
4148 struct net_device *dev;
4149 u32 filter_mask = 0;
4153 s_idx = cb->args[1];
4154 s_idxattr = cb->args[2];
4155 s_prividx = cb->args[3];
4157 cb->seq = net->dev_base_seq;
4159 if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
4162 ifsm = nlmsg_data(cb->nlh);
4163 filter_mask = ifsm->filter_mask;
4167 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4169 head = &net->dev_index_head[h];
4170 hlist_for_each_entry(dev, head, index_hlist) {
4173 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
4174 NETLINK_CB(cb->skb).portid,
4175 cb->nlh->nlmsg_seq, 0,
4177 &s_idxattr, &s_prividx);
4178 /* If we ran out of room on the first message, we are in trouble. */
4181 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
4187 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
4193 cb->args[3] = s_prividx;
4194 cb->args[2] = s_idxattr;
4201 /* Process one rtnetlink message. */
4203 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
4204 struct netlink_ext_ack *extack)
4206 struct net *net = sock_net(skb->sk);
4207 struct rtnl_link *handlers;
4208 int err = -EOPNOTSUPP;
4209 rtnl_doit_func doit;
4215 type = nlh->nlmsg_type;
4221 /* All messages must carry at least 1 byte of payload */
4222 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
4225 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
4228 if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
4231 if (family >= ARRAY_SIZE(rtnl_msg_handlers))
4235 handlers = rcu_dereference(rtnl_msg_handlers[family]);
4238 handlers = rcu_dereference(rtnl_msg_handlers[family]);
4241 if (kind == 2 && nlh->nlmsg_flags & NLM_F_DUMP) {
4243 rtnl_dumpit_func dumpit;
4244 u16 min_dump_alloc = 0;
4246 dumpit = READ_ONCE(handlers[type].dumpit);
4249 handlers = rcu_dereference(rtnl_msg_handlers[PF_UNSPEC]);
4253 dumpit = READ_ONCE(handlers[type].dumpit);
4258 refcount_inc(&rtnl_msg_handlers_ref[family]);
4260 if (type == RTM_GETLINK - RTM_BASE)
4261 min_dump_alloc = rtnl_calcit(skb, nlh);
4267 struct netlink_dump_control c = {
4269 .min_dump_alloc = min_dump_alloc,
4271 err = netlink_dump_start(rtnl, skb, nlh, &c);
4273 refcount_dec(&rtnl_msg_handlers_ref[family]);
4277 doit = READ_ONCE(handlers[type].doit);
4280 handlers = rcu_dereference(rtnl_msg_handlers[family]);
4283 flags = READ_ONCE(handlers[type].flags);
4284 if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
4285 refcount_inc(&rtnl_msg_handlers_ref[family]);
4286 doit = READ_ONCE(handlers[type].doit);
4289 err = doit(skb, nlh, extack);
4290 refcount_dec(&rtnl_msg_handlers_ref[family]);
4297 handlers = rtnl_dereference(rtnl_msg_handlers[family]);
4299 doit = READ_ONCE(handlers[type].doit);
4301 err = doit(skb, nlh, extack);
4311 static void rtnetlink_rcv(struct sk_buff *skb)
4313 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
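/* Restrict membership of the IPv4/IPv6 multicast route reporting groups
 * to holders of CAP_NET_ADMIN in the socket's user namespace.
 */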
4316 static int rtnetlink_bind(struct net *net, int group)
4319 case RTNLGRP_IPV4_MROUTE_R:
4320 case RTNLGRP_IPV6_MROUTE_R:
4321 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4328 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
4330 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4334 case NETDEV_CHANGEADDR:
4335 case NETDEV_CHANGENAME:
4336 case NETDEV_FEAT_CHANGE:
4337 case NETDEV_BONDING_FAILOVER:
4338 case NETDEV_NOTIFY_PEERS:
4339 case NETDEV_RESEND_IGMP:
4340 case NETDEV_CHANGEINFODATA:
4341 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
4350 static struct notifier_block rtnetlink_dev_notifier = {
4351 .notifier_call = rtnetlink_event,
4355 static int __net_init rtnetlink_net_init(struct net *net)
4358 struct netlink_kernel_cfg cfg = {
4359 .groups = RTNLGRP_MAX,
4360 .input = rtnetlink_rcv,
4361 .cb_mutex = &rtnl_mutex,
4362 .flags = NL_CFG_F_NONROOT_RECV,
4363 .bind = rtnetlink_bind,
4366 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
4373 static void __net_exit rtnetlink_net_exit(struct net *net)
4375 netlink_kernel_release(net->rtnl);
4379 static struct pernet_operations rtnetlink_net_ops = {
4380 .init = rtnetlink_net_init,
4381 .exit = rtnetlink_net_exit,
4384 void __init rtnetlink_init(void)
4388 for (i = 0; i < ARRAY_SIZE(rtnl_msg_handlers_ref); i++)
4389 refcount_set(&rtnl_msg_handlers_ref[i], 1);
4391 if (register_pernet_subsys(&rtnetlink_net_ops))
4392 panic("rtnetlink_init: cannot initialize rtnetlink\n");
4394 register_netdevice_notifier(&rtnetlink_dev_notifier);
4396 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
4397 rtnl_dump_ifinfo, 0);
4398 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
4399 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
4400 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
4402 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
4403 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
4404 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
4406 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
4407 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
4408 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, 0);
4410 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
4411 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
4412 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
4414 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,