2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Routing netlink socket interface: protocol independent part.
8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
16 * Vitaly E. Lavrov RTA_OK arithmetics was wrong.
19 #include <linux/bitops.h>
20 #include <linux/errno.h>
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/socket.h>
24 #include <linux/kernel.h>
25 #include <linux/timer.h>
26 #include <linux/string.h>
27 #include <linux/sockios.h>
28 #include <linux/net.h>
29 #include <linux/fcntl.h>
31 #include <linux/slab.h>
32 #include <linux/interrupt.h>
33 #include <linux/capability.h>
34 #include <linux/skbuff.h>
35 #include <linux/init.h>
36 #include <linux/security.h>
37 #include <linux/mutex.h>
38 #include <linux/if_addr.h>
39 #include <linux/if_bridge.h>
40 #include <linux/if_vlan.h>
41 #include <linux/pci.h>
42 #include <linux/etherdevice.h>
43 #include <linux/bpf.h>
45 #include <linux/uaccess.h>
47 #include <linux/inet.h>
48 #include <linux/netdevice.h>
49 #include <net/switchdev.h>
51 #include <net/protocol.h>
53 #include <net/route.h>
57 #include <net/pkt_sched.h>
58 #include <net/fib_rules.h>
59 #include <net/rtnetlink.h>
60 #include <net/net_namespace.h>
64 rtnl_dumpit_func dumpit;
70 static DEFINE_MUTEX(rtnl_mutex);
74 mutex_lock(&rtnl_mutex);
76 EXPORT_SYMBOL(rtnl_lock);
78 int rtnl_lock_killable(void)
80 return mutex_lock_killable(&rtnl_mutex);
82 EXPORT_SYMBOL(rtnl_lock_killable);
84 static struct sk_buff *defer_kfree_skb_list;
85 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
88 tail->next = defer_kfree_skb_list;
89 defer_kfree_skb_list = head;
92 EXPORT_SYMBOL(rtnl_kfree_skbs);
94 void __rtnl_unlock(void)
96 struct sk_buff *head = defer_kfree_skb_list;
98 defer_kfree_skb_list = NULL;
100 mutex_unlock(&rtnl_mutex);
103 struct sk_buff *next = head->next;
111 void rtnl_unlock(void)
113 /* This fellow will unlock it for us. */
116 EXPORT_SYMBOL(rtnl_unlock);
118 int rtnl_trylock(void)
120 return mutex_trylock(&rtnl_mutex);
122 EXPORT_SYMBOL(rtnl_trylock);
124 int rtnl_is_locked(void)
126 return mutex_is_locked(&rtnl_mutex);
128 EXPORT_SYMBOL(rtnl_is_locked);
130 #ifdef CONFIG_PROVE_LOCKING
131 bool lockdep_rtnl_is_held(void)
133 return lockdep_is_held(&rtnl_mutex);
135 EXPORT_SYMBOL(lockdep_rtnl_is_held);
136 #endif /* #ifdef CONFIG_PROVE_LOCKING */
138 static struct rtnl_link *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
140 static inline int rtm_msgindex(int msgtype)
142 int msgindex = msgtype - RTM_BASE;
145 * msgindex < 0 implies someone tried to register a netlink
146 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
147 * the message type has not been added to linux/rtnetlink.h
149 BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
154 static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
156 struct rtnl_link **tab;
158 if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
159 protocol = PF_UNSPEC;
161 tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
163 tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
168 static int rtnl_register_internal(struct module *owner,
169 int protocol, int msgtype,
170 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
173 struct rtnl_link *link, *old;
174 struct rtnl_link __rcu **tab;
178 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
179 msgindex = rtm_msgindex(msgtype);
182 tab = rtnl_msg_handlers[protocol];
184 tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
188 /* ensures we see the 0 stores */
189 rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
192 old = rtnl_dereference(tab[msgindex]);
194 link = kmemdup(old, sizeof(*old), GFP_KERNEL);
198 link = kzalloc(sizeof(*link), GFP_KERNEL);
203 WARN_ON(link->owner && link->owner != owner);
206 WARN_ON(doit && link->doit && link->doit != doit);
209 WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
211 link->dumpit = dumpit;
213 link->flags |= flags;
215 /* publish protocol:msgtype */
216 rcu_assign_pointer(tab[msgindex], link);
226 * rtnl_register_module - Register a rtnetlink message type
228 * @owner: module registering the hook (THIS_MODULE)
229 * @protocol: Protocol family or PF_UNSPEC
230 * @msgtype: rtnetlink message type
231 * @doit: Function pointer called for each request message
232 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
233 * @flags: rtnl_link_flags to modifiy behaviour of doit/dumpit functions
235 * Like rtnl_register, but for use by removable modules.
237 int rtnl_register_module(struct module *owner,
238 int protocol, int msgtype,
239 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
242 return rtnl_register_internal(owner, protocol, msgtype,
243 doit, dumpit, flags);
245 EXPORT_SYMBOL_GPL(rtnl_register_module);
248 * rtnl_register - Register a rtnetlink message type
249 * @protocol: Protocol family or PF_UNSPEC
250 * @msgtype: rtnetlink message type
251 * @doit: Function pointer called for each request message
252 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
253 * @flags: rtnl_link_flags to modifiy behaviour of doit/dumpit functions
255 * Registers the specified function pointers (at least one of them has
256 * to be non-NULL) to be called whenever a request message for the
257 * specified protocol family and message type is received.
259 * The special protocol family PF_UNSPEC may be used to define fallback
260 * function pointers for the case when no entry for the specific protocol
263 void rtnl_register(int protocol, int msgtype,
264 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
269 err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
272 pr_err("Unable to register rtnetlink message handler, "
273 "protocol = %d, message type = %d\n", protocol, msgtype);
277 * rtnl_unregister - Unregister a rtnetlink message type
278 * @protocol: Protocol family or PF_UNSPEC
279 * @msgtype: rtnetlink message type
281 * Returns 0 on success or a negative error code.
283 int rtnl_unregister(int protocol, int msgtype)
285 struct rtnl_link **tab, *link;
288 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
289 msgindex = rtm_msgindex(msgtype);
292 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
298 link = tab[msgindex];
299 rcu_assign_pointer(tab[msgindex], NULL);
302 kfree_rcu(link, rcu);
306 EXPORT_SYMBOL_GPL(rtnl_unregister);
309 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
310 * @protocol : Protocol family or PF_UNSPEC
312 * Identical to calling rtnl_unregster() for all registered message types
313 * of a certain protocol family.
315 void rtnl_unregister_all(int protocol)
317 struct rtnl_link **tab, *link;
320 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
323 tab = rtnl_msg_handlers[protocol];
324 RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
325 for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
326 link = tab[msgindex];
330 rcu_assign_pointer(tab[msgindex], NULL);
331 kfree_rcu(link, rcu);
339 EXPORT_SYMBOL_GPL(rtnl_unregister_all);
341 static LIST_HEAD(link_ops);
343 static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
345 const struct rtnl_link_ops *ops;
347 list_for_each_entry(ops, &link_ops, list) {
348 if (!strcmp(ops->kind, kind))
355 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
356 * @ops: struct rtnl_link_ops * to register
358 * The caller must hold the rtnl_mutex. This function should be used
359 * by drivers that create devices during module initialization. It
360 * must be called before registering the devices.
362 * Returns 0 on success or a negative error code.
364 int __rtnl_link_register(struct rtnl_link_ops *ops)
366 if (rtnl_link_ops_get(ops->kind))
369 /* The check for setup is here because if ops
370 * does not have that filled up, it is not possible
371 * to use the ops for creating device. So do not
372 * fill up dellink as well. That disables rtnl_dellink.
374 if (ops->setup && !ops->dellink)
375 ops->dellink = unregister_netdevice_queue;
377 list_add_tail(&ops->list, &link_ops);
380 EXPORT_SYMBOL_GPL(__rtnl_link_register);
383 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
384 * @ops: struct rtnl_link_ops * to register
386 * Returns 0 on success or a negative error code.
388 int rtnl_link_register(struct rtnl_link_ops *ops)
393 err = __rtnl_link_register(ops);
397 EXPORT_SYMBOL_GPL(rtnl_link_register);
399 static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
401 struct net_device *dev;
402 LIST_HEAD(list_kill);
404 for_each_netdev(net, dev) {
405 if (dev->rtnl_link_ops == ops)
406 ops->dellink(dev, &list_kill);
408 unregister_netdevice_many(&list_kill);
412 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
413 * @ops: struct rtnl_link_ops * to unregister
415 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
416 * integrity (hold pernet_ops_rwsem for writing to close the race
417 * with setup_net() and cleanup_net()).
419 void __rtnl_link_unregister(struct rtnl_link_ops *ops)
424 __rtnl_kill_links(net, ops);
426 list_del(&ops->list);
428 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
430 /* Return with the rtnl_lock held when there are no network
431 * devices unregistering in any network namespace.
433 static void rtnl_lock_unregistering_all(void)
437 DEFINE_WAIT_FUNC(wait, woken_wake_function);
439 add_wait_queue(&netdev_unregistering_wq, &wait);
441 unregistering = false;
443 /* We held write locked pernet_ops_rwsem, and parallel
444 * setup_net() and cleanup_net() are not possible.
447 if (net->dev_unreg_count > 0) {
448 unregistering = true;
456 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
458 remove_wait_queue(&netdev_unregistering_wq, &wait);
462 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
463 * @ops: struct rtnl_link_ops * to unregister
465 void rtnl_link_unregister(struct rtnl_link_ops *ops)
467 /* Close the race with setup_net() and cleanup_net() */
468 down_write(&pernet_ops_rwsem);
469 rtnl_lock_unregistering_all();
470 __rtnl_link_unregister(ops);
472 up_write(&pernet_ops_rwsem);
474 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
476 static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
478 struct net_device *master_dev;
479 const struct rtnl_link_ops *ops;
484 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
488 ops = master_dev->rtnl_link_ops;
489 if (!ops || !ops->get_slave_size)
491 /* IFLA_INFO_SLAVE_DATA + nested data */
492 size = nla_total_size(sizeof(struct nlattr)) +
493 ops->get_slave_size(master_dev, dev);
500 static size_t rtnl_link_get_size(const struct net_device *dev)
502 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
508 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
509 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
512 /* IFLA_INFO_DATA + nested data */
513 size += nla_total_size(sizeof(struct nlattr)) +
516 if (ops->get_xstats_size)
517 /* IFLA_INFO_XSTATS */
518 size += nla_total_size(ops->get_xstats_size(dev));
520 size += rtnl_link_get_slave_info_data_size(dev);
525 static LIST_HEAD(rtnl_af_ops);
527 static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
529 const struct rtnl_af_ops *ops;
531 list_for_each_entry_rcu(ops, &rtnl_af_ops, list) {
532 if (ops->family == family)
540 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
541 * @ops: struct rtnl_af_ops * to register
543 * Returns 0 on success or a negative error code.
545 void rtnl_af_register(struct rtnl_af_ops *ops)
548 list_add_tail_rcu(&ops->list, &rtnl_af_ops);
551 EXPORT_SYMBOL_GPL(rtnl_af_register);
554 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
555 * @ops: struct rtnl_af_ops * to unregister
557 void rtnl_af_unregister(struct rtnl_af_ops *ops)
560 list_del_rcu(&ops->list);
565 EXPORT_SYMBOL_GPL(rtnl_af_unregister);
567 static size_t rtnl_link_get_af_size(const struct net_device *dev,
570 struct rtnl_af_ops *af_ops;
574 size = nla_total_size(sizeof(struct nlattr));
577 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
578 if (af_ops->get_link_af_size) {
579 /* AF_* + nested data */
580 size += nla_total_size(sizeof(struct nlattr)) +
581 af_ops->get_link_af_size(dev, ext_filter_mask);
589 static bool rtnl_have_link_slave_info(const struct net_device *dev)
591 struct net_device *master_dev;
596 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
597 if (master_dev && master_dev->rtnl_link_ops)
603 static int rtnl_link_slave_info_fill(struct sk_buff *skb,
604 const struct net_device *dev)
606 struct net_device *master_dev;
607 const struct rtnl_link_ops *ops;
608 struct nlattr *slave_data;
611 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
614 ops = master_dev->rtnl_link_ops;
617 if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
619 if (ops->fill_slave_info) {
620 slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
623 err = ops->fill_slave_info(skb, master_dev, dev);
625 goto err_cancel_slave_data;
626 nla_nest_end(skb, slave_data);
630 err_cancel_slave_data:
631 nla_nest_cancel(skb, slave_data);
635 static int rtnl_link_info_fill(struct sk_buff *skb,
636 const struct net_device *dev)
638 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
644 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
646 if (ops->fill_xstats) {
647 err = ops->fill_xstats(skb, dev);
651 if (ops->fill_info) {
652 data = nla_nest_start(skb, IFLA_INFO_DATA);
655 err = ops->fill_info(skb, dev);
657 goto err_cancel_data;
658 nla_nest_end(skb, data);
663 nla_nest_cancel(skb, data);
667 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
669 struct nlattr *linkinfo;
672 linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
673 if (linkinfo == NULL)
676 err = rtnl_link_info_fill(skb, dev);
678 goto err_cancel_link;
680 err = rtnl_link_slave_info_fill(skb, dev);
682 goto err_cancel_link;
684 nla_nest_end(skb, linkinfo);
688 nla_nest_cancel(skb, linkinfo);
693 int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
695 struct sock *rtnl = net->rtnl;
698 NETLINK_CB(skb).dst_group = group;
700 refcount_inc(&skb->users);
701 netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
703 err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
707 int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
709 struct sock *rtnl = net->rtnl;
711 return nlmsg_unicast(rtnl, skb, pid);
713 EXPORT_SYMBOL(rtnl_unicast);
715 void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
716 struct nlmsghdr *nlh, gfp_t flags)
718 struct sock *rtnl = net->rtnl;
722 report = nlmsg_report(nlh);
724 nlmsg_notify(rtnl, skb, pid, group, report, flags);
726 EXPORT_SYMBOL(rtnl_notify);
728 void rtnl_set_sk_err(struct net *net, u32 group, int error)
730 struct sock *rtnl = net->rtnl;
732 netlink_set_err(rtnl, 0, group, error);
734 EXPORT_SYMBOL(rtnl_set_sk_err);
736 int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
741 mx = nla_nest_start(skb, RTA_METRICS);
745 for (i = 0; i < RTAX_MAX; i++) {
747 if (i == RTAX_CC_ALGO - 1) {
748 char tmp[TCP_CA_NAME_MAX], *name;
750 name = tcp_ca_get_name_by_key(metrics[i], tmp);
753 if (nla_put_string(skb, i + 1, name))
754 goto nla_put_failure;
755 } else if (i == RTAX_FEATURES - 1) {
756 u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
760 BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
761 if (nla_put_u32(skb, i + 1, user_features))
762 goto nla_put_failure;
764 if (nla_put_u32(skb, i + 1, metrics[i]))
765 goto nla_put_failure;
772 nla_nest_cancel(skb, mx);
776 return nla_nest_end(skb, mx);
779 nla_nest_cancel(skb, mx);
782 EXPORT_SYMBOL(rtnetlink_put_metrics);
784 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
785 long expires, u32 error)
787 struct rta_cacheinfo ci = {
788 .rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
789 .rta_used = dst->__use,
790 .rta_clntref = atomic_read(&(dst->__refcnt)),
798 clock = jiffies_to_clock_t(abs(expires));
799 clock = min_t(unsigned long, clock, INT_MAX);
800 ci.rta_expires = (expires > 0) ? clock : -clock;
802 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
804 EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
806 static void set_operstate(struct net_device *dev, unsigned char transition)
808 unsigned char operstate = dev->operstate;
810 switch (transition) {
812 if ((operstate == IF_OPER_DORMANT ||
813 operstate == IF_OPER_UNKNOWN) &&
815 operstate = IF_OPER_UP;
818 case IF_OPER_DORMANT:
819 if (operstate == IF_OPER_UP ||
820 operstate == IF_OPER_UNKNOWN)
821 operstate = IF_OPER_DORMANT;
825 if (dev->operstate != operstate) {
826 write_lock_bh(&dev_base_lock);
827 dev->operstate = operstate;
828 write_unlock_bh(&dev_base_lock);
829 netdev_state_change(dev);
833 static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
835 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
836 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
839 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
840 const struct ifinfomsg *ifm)
842 unsigned int flags = ifm->ifi_flags;
844 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
846 flags = (flags & ifm->ifi_change) |
847 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
852 static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
853 const struct rtnl_link_stats64 *b)
855 a->rx_packets = b->rx_packets;
856 a->tx_packets = b->tx_packets;
857 a->rx_bytes = b->rx_bytes;
858 a->tx_bytes = b->tx_bytes;
859 a->rx_errors = b->rx_errors;
860 a->tx_errors = b->tx_errors;
861 a->rx_dropped = b->rx_dropped;
862 a->tx_dropped = b->tx_dropped;
864 a->multicast = b->multicast;
865 a->collisions = b->collisions;
867 a->rx_length_errors = b->rx_length_errors;
868 a->rx_over_errors = b->rx_over_errors;
869 a->rx_crc_errors = b->rx_crc_errors;
870 a->rx_frame_errors = b->rx_frame_errors;
871 a->rx_fifo_errors = b->rx_fifo_errors;
872 a->rx_missed_errors = b->rx_missed_errors;
874 a->tx_aborted_errors = b->tx_aborted_errors;
875 a->tx_carrier_errors = b->tx_carrier_errors;
876 a->tx_fifo_errors = b->tx_fifo_errors;
877 a->tx_heartbeat_errors = b->tx_heartbeat_errors;
878 a->tx_window_errors = b->tx_window_errors;
880 a->rx_compressed = b->rx_compressed;
881 a->tx_compressed = b->tx_compressed;
883 a->rx_nohandler = b->rx_nohandler;
887 static inline int rtnl_vfinfo_size(const struct net_device *dev,
890 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
891 int num_vfs = dev_num_vf(dev->dev.parent);
892 size_t size = nla_total_size(0);
895 nla_total_size(sizeof(struct ifla_vf_mac)) +
896 nla_total_size(sizeof(struct ifla_vf_vlan)) +
897 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
898 nla_total_size(MAX_VLAN_LIST_LEN *
899 sizeof(struct ifla_vf_vlan_info)) +
900 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
901 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
902 nla_total_size(sizeof(struct ifla_vf_rate)) +
903 nla_total_size(sizeof(struct ifla_vf_link_state)) +
904 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
905 nla_total_size(0) + /* nest IFLA_VF_STATS */
906 /* IFLA_VF_STATS_RX_PACKETS */
907 nla_total_size_64bit(sizeof(__u64)) +
908 /* IFLA_VF_STATS_TX_PACKETS */
909 nla_total_size_64bit(sizeof(__u64)) +
910 /* IFLA_VF_STATS_RX_BYTES */
911 nla_total_size_64bit(sizeof(__u64)) +
912 /* IFLA_VF_STATS_TX_BYTES */
913 nla_total_size_64bit(sizeof(__u64)) +
914 /* IFLA_VF_STATS_BROADCAST */
915 nla_total_size_64bit(sizeof(__u64)) +
916 /* IFLA_VF_STATS_MULTICAST */
917 nla_total_size_64bit(sizeof(__u64)) +
918 /* IFLA_VF_STATS_RX_DROPPED */
919 nla_total_size_64bit(sizeof(__u64)) +
920 /* IFLA_VF_STATS_TX_DROPPED */
921 nla_total_size_64bit(sizeof(__u64)) +
922 nla_total_size(sizeof(struct ifla_vf_trust)));
928 static size_t rtnl_port_size(const struct net_device *dev,
931 size_t port_size = nla_total_size(4) /* PORT_VF */
932 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
933 + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
934 + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
935 + nla_total_size(1) /* PROT_VDP_REQUEST */
936 + nla_total_size(2); /* PORT_VDP_RESPONSE */
937 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
938 size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
940 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
943 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
944 !(ext_filter_mask & RTEXT_FILTER_VF))
946 if (dev_num_vf(dev->dev.parent))
947 return port_self_size + vf_ports_size +
948 vf_port_size * dev_num_vf(dev->dev.parent);
950 return port_self_size;
953 static size_t rtnl_xdp_size(void)
955 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
956 nla_total_size(1) + /* XDP_ATTACHED */
957 nla_total_size(4); /* XDP_PROG_ID */
962 static noinline size_t if_nlmsg_size(const struct net_device *dev,
965 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
966 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
967 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
968 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
969 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
970 + nla_total_size(sizeof(struct rtnl_link_stats))
971 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
972 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
973 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
974 + nla_total_size(4) /* IFLA_TXQLEN */
975 + nla_total_size(4) /* IFLA_WEIGHT */
976 + nla_total_size(4) /* IFLA_MTU */
977 + nla_total_size(4) /* IFLA_LINK */
978 + nla_total_size(4) /* IFLA_MASTER */
979 + nla_total_size(1) /* IFLA_CARRIER */
980 + nla_total_size(4) /* IFLA_PROMISCUITY */
981 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
982 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
983 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
984 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
985 + nla_total_size(1) /* IFLA_OPERSTATE */
986 + nla_total_size(1) /* IFLA_LINKMODE */
987 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
988 + nla_total_size(4) /* IFLA_LINK_NETNSID */
989 + nla_total_size(4) /* IFLA_GROUP */
990 + nla_total_size(ext_filter_mask
991 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
992 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
993 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
994 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
995 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
996 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
997 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
998 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
999 + rtnl_xdp_size() /* IFLA_XDP */
1000 + nla_total_size(4) /* IFLA_EVENT */
1001 + nla_total_size(4) /* IFLA_NEW_NETNSID */
1002 + nla_total_size(4) /* IFLA_NEW_IFINDEX */
1003 + nla_total_size(1) /* IFLA_PROTO_DOWN */
1004 + nla_total_size(4) /* IFLA_IF_NETNSID */
1005 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
1006 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
1010 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1012 struct nlattr *vf_ports;
1013 struct nlattr *vf_port;
1017 vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
1021 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
1022 vf_port = nla_nest_start(skb, IFLA_VF_PORT);
1024 goto nla_put_failure;
1025 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
1026 goto nla_put_failure;
1027 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
1028 if (err == -EMSGSIZE)
1029 goto nla_put_failure;
1031 nla_nest_cancel(skb, vf_port);
1034 nla_nest_end(skb, vf_port);
1037 nla_nest_end(skb, vf_ports);
1042 nla_nest_cancel(skb, vf_ports);
1046 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1048 struct nlattr *port_self;
1051 port_self = nla_nest_start(skb, IFLA_PORT_SELF);
1055 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1057 nla_nest_cancel(skb, port_self);
1058 return (err == -EMSGSIZE) ? err : 0;
1061 nla_nest_end(skb, port_self);
1066 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1067 u32 ext_filter_mask)
1071 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1072 !(ext_filter_mask & RTEXT_FILTER_VF))
1075 err = rtnl_port_self_fill(skb, dev);
1079 if (dev_num_vf(dev->dev.parent)) {
1080 err = rtnl_vf_ports_fill(skb, dev);
1088 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1091 struct netdev_phys_item_id ppid;
1093 err = dev_get_phys_port_id(dev, &ppid);
1095 if (err == -EOPNOTSUPP)
1100 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1106 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1108 char name[IFNAMSIZ];
1111 err = dev_get_phys_port_name(dev, name, sizeof(name));
1113 if (err == -EOPNOTSUPP)
1118 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
1124 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1127 struct switchdev_attr attr = {
1129 .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
1130 .flags = SWITCHDEV_F_NO_RECURSE,
1133 err = switchdev_port_attr_get(dev, &attr);
1135 if (err == -EOPNOTSUPP)
1140 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
1147 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1148 struct net_device *dev)
1150 struct rtnl_link_stats64 *sp;
1151 struct nlattr *attr;
1153 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1154 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1158 sp = nla_data(attr);
1159 dev_get_stats(dev, sp);
1161 attr = nla_reserve(skb, IFLA_STATS,
1162 sizeof(struct rtnl_link_stats));
1166 copy_rtnl_link_stats(nla_data(attr), sp);
1171 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1172 struct net_device *dev,
1174 struct nlattr *vfinfo)
1176 struct ifla_vf_rss_query_en vf_rss_query_en;
1177 struct nlattr *vf, *vfstats, *vfvlanlist;
1178 struct ifla_vf_link_state vf_linkstate;
1179 struct ifla_vf_vlan_info vf_vlan_info;
1180 struct ifla_vf_spoofchk vf_spoofchk;
1181 struct ifla_vf_tx_rate vf_tx_rate;
1182 struct ifla_vf_stats vf_stats;
1183 struct ifla_vf_trust vf_trust;
1184 struct ifla_vf_vlan vf_vlan;
1185 struct ifla_vf_rate vf_rate;
1186 struct ifla_vf_mac vf_mac;
1187 struct ifla_vf_info ivi;
1189 memset(&ivi, 0, sizeof(ivi));
1191 /* Not all SR-IOV capable drivers support the
1192 * spoofcheck and "RSS query enable" query. Preset to
1193 * -1 so the user space tool can detect that the driver
1194 * didn't report anything.
1197 ivi.rss_query_en = -1;
1199 /* The default value for VF link state is "auto"
1200 * IFLA_VF_LINK_STATE_AUTO which equals zero
1203 /* VLAN Protocol by default is 802.1Q */
1204 ivi.vlan_proto = htons(ETH_P_8021Q);
1205 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1208 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
1217 vf_rss_query_en.vf =
1218 vf_trust.vf = ivi.vf;
1220 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1221 vf_vlan.vlan = ivi.vlan;
1222 vf_vlan.qos = ivi.qos;
1223 vf_vlan_info.vlan = ivi.vlan;
1224 vf_vlan_info.qos = ivi.qos;
1225 vf_vlan_info.vlan_proto = ivi.vlan_proto;
1226 vf_tx_rate.rate = ivi.max_tx_rate;
1227 vf_rate.min_tx_rate = ivi.min_tx_rate;
1228 vf_rate.max_tx_rate = ivi.max_tx_rate;
1229 vf_spoofchk.setting = ivi.spoofchk;
1230 vf_linkstate.link_state = ivi.linkstate;
1231 vf_rss_query_en.setting = ivi.rss_query_en;
1232 vf_trust.setting = ivi.trusted;
1233 vf = nla_nest_start(skb, IFLA_VF_INFO);
1235 goto nla_put_vfinfo_failure;
1236 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1237 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1238 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1240 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1242 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1244 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1246 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1247 sizeof(vf_rss_query_en),
1248 &vf_rss_query_en) ||
1249 nla_put(skb, IFLA_VF_TRUST,
1250 sizeof(vf_trust), &vf_trust))
1251 goto nla_put_vf_failure;
1252 vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
1254 goto nla_put_vf_failure;
1255 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1257 nla_nest_cancel(skb, vfvlanlist);
1258 goto nla_put_vf_failure;
1260 nla_nest_end(skb, vfvlanlist);
1261 memset(&vf_stats, 0, sizeof(vf_stats));
1262 if (dev->netdev_ops->ndo_get_vf_stats)
1263 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1265 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1267 goto nla_put_vf_failure;
1268 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1269 vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1270 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1271 vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1272 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1273 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1274 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1275 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1276 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1277 vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1278 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1279 vf_stats.multicast, IFLA_VF_STATS_PAD) ||
1280 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
1281 vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
1282 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
1283 vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
1284 nla_nest_cancel(skb, vfstats);
1285 goto nla_put_vf_failure;
1287 nla_nest_end(skb, vfstats);
1288 nla_nest_end(skb, vf);
1292 nla_nest_cancel(skb, vf);
1293 nla_put_vfinfo_failure:
1294 nla_nest_cancel(skb, vfinfo);
1298 static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
1299 struct net_device *dev,
1300 u32 ext_filter_mask)
1302 struct nlattr *vfinfo;
1305 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1308 num_vfs = dev_num_vf(dev->dev.parent);
1309 if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
1312 if (!dev->netdev_ops->ndo_get_vf_config)
1315 vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
1319 for (i = 0; i < num_vfs; i++) {
1320 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
1324 nla_nest_end(skb, vfinfo);
1328 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1330 struct rtnl_link_ifmap map;
1332 memset(&map, 0, sizeof(map));
1333 map.mem_start = dev->mem_start;
1334 map.mem_end = dev->mem_end;
1335 map.base_addr = dev->base_addr;
1338 map.port = dev->if_port;
1340 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1346 static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
1348 const struct net_device_ops *ops = dev->netdev_ops;
1349 const struct bpf_prog *generic_xdp_prog;
1350 struct netdev_bpf xdp;
1355 generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
1356 if (generic_xdp_prog) {
1357 *prog_id = generic_xdp_prog->aux->id;
1358 return XDP_ATTACHED_SKB;
1361 return XDP_ATTACHED_NONE;
1363 __dev_xdp_query(dev, ops->ndo_bpf, &xdp);
1364 *prog_id = xdp.prog_id;
1366 return xdp.prog_attached;
1369 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1375 xdp = nla_nest_start(skb, IFLA_XDP);
1379 err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
1380 rtnl_xdp_attached_mode(dev, &prog_id));
1385 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1390 nla_nest_end(skb, xdp);
1394 nla_nest_cancel(skb, xdp);
1398 static u32 rtnl_get_event(unsigned long event)
1400 u32 rtnl_event_type = IFLA_EVENT_NONE;
1404 rtnl_event_type = IFLA_EVENT_REBOOT;
1406 case NETDEV_FEAT_CHANGE:
1407 rtnl_event_type = IFLA_EVENT_FEATURES;
1409 case NETDEV_BONDING_FAILOVER:
1410 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1412 case NETDEV_NOTIFY_PEERS:
1413 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1415 case NETDEV_RESEND_IGMP:
1416 rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1418 case NETDEV_CHANGEINFODATA:
1419 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1425 return rtnl_event_type;
1428 static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1430 const struct net_device *upper_dev;
1435 upper_dev = netdev_master_upper_dev_get_rcu(dev);
1437 ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);
1443 static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev)
1445 int ifindex = dev_get_iflink(dev);
1447 if (dev->ifindex == ifindex)
1450 return nla_put_u32(skb, IFLA_LINK, ifindex);
1453 static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1454 struct net_device *dev)
1459 ret = dev_get_alias(dev, buf, sizeof(buf));
1460 return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
1463 static int rtnl_fill_link_netnsid(struct sk_buff *skb,
1464 const struct net_device *dev,
1465 struct net *src_net)
1467 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1468 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1470 if (!net_eq(dev_net(dev), link_net)) {
1471 int id = peernet2id_alloc(src_net, link_net);
1473 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1481 static int rtnl_fill_link_af(struct sk_buff *skb,
1482 const struct net_device *dev,
1483 u32 ext_filter_mask)
1485 const struct rtnl_af_ops *af_ops;
1486 struct nlattr *af_spec;
1488 af_spec = nla_nest_start(skb, IFLA_AF_SPEC);
1492 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
1496 if (!af_ops->fill_link_af)
1499 af = nla_nest_start(skb, af_ops->family);
1503 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1505 * Caller may return ENODATA to indicate that there
1506 * was no data to be dumped. This is not an error, it
1507 * means we should trim the attribute header and
1510 if (err == -ENODATA)
1511 nla_nest_cancel(skb, af);
1515 nla_nest_end(skb, af);
1518 nla_nest_end(skb, af_spec);
1522 static int rtnl_fill_ifinfo(struct sk_buff *skb,
1523 struct net_device *dev, struct net *src_net,
1524 int type, u32 pid, u32 seq, u32 change,
1525 unsigned int flags, u32 ext_filter_mask,
1526 u32 event, int *new_nsid, int new_ifindex,
1529 struct ifinfomsg *ifm;
1530 struct nlmsghdr *nlh;
1533 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1537 ifm = nlmsg_data(nlh);
1538 ifm->ifi_family = AF_UNSPEC;
1540 ifm->ifi_type = dev->type;
1541 ifm->ifi_index = dev->ifindex;
1542 ifm->ifi_flags = dev_get_flags(dev);
1543 ifm->ifi_change = change;
1545 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_IF_NETNSID, tgt_netnsid))
1546 goto nla_put_failure;
1548 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1549 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1550 nla_put_u8(skb, IFLA_OPERSTATE,
1551 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1552 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1553 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1554 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1555 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1556 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1557 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1558 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1560 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1562 nla_put_iflink(skb, dev) ||
1563 put_master_ifindex(skb, dev) ||
1564 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1566 nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
1567 nla_put_ifalias(skb, dev) ||
1568 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
1569 atomic_read(&dev->carrier_up_count) +
1570 atomic_read(&dev->carrier_down_count)) ||
1571 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down) ||
1572 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
1573 atomic_read(&dev->carrier_up_count)) ||
1574 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
1575 atomic_read(&dev->carrier_down_count)))
1576 goto nla_put_failure;
1578 if (event != IFLA_EVENT_NONE) {
1579 if (nla_put_u32(skb, IFLA_EVENT, event))
1580 goto nla_put_failure;
1583 if (rtnl_fill_link_ifmap(skb, dev))
1584 goto nla_put_failure;
1586 if (dev->addr_len) {
1587 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1588 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1589 goto nla_put_failure;
1592 if (rtnl_phys_port_id_fill(skb, dev))
1593 goto nla_put_failure;
1595 if (rtnl_phys_port_name_fill(skb, dev))
1596 goto nla_put_failure;
1598 if (rtnl_phys_switch_id_fill(skb, dev))
1599 goto nla_put_failure;
1601 if (rtnl_fill_stats(skb, dev))
1602 goto nla_put_failure;
1604 if (rtnl_fill_vf(skb, dev, ext_filter_mask))
1605 goto nla_put_failure;
1607 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1608 goto nla_put_failure;
1610 if (rtnl_xdp_fill(skb, dev))
1611 goto nla_put_failure;
1613 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1614 if (rtnl_link_fill(skb, dev) < 0)
1615 goto nla_put_failure;
1618 if (rtnl_fill_link_netnsid(skb, dev, src_net))
1619 goto nla_put_failure;
1622 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
1623 goto nla_put_failure;
1625 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
1626 goto nla_put_failure;
1630 if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
1631 goto nla_put_failure_rcu;
1634 nlmsg_end(skb, nlh);
1637 nla_put_failure_rcu:
1640 nlmsg_cancel(skb, nlh);
1644 static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1645 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1646 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1647 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1648 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
1649 [IFLA_MTU] = { .type = NLA_U32 },
1650 [IFLA_LINK] = { .type = NLA_U32 },
1651 [IFLA_MASTER] = { .type = NLA_U32 },
1652 [IFLA_CARRIER] = { .type = NLA_U8 },
1653 [IFLA_TXQLEN] = { .type = NLA_U32 },
1654 [IFLA_WEIGHT] = { .type = NLA_U32 },
1655 [IFLA_OPERSTATE] = { .type = NLA_U8 },
1656 [IFLA_LINKMODE] = { .type = NLA_U8 },
1657 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1658 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1659 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1660 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1661 * allow 0-length string (needed to remove an alias).
1663 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1664 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
1665 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1666 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
1667 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
1668 [IFLA_EXT_MASK] = { .type = NLA_U32 },
1669 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1670 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
1671 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
1672 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 },
1673 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 },
1674 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1675 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
1676 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1677 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
1678 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
1679 [IFLA_XDP] = { .type = NLA_NESTED },
1680 [IFLA_EVENT] = { .type = NLA_U32 },
1681 [IFLA_GROUP] = { .type = NLA_U32 },
1682 [IFLA_IF_NETNSID] = { .type = NLA_S32 },
1683 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
1684 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
1687 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1688 [IFLA_INFO_KIND] = { .type = NLA_STRING },
1689 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
1690 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
1691 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
1694 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1695 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
1696 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
1697 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
1698 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
1699 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
1700 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
1701 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
1702 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
1703 [IFLA_VF_STATS] = { .type = NLA_NESTED },
1704 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
1705 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1706 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1709 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
1710 [IFLA_PORT_VF] = { .type = NLA_U32 },
1711 [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
1712 .len = PORT_PROFILE_MAX },
1713 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
1714 .len = PORT_UUID_MAX },
1715 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
1716 .len = PORT_UUID_MAX },
1717 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
1718 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
1720 /* Unused, but we need to keep it here since user space could
1721 * fill it. It's also broken with regard to NLA_BINARY use in
1722 * combination with structs.
1724 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
1725 .len = sizeof(struct ifla_port_vsi) },
1728 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
1729 [IFLA_XDP_FD] = { .type = NLA_S32 },
1730 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
1731 [IFLA_XDP_FLAGS] = { .type = NLA_U32 },
1732 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
1735 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
1737 const struct rtnl_link_ops *ops = NULL;
1738 struct nlattr *linfo[IFLA_INFO_MAX + 1];
1740 if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla,
1741 ifla_info_policy, NULL) < 0)
1744 if (linfo[IFLA_INFO_KIND]) {
1745 char kind[MODULE_NAME_LEN];
1747 nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
1748 ops = rtnl_link_ops_get(kind);
1754 static bool link_master_filtered(struct net_device *dev, int master_idx)
1756 struct net_device *master;
1761 master = netdev_master_upper_dev_get(dev);
1762 if (!master || master->ifindex != master_idx)
1768 static bool link_kind_filtered(const struct net_device *dev,
1769 const struct rtnl_link_ops *kind_ops)
1771 if (kind_ops && dev->rtnl_link_ops != kind_ops)
1777 static bool link_dump_filtered(struct net_device *dev,
1779 const struct rtnl_link_ops *kind_ops)
1781 if (link_master_filtered(dev, master_idx) ||
1782 link_kind_filtered(dev, kind_ops))
1788 static struct net *get_target_net(struct sock *sk, int netnsid)
1792 net = get_net_ns_by_id(sock_net(sk), netnsid);
1794 return ERR_PTR(-EINVAL);
1796 /* For now, the caller is required to have CAP_NET_ADMIN in
1797 * the user namespace owning the target net ns.
1799 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
1801 return ERR_PTR(-EACCES);
1806 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1808 struct net *net = sock_net(skb->sk);
1809 struct net *tgt_net = net;
1812 struct net_device *dev;
1813 struct hlist_head *head;
1814 struct nlattr *tb[IFLA_MAX+1];
1815 u32 ext_filter_mask = 0;
1816 const struct rtnl_link_ops *kind_ops = NULL;
1817 unsigned int flags = NLM_F_MULTI;
1824 s_idx = cb->args[1];
1826 /* A hack to preserve kernel<->userspace interface.
1827 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
1828 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
1829 * what iproute2 < v3.9.0 used.
1830 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
1831 * attribute, its netlink message is shorter than struct ifinfomsg.
1833 hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
1834 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
1836 if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX,
1837 ifla_policy, NULL) >= 0) {
1838 if (tb[IFLA_IF_NETNSID]) {
1839 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
1840 tgt_net = get_target_net(skb->sk, netnsid);
1841 if (IS_ERR(tgt_net)) {
1847 if (tb[IFLA_EXT_MASK])
1848 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
1850 if (tb[IFLA_MASTER])
1851 master_idx = nla_get_u32(tb[IFLA_MASTER]);
1853 if (tb[IFLA_LINKINFO])
1854 kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);
1856 if (master_idx || kind_ops)
1857 flags |= NLM_F_DUMP_FILTERED;
1860 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1862 head = &tgt_net->dev_index_head[h];
1863 hlist_for_each_entry(dev, head, index_hlist) {
1864 if (link_dump_filtered(dev, master_idx, kind_ops))
1868 err = rtnl_fill_ifinfo(skb, dev, net,
1870 NETLINK_CB(cb->skb).portid,
1871 cb->nlh->nlmsg_seq, 0,
1873 ext_filter_mask, 0, NULL, 0,
1877 if (likely(skb->len))
1891 cb->seq = net->dev_base_seq;
1892 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1899 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
1900 struct netlink_ext_ack *exterr)
1902 return nla_parse(tb, IFLA_MAX, head, len, ifla_policy, exterr);
1904 EXPORT_SYMBOL(rtnl_nla_parse_ifla);
1906 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
1909 /* Examine the link attributes and figure out which
1910 * network namespace we are talking about.
1912 if (tb[IFLA_NET_NS_PID])
1913 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
1914 else if (tb[IFLA_NET_NS_FD])
1915 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
1917 net = get_net(src_net);
1920 EXPORT_SYMBOL(rtnl_link_get_net);
1922 /* Figure out which network namespace we are talking about by
1923 * examining the link attributes in the following order:
1925 * 1. IFLA_NET_NS_PID
1927 * 3. IFLA_IF_NETNSID
1929 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
1930 struct nlattr *tb[])
1934 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
1935 return rtnl_link_get_net(src_net, tb);
1937 if (!tb[IFLA_IF_NETNSID])
1938 return get_net(src_net);
1940 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_IF_NETNSID]));
1942 return ERR_PTR(-EINVAL);
1947 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
1948 struct net *src_net,
1949 struct nlattr *tb[], int cap)
1953 net = rtnl_link_get_net_by_nlattr(src_net, tb);
1957 if (!netlink_ns_capable(skb, net->user_ns, cap)) {
1959 return ERR_PTR(-EPERM);
1965 /* Verify that rtnetlink requests do not pass additional properties
1966 * potentially referring to different network namespaces.
1968 static int rtnl_ensure_unique_netns(struct nlattr *tb[],
1969 struct netlink_ext_ack *extack,
1973 if (netns_id_only) {
1974 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
1977 NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
1981 if (tb[IFLA_IF_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
1984 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_FD]))
1987 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_PID]))
1993 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
1997 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
2000 if (tb[IFLA_ADDRESS] &&
2001 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2004 if (tb[IFLA_BROADCAST] &&
2005 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2009 if (tb[IFLA_AF_SPEC]) {
2013 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2014 const struct rtnl_af_ops *af_ops;
2017 af_ops = rtnl_af_lookup(nla_type(af));
2020 return -EAFNOSUPPORT;
2023 if (!af_ops->set_link_af) {
2028 if (af_ops->validate_link_af) {
2029 err = af_ops->validate_link_af(dev, af);
2043 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2046 const struct net_device_ops *ops = dev->netdev_ops;
2048 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2051 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2053 if (dev->type != ARPHRD_INFINIBAND)
2056 return handle_infiniband_guid(dev, ivt, guid_type);
2059 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2061 const struct net_device_ops *ops = dev->netdev_ops;
2064 if (tb[IFLA_VF_MAC]) {
2065 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
2068 if (ops->ndo_set_vf_mac)
2069 err = ops->ndo_set_vf_mac(dev, ivm->vf,
2075 if (tb[IFLA_VF_VLAN]) {
2076 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2079 if (ops->ndo_set_vf_vlan)
2080 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2082 htons(ETH_P_8021Q));
2087 if (tb[IFLA_VF_VLAN_LIST]) {
2088 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2089 struct nlattr *attr;
2093 if (!ops->ndo_set_vf_vlan)
2096 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2097 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2098 nla_len(attr) < NLA_HDRLEN) {
2101 if (len >= MAX_VLAN_LIST_LEN)
2103 ivvl[len] = nla_data(attr);
2110 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2111 ivvl[0]->qos, ivvl[0]->vlan_proto);
2116 if (tb[IFLA_VF_TX_RATE]) {
2117 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2118 struct ifla_vf_info ivf;
2121 if (ops->ndo_get_vf_config)
2122 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2127 if (ops->ndo_set_vf_rate)
2128 err = ops->ndo_set_vf_rate(dev, ivt->vf,
2135 if (tb[IFLA_VF_RATE]) {
2136 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2139 if (ops->ndo_set_vf_rate)
2140 err = ops->ndo_set_vf_rate(dev, ivt->vf,
2147 if (tb[IFLA_VF_SPOOFCHK]) {
2148 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2151 if (ops->ndo_set_vf_spoofchk)
2152 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2158 if (tb[IFLA_VF_LINK_STATE]) {
2159 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2162 if (ops->ndo_set_vf_link_state)
2163 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2169 if (tb[IFLA_VF_RSS_QUERY_EN]) {
2170 struct ifla_vf_rss_query_en *ivrssq_en;
2173 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
2174 if (ops->ndo_set_vf_rss_query_en)
2175 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2176 ivrssq_en->setting);
2181 if (tb[IFLA_VF_TRUST]) {
2182 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2185 if (ops->ndo_set_vf_trust)
2186 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2191 if (tb[IFLA_VF_IB_NODE_GUID]) {
2192 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2194 if (!ops->ndo_set_vf_guid)
2197 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2200 if (tb[IFLA_VF_IB_PORT_GUID]) {
2201 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2203 if (!ops->ndo_set_vf_guid)
2206 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2212 static int do_set_master(struct net_device *dev, int ifindex,
2213 struct netlink_ext_ack *extack)
2215 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2216 const struct net_device_ops *ops;
2220 if (upper_dev->ifindex == ifindex)
2222 ops = upper_dev->netdev_ops;
2223 if (ops->ndo_del_slave) {
2224 err = ops->ndo_del_slave(upper_dev, dev);
2233 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2236 ops = upper_dev->netdev_ops;
2237 if (ops->ndo_add_slave) {
2238 err = ops->ndo_add_slave(upper_dev, dev, extack);
2248 #define DO_SETLINK_MODIFIED 0x01
2249 /* notify flag means notify + modified. */
2250 #define DO_SETLINK_NOTIFY 0x03
2251 static int do_setlink(const struct sk_buff *skb,
2252 struct net_device *dev, struct ifinfomsg *ifm,
2253 struct netlink_ext_ack *extack,
2254 struct nlattr **tb, char *ifname, int status)
2256 const struct net_device_ops *ops = dev->netdev_ops;
2259 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_IF_NETNSID]) {
2260 struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
2267 err = dev_change_net_namespace(dev, net, ifname);
2271 status |= DO_SETLINK_MODIFIED;
2275 struct rtnl_link_ifmap *u_map;
2278 if (!ops->ndo_set_config) {
2283 if (!netif_device_present(dev)) {
2288 u_map = nla_data(tb[IFLA_MAP]);
2289 k_map.mem_start = (unsigned long) u_map->mem_start;
2290 k_map.mem_end = (unsigned long) u_map->mem_end;
2291 k_map.base_addr = (unsigned short) u_map->base_addr;
2292 k_map.irq = (unsigned char) u_map->irq;
2293 k_map.dma = (unsigned char) u_map->dma;
2294 k_map.port = (unsigned char) u_map->port;
2296 err = ops->ndo_set_config(dev, &k_map);
2300 status |= DO_SETLINK_NOTIFY;
2303 if (tb[IFLA_ADDRESS]) {
2304 struct sockaddr *sa;
2307 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2309 sa = kmalloc(len, GFP_KERNEL);
2314 sa->sa_family = dev->type;
2315 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2317 err = dev_set_mac_address(dev, sa);
2321 status |= DO_SETLINK_MODIFIED;
2325 err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
2328 status |= DO_SETLINK_MODIFIED;
2331 if (tb[IFLA_GROUP]) {
2332 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2333 status |= DO_SETLINK_NOTIFY;
2337 * Interface selected by interface index but interface
2338 * name provided implies that a name change has been
2341 if (ifm->ifi_index > 0 && ifname[0]) {
2342 err = dev_change_name(dev, ifname);
2345 status |= DO_SETLINK_MODIFIED;
2348 if (tb[IFLA_IFALIAS]) {
2349 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2350 nla_len(tb[IFLA_IFALIAS]));
2353 status |= DO_SETLINK_NOTIFY;
2356 if (tb[IFLA_BROADCAST]) {
2357 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2358 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2361 if (ifm->ifi_flags || ifm->ifi_change) {
2362 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
2367 if (tb[IFLA_MASTER]) {
2368 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2371 status |= DO_SETLINK_MODIFIED;
2374 if (tb[IFLA_CARRIER]) {
2375 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2378 status |= DO_SETLINK_MODIFIED;
2381 if (tb[IFLA_TXQLEN]) {
2382 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2384 err = dev_change_tx_queue_len(dev, value);
2387 status |= DO_SETLINK_MODIFIED;
2390 if (tb[IFLA_GSO_MAX_SIZE]) {
2391 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2393 if (max_size > GSO_MAX_SIZE) {
2398 if (dev->gso_max_size ^ max_size) {
2399 netif_set_gso_max_size(dev, max_size);
2400 status |= DO_SETLINK_MODIFIED;
2404 if (tb[IFLA_GSO_MAX_SEGS]) {
2405 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2407 if (max_segs > GSO_MAX_SEGS) {
2412 if (dev->gso_max_segs ^ max_segs) {
2413 dev->gso_max_segs = max_segs;
2414 status |= DO_SETLINK_MODIFIED;
2418 if (tb[IFLA_OPERSTATE])
2419 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2421 if (tb[IFLA_LINKMODE]) {
2422 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2424 write_lock_bh(&dev_base_lock);
2425 if (dev->link_mode ^ value)
2426 status |= DO_SETLINK_NOTIFY;
2427 dev->link_mode = value;
2428 write_unlock_bh(&dev_base_lock);
2431 if (tb[IFLA_VFINFO_LIST]) {
2432 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2433 struct nlattr *attr;
2436 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2437 if (nla_type(attr) != IFLA_VF_INFO ||
2438 nla_len(attr) < NLA_HDRLEN) {
2442 err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
2443 ifla_vf_policy, NULL);
2446 err = do_setvfinfo(dev, vfinfo);
2449 status |= DO_SETLINK_NOTIFY;
2454 if (tb[IFLA_VF_PORTS]) {
2455 struct nlattr *port[IFLA_PORT_MAX+1];
2456 struct nlattr *attr;
2461 if (!ops->ndo_set_vf_port)
2464 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2465 if (nla_type(attr) != IFLA_VF_PORT ||
2466 nla_len(attr) < NLA_HDRLEN) {
2470 err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
2471 ifla_port_policy, NULL);
2474 if (!port[IFLA_PORT_VF]) {
2478 vf = nla_get_u32(port[IFLA_PORT_VF]);
2479 err = ops->ndo_set_vf_port(dev, vf, port);
2482 status |= DO_SETLINK_NOTIFY;
2487 if (tb[IFLA_PORT_SELF]) {
2488 struct nlattr *port[IFLA_PORT_MAX+1];
2490 err = nla_parse_nested(port, IFLA_PORT_MAX,
2491 tb[IFLA_PORT_SELF], ifla_port_policy,
2497 if (ops->ndo_set_vf_port)
2498 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
2501 status |= DO_SETLINK_NOTIFY;
2504 if (tb[IFLA_AF_SPEC]) {
2508 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2509 const struct rtnl_af_ops *af_ops;
2513 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
2515 err = af_ops->set_link_af(dev, af);
2522 status |= DO_SETLINK_NOTIFY;
2527 if (tb[IFLA_PROTO_DOWN]) {
2528 err = dev_change_proto_down(dev,
2529 nla_get_u8(tb[IFLA_PROTO_DOWN]));
2532 status |= DO_SETLINK_NOTIFY;
2536 struct nlattr *xdp[IFLA_XDP_MAX + 1];
2539 err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
2540 ifla_xdp_policy, NULL);
2544 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
2549 if (xdp[IFLA_XDP_FLAGS]) {
2550 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
2551 if (xdp_flags & ~XDP_FLAGS_MASK) {
2555 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
2561 if (xdp[IFLA_XDP_FD]) {
2562 err = dev_change_xdp_fd(dev, extack,
2563 nla_get_s32(xdp[IFLA_XDP_FD]),
2567 status |= DO_SETLINK_NOTIFY;
2572 if (status & DO_SETLINK_MODIFIED) {
2573 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
2574 netdev_state_change(dev);
2577 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
2584 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2585 struct netlink_ext_ack *extack)
2587 struct net *net = sock_net(skb->sk);
2588 struct ifinfomsg *ifm;
2589 struct net_device *dev;
2591 struct nlattr *tb[IFLA_MAX+1];
2592 char ifname[IFNAMSIZ];
2594 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
2599 err = rtnl_ensure_unique_netns(tb, extack, false);
2603 if (tb[IFLA_IFNAME])
2604 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2609 ifm = nlmsg_data(nlh);
2610 if (ifm->ifi_index > 0)
2611 dev = __dev_get_by_index(net, ifm->ifi_index);
2612 else if (tb[IFLA_IFNAME])
2613 dev = __dev_get_by_name(net, ifname);
2622 err = validate_linkmsg(dev, tb);
2626 err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
2631 static int rtnl_group_dellink(const struct net *net, int group)
2633 struct net_device *dev, *aux;
2634 LIST_HEAD(list_kill);
2640 for_each_netdev(net, dev) {
2641 if (dev->group == group) {
2642 const struct rtnl_link_ops *ops;
2645 ops = dev->rtnl_link_ops;
2646 if (!ops || !ops->dellink)
2654 for_each_netdev_safe(net, dev, aux) {
2655 if (dev->group == group) {
2656 const struct rtnl_link_ops *ops;
2658 ops = dev->rtnl_link_ops;
2659 ops->dellink(dev, &list_kill);
2662 unregister_netdevice_many(&list_kill);
2667 int rtnl_delete_link(struct net_device *dev)
2669 const struct rtnl_link_ops *ops;
2670 LIST_HEAD(list_kill);
2672 ops = dev->rtnl_link_ops;
2673 if (!ops || !ops->dellink)
2676 ops->dellink(dev, &list_kill);
2677 unregister_netdevice_many(&list_kill);
2681 EXPORT_SYMBOL_GPL(rtnl_delete_link);
2683 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
2684 struct netlink_ext_ack *extack)
2686 struct net *net = sock_net(skb->sk);
2687 struct net *tgt_net = net;
2688 struct net_device *dev = NULL;
2689 struct ifinfomsg *ifm;
2690 char ifname[IFNAMSIZ];
2691 struct nlattr *tb[IFLA_MAX+1];
2695 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
2699 err = rtnl_ensure_unique_netns(tb, extack, true);
2703 if (tb[IFLA_IFNAME])
2704 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2706 if (tb[IFLA_IF_NETNSID]) {
2707 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
2708 tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
2709 if (IS_ERR(tgt_net))
2710 return PTR_ERR(tgt_net);
2714 ifm = nlmsg_data(nlh);
2715 if (ifm->ifi_index > 0)
2716 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
2717 else if (tb[IFLA_IFNAME])
2718 dev = __dev_get_by_name(tgt_net, ifname);
2719 else if (tb[IFLA_GROUP])
2720 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
2725 if (tb[IFLA_IFNAME] || ifm->ifi_index > 0)
2731 err = rtnl_delete_link(dev);
2740 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
2742 unsigned int old_flags;
2745 old_flags = dev->flags;
2746 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
2747 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
2752 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
2754 __dev_notify_flags(dev, old_flags, ~0U);
2757 EXPORT_SYMBOL(rtnl_configure_link);
2759 struct net_device *rtnl_create_link(struct net *net,
2760 const char *ifname, unsigned char name_assign_type,
2761 const struct rtnl_link_ops *ops, struct nlattr *tb[])
2763 struct net_device *dev;
2764 unsigned int num_tx_queues = 1;
2765 unsigned int num_rx_queues = 1;
2767 if (tb[IFLA_NUM_TX_QUEUES])
2768 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
2769 else if (ops->get_num_tx_queues)
2770 num_tx_queues = ops->get_num_tx_queues();
2772 if (tb[IFLA_NUM_RX_QUEUES])
2773 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
2774 else if (ops->get_num_rx_queues)
2775 num_rx_queues = ops->get_num_rx_queues();
2777 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
2778 ops->setup, num_tx_queues, num_rx_queues);
2780 return ERR_PTR(-ENOMEM);
2782 dev_net_set(dev, net);
2783 dev->rtnl_link_ops = ops;
2784 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
2787 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
2788 if (tb[IFLA_ADDRESS]) {
2789 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
2790 nla_len(tb[IFLA_ADDRESS]));
2791 dev->addr_assign_type = NET_ADDR_SET;
2793 if (tb[IFLA_BROADCAST])
2794 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
2795 nla_len(tb[IFLA_BROADCAST]));
2796 if (tb[IFLA_TXQLEN])
2797 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
2798 if (tb[IFLA_OPERSTATE])
2799 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2800 if (tb[IFLA_LINKMODE])
2801 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
2803 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2804 if (tb[IFLA_GSO_MAX_SIZE])
2805 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
2806 if (tb[IFLA_GSO_MAX_SEGS])
2807 dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2811 EXPORT_SYMBOL(rtnl_create_link);
2813 static int rtnl_group_changelink(const struct sk_buff *skb,
2814 struct net *net, int group,
2815 struct ifinfomsg *ifm,
2816 struct netlink_ext_ack *extack,
2819 struct net_device *dev, *aux;
2822 for_each_netdev_safe(net, dev, aux) {
2823 if (dev->group == group) {
2824 err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
2833 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2834 struct netlink_ext_ack *extack)
2836 struct net *net = sock_net(skb->sk);
2837 const struct rtnl_link_ops *ops;
2838 const struct rtnl_link_ops *m_ops = NULL;
2839 struct net_device *dev;
2840 struct net_device *master_dev = NULL;
2841 struct ifinfomsg *ifm;
2842 char kind[MODULE_NAME_LEN];
2843 char ifname[IFNAMSIZ];
2844 struct nlattr *tb[IFLA_MAX+1];
2845 struct nlattr *linkinfo[IFLA_INFO_MAX+1];
2846 unsigned char name_assign_type = NET_NAME_USER;
2849 #ifdef CONFIG_MODULES
2852 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
2856 err = rtnl_ensure_unique_netns(tb, extack, false);
2860 if (tb[IFLA_IFNAME])
2861 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2865 ifm = nlmsg_data(nlh);
2866 if (ifm->ifi_index > 0)
2867 dev = __dev_get_by_index(net, ifm->ifi_index);
2870 dev = __dev_get_by_name(net, ifname);
2876 master_dev = netdev_master_upper_dev_get(dev);
2878 m_ops = master_dev->rtnl_link_ops;
2881 err = validate_linkmsg(dev, tb);
2885 if (tb[IFLA_LINKINFO]) {
2886 err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
2887 tb[IFLA_LINKINFO], ifla_info_policy,
2892 memset(linkinfo, 0, sizeof(linkinfo));
2894 if (linkinfo[IFLA_INFO_KIND]) {
2895 nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
2896 ops = rtnl_link_ops_get(kind);
2903 struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
2904 struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
2905 struct nlattr **data = NULL;
2906 struct nlattr **slave_data = NULL;
2907 struct net *dest_net, *link_net = NULL;
2910 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
2911 err = nla_parse_nested(attr, ops->maxtype,
2912 linkinfo[IFLA_INFO_DATA],
2918 if (ops->validate) {
2919 err = ops->validate(tb, data, extack);
2926 if (m_ops->slave_maxtype &&
2927 linkinfo[IFLA_INFO_SLAVE_DATA]) {
2928 err = nla_parse_nested(slave_attr,
2929 m_ops->slave_maxtype,
2930 linkinfo[IFLA_INFO_SLAVE_DATA],
2931 m_ops->slave_policy,
2935 slave_data = slave_attr;
2942 if (nlh->nlmsg_flags & NLM_F_EXCL)
2944 if (nlh->nlmsg_flags & NLM_F_REPLACE)
2947 if (linkinfo[IFLA_INFO_DATA]) {
2948 if (!ops || ops != dev->rtnl_link_ops ||
2952 err = ops->changelink(dev, tb, data, extack);
2955 status |= DO_SETLINK_NOTIFY;
2958 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
2959 if (!m_ops || !m_ops->slave_changelink)
2962 err = m_ops->slave_changelink(master_dev, dev,
2967 status |= DO_SETLINK_NOTIFY;
2970 return do_setlink(skb, dev, ifm, extack, tb, ifname,
2974 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2975 if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
2976 return rtnl_group_changelink(skb, net,
2977 nla_get_u32(tb[IFLA_GROUP]),
2982 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
2986 #ifdef CONFIG_MODULES
2989 request_module("rtnl-link-%s", kind);
2991 ops = rtnl_link_ops_get(kind);
3003 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3004 name_assign_type = NET_NAME_ENUM;
3007 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3008 if (IS_ERR(dest_net))
3009 return PTR_ERR(dest_net);
3011 if (tb[IFLA_LINK_NETNSID]) {
3012 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3014 link_net = get_net_ns_by_id(dest_net, id);
3020 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3024 dev = rtnl_create_link(link_net ? : dest_net, ifname,
3025 name_assign_type, ops, tb);
3031 dev->ifindex = ifm->ifi_index;
3034 err = ops->newlink(link_net ? : net, dev, tb, data,
3036 /* Drivers should call free_netdev() in ->destructor
3037 * and unregister it on failure after registration
3038 * so that device could be finally freed in rtnl_unlock.
3041 /* If device is not registered at all, free it now */
3042 if (dev->reg_state == NETREG_UNINITIALIZED)
3047 err = register_netdevice(dev);
3053 err = rtnl_configure_link(dev, ifm);
3055 goto out_unregister;
3057 err = dev_change_net_namespace(dev, dest_net, ifname);
3059 goto out_unregister;
3061 if (tb[IFLA_MASTER]) {
3062 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]),
3065 goto out_unregister;
3074 LIST_HEAD(list_kill);
3076 ops->dellink(dev, &list_kill);
3077 unregister_netdevice_many(&list_kill);
3079 unregister_netdevice(dev);
3085 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3086 struct netlink_ext_ack *extack)
3088 struct net *net = sock_net(skb->sk);
3089 struct net *tgt_net = net;
3090 struct ifinfomsg *ifm;
3091 char ifname[IFNAMSIZ];
3092 struct nlattr *tb[IFLA_MAX+1];
3093 struct net_device *dev = NULL;
3094 struct sk_buff *nskb;
3097 u32 ext_filter_mask = 0;
3099 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
3103 err = rtnl_ensure_unique_netns(tb, extack, true);
3107 if (tb[IFLA_IF_NETNSID]) {
3108 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
3109 tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
3110 if (IS_ERR(tgt_net))
3111 return PTR_ERR(tgt_net);
3114 if (tb[IFLA_IFNAME])
3115 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3117 if (tb[IFLA_EXT_MASK])
3118 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3121 ifm = nlmsg_data(nlh);
3122 if (ifm->ifi_index > 0)
3123 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3124 else if (tb[IFLA_IFNAME])
3125 dev = __dev_get_by_name(tgt_net, ifname);
3134 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
3138 err = rtnl_fill_ifinfo(nskb, dev, net,
3139 RTM_NEWLINK, NETLINK_CB(skb).portid,
3140 nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3141 0, NULL, 0, netnsid);
3143 /* -EMSGSIZE implies BUG in if_nlmsg_size */
3144 WARN_ON(err == -EMSGSIZE);
3147 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
3155 static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
3157 struct net *net = sock_net(skb->sk);
3158 struct net_device *dev;
3159 struct nlattr *tb[IFLA_MAX+1];
3160 u32 ext_filter_mask = 0;
3161 u16 min_ifinfo_dump_size = 0;
3164 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3165 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3166 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
3168 if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
3169 if (tb[IFLA_EXT_MASK])
3170 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3173 if (!ext_filter_mask)
3174 return NLMSG_GOODSIZE;
3176 * traverse the list of net devices and compute the minimum
3177 * buffer size based upon the filter mask.
3180 for_each_netdev_rcu(net, dev) {
3181 min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
3187 return nlmsg_total_size(min_ifinfo_dump_size);
3190 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
3193 int s_idx = cb->family;
3198 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
3199 struct rtnl_link **tab;
3200 int type = cb->nlh->nlmsg_type-RTM_BASE;
3201 struct rtnl_link *link;
3202 rtnl_dumpit_func dumpit;
3204 if (idx < s_idx || idx == PF_PACKET)
3207 if (type < 0 || type >= RTM_NR_MSGTYPES)
3210 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
3218 dumpit = link->dumpit;
3223 memset(&cb->args[0], 0, sizeof(cb->args));
3227 if (dumpit(skb, cb))
3235 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3236 unsigned int change,
3237 u32 event, gfp_t flags, int *new_nsid,
3240 struct net *net = dev_net(dev);
3241 struct sk_buff *skb;
3243 size_t if_info_size;
3245 skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
3249 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3250 type, 0, 0, change, 0, 0, event,
3251 new_nsid, new_ifindex, -1);
3253 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
3254 WARN_ON(err == -EMSGSIZE);
3261 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3265 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
3267 struct net *net = dev_net(dev);
3269 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
3272 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
3273 unsigned int change, u32 event,
3274 gfp_t flags, int *new_nsid, int new_ifindex)
3276 struct sk_buff *skb;
3278 if (dev->reg_state != NETREG_REGISTERED)
3281 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
3284 rtmsg_ifinfo_send(skb, dev, flags);
3287 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
3290 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3294 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
3295 gfp_t flags, int *new_nsid, int new_ifindex)
3297 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3298 new_nsid, new_ifindex);
3301 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
3302 struct net_device *dev,
3303 u8 *addr, u16 vid, u32 pid, u32 seq,
3304 int type, unsigned int flags,
3305 int nlflags, u16 ndm_state)
3307 struct nlmsghdr *nlh;
3310 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
3314 ndm = nlmsg_data(nlh);
3315 ndm->ndm_family = AF_BRIDGE;
3318 ndm->ndm_flags = flags;
3320 ndm->ndm_ifindex = dev->ifindex;
3321 ndm->ndm_state = ndm_state;
3323 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
3324 goto nla_put_failure;
3326 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
3327 goto nla_put_failure;
3329 nlmsg_end(skb, nlh);
3333 nlmsg_cancel(skb, nlh);
3337 static inline size_t rtnl_fdb_nlmsg_size(void)
3339 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
3340 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
3341 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
3345 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
3348 struct net *net = dev_net(dev);
3349 struct sk_buff *skb;
3352 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
3356 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
3357 0, 0, type, NTF_SELF, 0, ndm_state);
3363 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3366 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3370 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
3372 int ndo_dflt_fdb_add(struct ndmsg *ndm,
3373 struct nlattr *tb[],
3374 struct net_device *dev,
3375 const unsigned char *addr, u16 vid,
3380 /* If aging addresses are supported device will need to
3381 * implement its own handler for this.
3383 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
3384 pr_info("%s: FDB only supports static addresses\n", dev->name);
3389 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
3393 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3394 err = dev_uc_add_excl(dev, addr);
3395 else if (is_multicast_ether_addr(addr))
3396 err = dev_mc_add_excl(dev, addr);
3398 /* Only return duplicate errors if NLM_F_EXCL is set */
3399 if (err == -EEXIST && !(flags & NLM_F_EXCL))
3404 EXPORT_SYMBOL(ndo_dflt_fdb_add);
3406 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
3407 struct netlink_ext_ack *extack)
3412 if (nla_len(vlan_attr) != sizeof(u16)) {
3413 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
3417 vid = nla_get_u16(vlan_attr);
3419 if (!vid || vid >= VLAN_VID_MASK) {
3420 NL_SET_ERR_MSG(extack, "invalid vlan id");
3428 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
3429 struct netlink_ext_ack *extack)
3431 struct net *net = sock_net(skb->sk);
3433 struct nlattr *tb[NDA_MAX+1];
3434 struct net_device *dev;
3439 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
3443 ndm = nlmsg_data(nlh);
3444 if (ndm->ndm_ifindex == 0) {
3445 NL_SET_ERR_MSG(extack, "invalid ifindex");
3449 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
3451 NL_SET_ERR_MSG(extack, "unknown ifindex");
3455 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3456 NL_SET_ERR_MSG(extack, "invalid address");
3460 addr = nla_data(tb[NDA_LLADDR]);
3462 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
3468 /* Support fdb on master device the net/bridge default case */
3469 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
3470 (dev->priv_flags & IFF_BRIDGE_PORT)) {
3471 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3472 const struct net_device_ops *ops = br_dev->netdev_ops;
3474 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
3479 ndm->ndm_flags &= ~NTF_MASTER;
3482 /* Embedded bridge, macvlan, and any other device support */
3483 if ((ndm->ndm_flags & NTF_SELF)) {
3484 if (dev->netdev_ops->ndo_fdb_add)
3485 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
3489 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
3493 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
3495 ndm->ndm_flags &= ~NTF_SELF;
3503 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
3505 int ndo_dflt_fdb_del(struct ndmsg *ndm,
3506 struct nlattr *tb[],
3507 struct net_device *dev,
3508 const unsigned char *addr, u16 vid)
3512 /* If aging addresses are supported device will need to
3513 * implement its own handler for this.
3515 if (!(ndm->ndm_state & NUD_PERMANENT)) {
3516 pr_info("%s: FDB only supports static addresses\n", dev->name);
3520 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3521 err = dev_uc_del(dev, addr);
3522 else if (is_multicast_ether_addr(addr))
3523 err = dev_mc_del(dev, addr);
3527 EXPORT_SYMBOL(ndo_dflt_fdb_del);
3529 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
3530 struct netlink_ext_ack *extack)
3532 struct net *net = sock_net(skb->sk);
3534 struct nlattr *tb[NDA_MAX+1];
3535 struct net_device *dev;
3540 if (!netlink_capable(skb, CAP_NET_ADMIN))
3543 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
3547 ndm = nlmsg_data(nlh);
3548 if (ndm->ndm_ifindex == 0) {
3549 NL_SET_ERR_MSG(extack, "invalid ifindex");
3553 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
3555 NL_SET_ERR_MSG(extack, "unknown ifindex");
3559 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3560 NL_SET_ERR_MSG(extack, "invalid address");
3564 addr = nla_data(tb[NDA_LLADDR]);
3566 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
3572 /* Support fdb on master device the net/bridge default case */
3573 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
3574 (dev->priv_flags & IFF_BRIDGE_PORT)) {
3575 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3576 const struct net_device_ops *ops = br_dev->netdev_ops;
3578 if (ops->ndo_fdb_del)
3579 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
3584 ndm->ndm_flags &= ~NTF_MASTER;
3587 /* Embedded bridge, macvlan, and any other device support */
3588 if (ndm->ndm_flags & NTF_SELF) {
3589 if (dev->netdev_ops->ndo_fdb_del)
3590 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
3593 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
3596 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
3598 ndm->ndm_flags &= ~NTF_SELF;
3605 static int nlmsg_populate_fdb(struct sk_buff *skb,
3606 struct netlink_callback *cb,
3607 struct net_device *dev,
3609 struct netdev_hw_addr_list *list)
3611 struct netdev_hw_addr *ha;
3615 portid = NETLINK_CB(cb->skb).portid;
3616 seq = cb->nlh->nlmsg_seq;
3618 list_for_each_entry(ha, &list->list, list) {
3619 if (*idx < cb->args[2])
3622 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
3624 RTM_NEWNEIGH, NTF_SELF,
3625 NLM_F_MULTI, NUD_PERMANENT);
3635 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
3636 * @nlh: netlink message header
3639 * Default netdevice operation to dump the existing unicast address list.
3640 * Returns number of addresses from list put in skb.
3642 int ndo_dflt_fdb_dump(struct sk_buff *skb,
3643 struct netlink_callback *cb,
3644 struct net_device *dev,
3645 struct net_device *filter_dev,
3650 netif_addr_lock_bh(dev);
3651 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
3654 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
3656 netif_addr_unlock_bh(dev);
3659 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
3661 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
3663 struct net_device *dev;
3664 struct nlattr *tb[IFLA_MAX+1];
3665 struct net_device *br_dev = NULL;
3666 const struct net_device_ops *ops = NULL;
3667 const struct net_device_ops *cops = NULL;
3668 struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
3669 struct net *net = sock_net(skb->sk);
3670 struct hlist_head *head;
3678 err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
3679 IFLA_MAX, ifla_policy, NULL);
3682 } else if (err == 0) {
3683 if (tb[IFLA_MASTER])
3684 br_idx = nla_get_u32(tb[IFLA_MASTER]);
3687 brport_idx = ifm->ifi_index;
3690 br_dev = __dev_get_by_index(net, br_idx);
3694 ops = br_dev->netdev_ops;
3698 s_idx = cb->args[1];
3700 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
3702 head = &net->dev_index_head[h];
3703 hlist_for_each_entry(dev, head, index_hlist) {
3705 if (brport_idx && (dev->ifindex != brport_idx))
3708 if (!br_idx) { /* user did not specify a specific bridge */
3709 if (dev->priv_flags & IFF_BRIDGE_PORT) {
3710 br_dev = netdev_master_upper_dev_get(dev);
3711 cops = br_dev->netdev_ops;
3714 if (dev != br_dev &&
3715 !(dev->priv_flags & IFF_BRIDGE_PORT))
3718 if (br_dev != netdev_master_upper_dev_get(dev) &&
3719 !(dev->priv_flags & IFF_EBRIDGE))
3727 if (dev->priv_flags & IFF_BRIDGE_PORT) {
3728 if (cops && cops->ndo_fdb_dump) {
3729 err = cops->ndo_fdb_dump(skb, cb,
3732 if (err == -EMSGSIZE)
3737 if (dev->netdev_ops->ndo_fdb_dump)
3738 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
3742 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
3744 if (err == -EMSGSIZE)
3749 /* reset fdb offset to 0 for rest of the interfaces */
3765 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
3766 unsigned int attrnum, unsigned int flag)
3769 return nla_put_u8(skb, attrnum, !!(flags & flag));
3773 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3774 struct net_device *dev, u16 mode,
3775 u32 flags, u32 mask, int nlflags,
3777 int (*vlan_fill)(struct sk_buff *skb,
3778 struct net_device *dev,
3781 struct nlmsghdr *nlh;
3782 struct ifinfomsg *ifm;
3783 struct nlattr *br_afspec;
3784 struct nlattr *protinfo;
3785 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
3786 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3789 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
3793 ifm = nlmsg_data(nlh);
3794 ifm->ifi_family = AF_BRIDGE;
3796 ifm->ifi_type = dev->type;
3797 ifm->ifi_index = dev->ifindex;
3798 ifm->ifi_flags = dev_get_flags(dev);
3799 ifm->ifi_change = 0;
3802 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
3803 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
3804 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
3806 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
3808 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
3809 (dev->ifindex != dev_get_iflink(dev) &&
3810 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
3811 goto nla_put_failure;
3813 br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
3815 goto nla_put_failure;
3817 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
3818 nla_nest_cancel(skb, br_afspec);
3819 goto nla_put_failure;
3822 if (mode != BRIDGE_MODE_UNDEF) {
3823 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
3824 nla_nest_cancel(skb, br_afspec);
3825 goto nla_put_failure;
3829 err = vlan_fill(skb, dev, filter_mask);
3831 nla_nest_cancel(skb, br_afspec);
3832 goto nla_put_failure;
3835 nla_nest_end(skb, br_afspec);
3837 protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
3839 goto nla_put_failure;
3841 if (brport_nla_put_flag(skb, flags, mask,
3842 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
3843 brport_nla_put_flag(skb, flags, mask,
3844 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
3845 brport_nla_put_flag(skb, flags, mask,
3846 IFLA_BRPORT_FAST_LEAVE,
3847 BR_MULTICAST_FAST_LEAVE) ||
3848 brport_nla_put_flag(skb, flags, mask,
3849 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
3850 brport_nla_put_flag(skb, flags, mask,
3851 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
3852 brport_nla_put_flag(skb, flags, mask,
3853 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
3854 brport_nla_put_flag(skb, flags, mask,
3855 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
3856 brport_nla_put_flag(skb, flags, mask,
3857 IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
3858 nla_nest_cancel(skb, protinfo);
3859 goto nla_put_failure;
3862 nla_nest_end(skb, protinfo);
3864 nlmsg_end(skb, nlh);
3867 nlmsg_cancel(skb, nlh);
3868 return err ? err : -EMSGSIZE;
3870 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
3872 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
3874 struct net *net = sock_net(skb->sk);
3875 struct net_device *dev;
3877 u32 portid = NETLINK_CB(cb->skb).portid;
3878 u32 seq = cb->nlh->nlmsg_seq;
3879 u32 filter_mask = 0;
3882 if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
3883 struct nlattr *extfilt;
3885 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
3888 if (nla_len(extfilt) < sizeof(filter_mask))
3891 filter_mask = nla_get_u32(extfilt);
3896 for_each_netdev_rcu(net, dev) {
3897 const struct net_device_ops *ops = dev->netdev_ops;
3898 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3900 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
3901 if (idx >= cb->args[0]) {
3902 err = br_dev->netdev_ops->ndo_bridge_getlink(
3903 skb, portid, seq, dev,
3904 filter_mask, NLM_F_MULTI);
3905 if (err < 0 && err != -EOPNOTSUPP) {
3906 if (likely(skb->len))
3915 if (ops->ndo_bridge_getlink) {
3916 if (idx >= cb->args[0]) {
3917 err = ops->ndo_bridge_getlink(skb, portid,
3921 if (err < 0 && err != -EOPNOTSUPP) {
3922 if (likely(skb->len))
3939 static inline size_t bridge_nlmsg_size(void)
3941 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
3942 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
3943 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
3944 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
3945 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
3946 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
3947 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
3948 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
3949 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
3950 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
3951 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
3954 static int rtnl_bridge_notify(struct net_device *dev)
3956 struct net *net = dev_net(dev);
3957 struct sk_buff *skb;
3958 int err = -EOPNOTSUPP;
3960 if (!dev->netdev_ops->ndo_bridge_getlink)
3963 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
3969 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
3976 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
3979 WARN_ON(err == -EMSGSIZE);
3982 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3986 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3987 struct netlink_ext_ack *extack)
3989 struct net *net = sock_net(skb->sk);
3990 struct ifinfomsg *ifm;
3991 struct net_device *dev;
3992 struct nlattr *br_spec, *attr = NULL;
3993 int rem, err = -EOPNOTSUPP;
3995 bool have_flags = false;
3997 if (nlmsg_len(nlh) < sizeof(*ifm))
4000 ifm = nlmsg_data(nlh);
4001 if (ifm->ifi_family != AF_BRIDGE)
4002 return -EPFNOSUPPORT;
4004 dev = __dev_get_by_index(net, ifm->ifi_index);
4006 NL_SET_ERR_MSG(extack, "unknown ifindex");
4010 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4012 nla_for_each_nested(attr, br_spec, rem) {
4013 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
4014 if (nla_len(attr) < sizeof(flags))
4018 flags = nla_get_u16(attr);
4024 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
4025 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4027 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
4032 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
4036 flags &= ~BRIDGE_FLAGS_MASTER;
4039 if ((flags & BRIDGE_FLAGS_SELF)) {
4040 if (!dev->netdev_ops->ndo_bridge_setlink)
4043 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
4046 flags &= ~BRIDGE_FLAGS_SELF;
4048 /* Generate event to notify upper layer of bridge
4051 err = rtnl_bridge_notify(dev);
4056 memcpy(nla_data(attr), &flags, sizeof(flags));
4061 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
4062 struct netlink_ext_ack *extack)
4064 struct net *net = sock_net(skb->sk);
4065 struct ifinfomsg *ifm;
4066 struct net_device *dev;
4067 struct nlattr *br_spec, *attr = NULL;
4068 int rem, err = -EOPNOTSUPP;
4070 bool have_flags = false;
4072 if (nlmsg_len(nlh) < sizeof(*ifm))
4075 ifm = nlmsg_data(nlh);
4076 if (ifm->ifi_family != AF_BRIDGE)
4077 return -EPFNOSUPPORT;
4079 dev = __dev_get_by_index(net, ifm->ifi_index);
4081 NL_SET_ERR_MSG(extack, "unknown ifindex");
4085 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4087 nla_for_each_nested(attr, br_spec, rem) {
4088 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
4089 if (nla_len(attr) < sizeof(flags))
4093 flags = nla_get_u16(attr);
4099 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
4100 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4102 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
4107 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
4111 flags &= ~BRIDGE_FLAGS_MASTER;
4114 if ((flags & BRIDGE_FLAGS_SELF)) {
4115 if (!dev->netdev_ops->ndo_bridge_dellink)
4118 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
4122 flags &= ~BRIDGE_FLAGS_SELF;
4124 /* Generate event to notify upper layer of bridge
4127 err = rtnl_bridge_notify(dev);
4132 memcpy(nla_data(attr), &flags, sizeof(flags));
4137 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
4139 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
4140 (!idxattr || idxattr == attrid);
4143 #define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
4144 static int rtnl_get_offload_stats_attr_size(int attr_id)
4147 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
4148 return sizeof(struct rtnl_link_stats64);
4154 static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
4157 struct nlattr *attr = NULL;
4162 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
4163 dev->netdev_ops->ndo_get_offload_stats))
4166 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
4167 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
4168 if (attr_id < *prividx)
4171 size = rtnl_get_offload_stats_attr_size(attr_id);
4175 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
4178 attr = nla_reserve_64bit(skb, attr_id, size,
4179 IFLA_OFFLOAD_XSTATS_UNSPEC);
4181 goto nla_put_failure;
4183 attr_data = nla_data(attr);
4184 memset(attr_data, 0, size);
4185 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
4188 goto get_offload_stats_failure;
4199 get_offload_stats_failure:
4204 static int rtnl_get_offload_stats_size(const struct net_device *dev)
4210 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
4211 dev->netdev_ops->ndo_get_offload_stats))
4214 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
4215 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
4216 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
4218 size = rtnl_get_offload_stats_attr_size(attr_id);
4219 nla_size += nla_total_size_64bit(size);
4223 nla_size += nla_total_size(0);
4228 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
4229 int type, u32 pid, u32 seq, u32 change,
4230 unsigned int flags, unsigned int filter_mask,
4231 int *idxattr, int *prividx)
4233 struct if_stats_msg *ifsm;
4234 struct nlmsghdr *nlh;
4235 struct nlattr *attr;
4236 int s_prividx = *prividx;
4241 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
4245 ifsm = nlmsg_data(nlh);
4246 ifsm->family = PF_UNSPEC;
4249 ifsm->ifindex = dev->ifindex;
4250 ifsm->filter_mask = filter_mask;
4252 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
4253 struct rtnl_link_stats64 *sp;
4255 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
4256 sizeof(struct rtnl_link_stats64),
4259 goto nla_put_failure;
4261 sp = nla_data(attr);
4262 dev_get_stats(dev, sp);
4265 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
4266 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
4268 if (ops && ops->fill_linkxstats) {
4269 *idxattr = IFLA_STATS_LINK_XSTATS;
4270 attr = nla_nest_start(skb,
4271 IFLA_STATS_LINK_XSTATS);
4273 goto nla_put_failure;
4275 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
4276 nla_nest_end(skb, attr);
4278 goto nla_put_failure;
4283 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
4285 const struct rtnl_link_ops *ops = NULL;
4286 const struct net_device *master;
4288 master = netdev_master_upper_dev_get(dev);
4290 ops = master->rtnl_link_ops;
4291 if (ops && ops->fill_linkxstats) {
4292 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
4293 attr = nla_nest_start(skb,
4294 IFLA_STATS_LINK_XSTATS_SLAVE);
4296 goto nla_put_failure;
4298 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
4299 nla_nest_end(skb, attr);
4301 goto nla_put_failure;
4306 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
4308 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
4309 attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
4311 goto nla_put_failure;
4313 err = rtnl_get_offload_stats(skb, dev, prividx);
4314 if (err == -ENODATA)
4315 nla_nest_cancel(skb, attr);
4317 nla_nest_end(skb, attr);
4319 if (err && err != -ENODATA)
4320 goto nla_put_failure;
4324 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
4325 struct rtnl_af_ops *af_ops;
4327 *idxattr = IFLA_STATS_AF_SPEC;
4328 attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC);
4330 goto nla_put_failure;
4333 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
4334 if (af_ops->fill_stats_af) {
4338 af = nla_nest_start(skb, af_ops->family);
4341 goto nla_put_failure;
4343 err = af_ops->fill_stats_af(skb, dev);
4345 if (err == -ENODATA) {
4346 nla_nest_cancel(skb, af);
4347 } else if (err < 0) {
4349 goto nla_put_failure;
4352 nla_nest_end(skb, af);
4357 nla_nest_end(skb, attr);
4362 nlmsg_end(skb, nlh);
4367 /* not a multi message or no progress mean a real error */
4368 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
4369 nlmsg_cancel(skb, nlh);
4371 nlmsg_end(skb, nlh);
4376 static size_t if_nlmsg_stats_size(const struct net_device *dev,
4381 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
4382 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
4384 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
4385 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
4386 int attr = IFLA_STATS_LINK_XSTATS;
4388 if (ops && ops->get_linkxstats_size) {
4389 size += nla_total_size(ops->get_linkxstats_size(dev,
4391 /* for IFLA_STATS_LINK_XSTATS */
4392 size += nla_total_size(0);
4396 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
4397 struct net_device *_dev = (struct net_device *)dev;
4398 const struct rtnl_link_ops *ops = NULL;
4399 const struct net_device *master;
4401 /* netdev_master_upper_dev_get can't take const */
4402 master = netdev_master_upper_dev_get(_dev);
4404 ops = master->rtnl_link_ops;
4405 if (ops && ops->get_linkxstats_size) {
4406 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
4408 size += nla_total_size(ops->get_linkxstats_size(dev,
4410 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
4411 size += nla_total_size(0);
4415 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
4416 size += rtnl_get_offload_stats_size(dev);
4418 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
4419 struct rtnl_af_ops *af_ops;
4421 /* for IFLA_STATS_AF_SPEC */
4422 size += nla_total_size(0);
4425 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
4426 if (af_ops->get_stats_af_size) {
4427 size += nla_total_size(
4428 af_ops->get_stats_af_size(dev));
4431 size += nla_total_size(0);
4440 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
4441 struct netlink_ext_ack *extack)
4443 struct net *net = sock_net(skb->sk);
4444 struct net_device *dev = NULL;
4445 int idxattr = 0, prividx = 0;
4446 struct if_stats_msg *ifsm;
4447 struct sk_buff *nskb;
4451 if (nlmsg_len(nlh) < sizeof(*ifsm))
4454 ifsm = nlmsg_data(nlh);
4455 if (ifsm->ifindex > 0)
4456 dev = __dev_get_by_index(net, ifsm->ifindex);
4463 filter_mask = ifsm->filter_mask;
4467 nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
4471 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
4472 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
4473 0, filter_mask, &idxattr, &prividx);
4475 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
4476 WARN_ON(err == -EMSGSIZE);
4479 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
4485 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
4487 int h, s_h, err, s_idx, s_idxattr, s_prividx;
4488 struct net *net = sock_net(skb->sk);
4489 unsigned int flags = NLM_F_MULTI;
4490 struct if_stats_msg *ifsm;
4491 struct hlist_head *head;
4492 struct net_device *dev;
4493 u32 filter_mask = 0;
4497 s_idx = cb->args[1];
4498 s_idxattr = cb->args[2];
4499 s_prividx = cb->args[3];
4501 cb->seq = net->dev_base_seq;
4503 if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
4506 ifsm = nlmsg_data(cb->nlh);
4507 filter_mask = ifsm->filter_mask;
4511 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4513 head = &net->dev_index_head[h];
4514 hlist_for_each_entry(dev, head, index_hlist) {
4517 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
4518 NETLINK_CB(cb->skb).portid,
4519 cb->nlh->nlmsg_seq, 0,
4521 &s_idxattr, &s_prividx);
4522 /* If we ran out of room on the first message,
4525 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
4531 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
4537 cb->args[3] = s_prividx;
4538 cb->args[2] = s_idxattr;
4545 /* Process one rtnetlink message. */
4547 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
4548 struct netlink_ext_ack *extack)
4550 struct net *net = sock_net(skb->sk);
4551 struct rtnl_link *link;
4552 struct module *owner;
4553 int err = -EOPNOTSUPP;
4554 rtnl_doit_func doit;
4560 type = nlh->nlmsg_type;
4566 /* All the messages must have at least 1 byte length */
4567 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
4570 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
4573 if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
4577 if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
4579 rtnl_dumpit_func dumpit;
4580 u16 min_dump_alloc = 0;
4582 link = rtnl_get_link(family, type);
4583 if (!link || !link->dumpit) {
4585 link = rtnl_get_link(family, type);
4586 if (!link || !link->dumpit)
4589 owner = link->owner;
4590 dumpit = link->dumpit;
4592 if (type == RTM_GETLINK - RTM_BASE)
4593 min_dump_alloc = rtnl_calcit(skb, nlh);
4596 /* need to do this before rcu_read_unlock() */
4597 if (!try_module_get(owner))
4598 err = -EPROTONOSUPPORT;
4604 struct netlink_dump_control c = {
4606 .min_dump_alloc = min_dump_alloc,
4609 err = netlink_dump_start(rtnl, skb, nlh, &c);
4610 /* netlink_dump_start() will keep a reference on
4611 * module if dump is still in progress.
4618 link = rtnl_get_link(family, type);
4619 if (!link || !link->doit) {
4621 link = rtnl_get_link(PF_UNSPEC, type);
4622 if (!link || !link->doit)
4626 owner = link->owner;
4627 if (!try_module_get(owner)) {
4628 err = -EPROTONOSUPPORT;
4632 flags = link->flags;
4633 if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
4637 err = doit(skb, nlh, extack);
4644 link = rtnl_get_link(family, type);
4645 if (link && link->doit)
4646 err = link->doit(skb, nlh, extack);
4662 static void rtnetlink_rcv(struct sk_buff *skb)
4664 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
4667 static int rtnetlink_bind(struct net *net, int group)
4670 case RTNLGRP_IPV4_MROUTE_R:
4671 case RTNLGRP_IPV6_MROUTE_R:
4672 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4679 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
4681 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4685 case NETDEV_CHANGEMTU:
4686 case NETDEV_CHANGEADDR:
4687 case NETDEV_CHANGENAME:
4688 case NETDEV_FEAT_CHANGE:
4689 case NETDEV_BONDING_FAILOVER:
4690 case NETDEV_POST_TYPE_CHANGE:
4691 case NETDEV_NOTIFY_PEERS:
4692 case NETDEV_CHANGEUPPER:
4693 case NETDEV_RESEND_IGMP:
4694 case NETDEV_CHANGEINFODATA:
4695 case NETDEV_CHANGELOWERSTATE:
4696 case NETDEV_CHANGE_TX_QUEUE_LEN:
4697 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
4698 GFP_KERNEL, NULL, 0);
4706 static struct notifier_block rtnetlink_dev_notifier = {
4707 .notifier_call = rtnetlink_event,
4711 static int __net_init rtnetlink_net_init(struct net *net)
4714 struct netlink_kernel_cfg cfg = {
4715 .groups = RTNLGRP_MAX,
4716 .input = rtnetlink_rcv,
4717 .cb_mutex = &rtnl_mutex,
4718 .flags = NL_CFG_F_NONROOT_RECV,
4719 .bind = rtnetlink_bind,
4722 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
4729 static void __net_exit rtnetlink_net_exit(struct net *net)
4731 netlink_kernel_release(net->rtnl);
4735 static struct pernet_operations rtnetlink_net_ops = {
4736 .init = rtnetlink_net_init,
4737 .exit = rtnetlink_net_exit,
4740 void __init rtnetlink_init(void)
4742 if (register_pernet_subsys(&rtnetlink_net_ops))
4743 panic("rtnetlink_init: cannot initialize rtnetlink\n");
4745 register_netdevice_notifier(&rtnetlink_dev_notifier);
4747 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
4748 rtnl_dump_ifinfo, 0);
4749 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
4750 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
4751 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
4753 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
4754 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
4755 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
4757 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
4758 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
4759 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, 0);
4761 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
4762 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
4763 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
4765 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,