2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Routing netlink socket interface: protocol independent part.
8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
16 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
19 #include <linux/bitops.h>
20 #include <linux/errno.h>
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/socket.h>
24 #include <linux/kernel.h>
25 #include <linux/timer.h>
26 #include <linux/string.h>
27 #include <linux/sockios.h>
28 #include <linux/net.h>
29 #include <linux/fcntl.h>
31 #include <linux/slab.h>
32 #include <linux/interrupt.h>
33 #include <linux/capability.h>
34 #include <linux/skbuff.h>
35 #include <linux/init.h>
36 #include <linux/security.h>
37 #include <linux/mutex.h>
38 #include <linux/if_addr.h>
39 #include <linux/if_bridge.h>
40 #include <linux/if_vlan.h>
41 #include <linux/pci.h>
42 #include <linux/etherdevice.h>
43 #include <linux/bpf.h>
45 #include <linux/uaccess.h>
47 #include <linux/inet.h>
48 #include <linux/netdevice.h>
50 #include <net/protocol.h>
52 #include <net/route.h>
56 #include <net/pkt_sched.h>
57 #include <net/fib_rules.h>
58 #include <net/rtnetlink.h>
59 #include <net/net_namespace.h>
61 #define RTNL_MAX_TYPE 50
62 #define RTNL_SLAVE_MAX_TYPE 36
struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};
72 static DEFINE_MUTEX(rtnl_mutex);
76 mutex_lock(&rtnl_mutex);
78 EXPORT_SYMBOL(rtnl_lock);
80 int rtnl_lock_killable(void)
82 return mutex_lock_killable(&rtnl_mutex);
84 EXPORT_SYMBOL(rtnl_lock_killable);
86 static struct sk_buff *defer_kfree_skb_list;
87 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
90 tail->next = defer_kfree_skb_list;
91 defer_kfree_skb_list = head;
94 EXPORT_SYMBOL(rtnl_kfree_skbs);
96 void __rtnl_unlock(void)
98 struct sk_buff *head = defer_kfree_skb_list;
100 defer_kfree_skb_list = NULL;
102 mutex_unlock(&rtnl_mutex);
105 struct sk_buff *next = head->next;
113 void rtnl_unlock(void)
115 /* This fellow will unlock it for us. */
118 EXPORT_SYMBOL(rtnl_unlock);
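/* Usage sketch (illustrative, not part of this file): the usual RTNL
 * critical-section pattern, plus rtnl_kfree_skbs() to defer freeing of
 * skbs collected while the lock is held; __rtnl_unlock() releases them
 * after dropping rtnl_mutex. foo_reconfigure(), dev and skb are
 * hypothetical.
 *
 *	rtnl_lock();
 *	err = foo_reconfigure(dev);	// state protected by rtnl_mutex
 *	rtnl_kfree_skbs(skb, skb);	// freed after the unlock below
 *	rtnl_unlock();
 */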
120 int rtnl_trylock(void)
122 return mutex_trylock(&rtnl_mutex);
124 EXPORT_SYMBOL(rtnl_trylock);
126 int rtnl_is_locked(void)
128 return mutex_is_locked(&rtnl_mutex);
130 EXPORT_SYMBOL(rtnl_is_locked);
132 bool refcount_dec_and_rtnl_lock(refcount_t *r)
134 return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
136 EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
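/* Usage sketch (illustrative): drop a reference and, only if it was the
 * last one, return with rtnl_mutex held so the teardown can run under
 * RTNL. "obj", its refcnt and foo_destroy() are hypothetical.
 *
 *	if (refcount_dec_and_rtnl_lock(&obj->refcnt)) {
 *		foo_destroy(obj);	// runs with rtnl_mutex held
 *		rtnl_unlock();
 *	}
 */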
138 #ifdef CONFIG_PROVE_LOCKING
139 bool lockdep_rtnl_is_held(void)
141 return lockdep_is_held(&rtnl_mutex);
143 EXPORT_SYMBOL(lockdep_rtnl_is_held);
144 #endif /* #ifdef CONFIG_PROVE_LOCKING */
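/* lockdep_rtnl_is_held() backs the rtnl_dereference()/rcu_dereference_rtnl()
 * annotations in <net/rtnetlink.h>. Sketch, with a hypothetical pointer:
 *
 *	struct foo __rcu *foo_ptr;
 *
 *	rtnl_lock();
 *	f = rtnl_dereference(foo_ptr);	// checked by lockdep when
 *					// CONFIG_PROVE_LOCKING is enabled
 *	rtnl_unlock();
 */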
146 static struct rtnl_link *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
148 static inline int rtm_msgindex(int msgtype)
150 int msgindex = msgtype - RTM_BASE;
153 * msgindex < 0 implies someone tried to register a netlink
154 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
155 * the message type has not been added to linux/rtnetlink.h
157 BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
162 static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
164 struct rtnl_link **tab;
166 if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
167 protocol = PF_UNSPEC;
169 tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
171 tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
176 static int rtnl_register_internal(struct module *owner,
177 int protocol, int msgtype,
178 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
181 struct rtnl_link *link, *old;
182 struct rtnl_link __rcu **tab;
186 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
187 msgindex = rtm_msgindex(msgtype);
190 tab = rtnl_msg_handlers[protocol];
192 tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
196 /* ensures we see the 0 stores */
197 rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
200 old = rtnl_dereference(tab[msgindex]);
202 link = kmemdup(old, sizeof(*old), GFP_KERNEL);
206 link = kzalloc(sizeof(*link), GFP_KERNEL);
211 WARN_ON(link->owner && link->owner != owner);
214 WARN_ON(doit && link->doit && link->doit != doit);
217 WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
219 link->dumpit = dumpit;
221 link->flags |= flags;
223 /* publish protocol:msgtype */
224 rcu_assign_pointer(tab[msgindex], link);
234 * rtnl_register_module - Register a rtnetlink message type
236 * @owner: module registering the hook (THIS_MODULE)
237 * @protocol: Protocol family or PF_UNSPEC
238 * @msgtype: rtnetlink message type
239 * @doit: Function pointer called for each request message
240 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
241 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
243 * Like rtnl_register, but for use by removable modules.
245 int rtnl_register_module(struct module *owner,
246 int protocol, int msgtype,
247 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
250 return rtnl_register_internal(owner, protocol, msgtype,
251 doit, dumpit, flags);
253 EXPORT_SYMBOL_GPL(rtnl_register_module);
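/* Usage sketch (illustrative): a loadable module wiring up one doit handler
 * with rtnl_register_module() on init and removing it on exit. The handler
 * and the PF_MPLS/RTM_NEWROUTE pair are placeholders; a real module picks
 * the family/msgtype it owns.
 *
 *	static int foo_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
 *				struct netlink_ext_ack *extack)
 *	{
 *		// parse nlh, apply the change, return 0 or a negative errno
 *		return 0;
 *	}
 *
 *	static int __init foo_init(void)
 *	{
 *		return rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_NEWROUTE,
 *					    foo_newroute, NULL, 0);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		rtnl_unregister(PF_MPLS, RTM_NEWROUTE);
 *	}
 */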
256 * rtnl_register - Register a rtnetlink message type
257 * @protocol: Protocol family or PF_UNSPEC
258 * @msgtype: rtnetlink message type
259 * @doit: Function pointer called for each request message
260 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
261 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
263 * Registers the specified function pointers (at least one of them has
264 * to be non-NULL) to be called whenever a request message for the
265 * specified protocol family and message type is received.
267 * The special protocol family PF_UNSPEC may be used to define fallback
268 * function pointers for the case when no entry for the specific protocol family exists.
271 void rtnl_register(int protocol, int msgtype,
272 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
277 err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
280 pr_err("Unable to register rtnetlink message handler, protocol = %d, message type = %d\n",
281        protocol, msgtype);
285 * rtnl_unregister - Unregister a rtnetlink message type
286 * @protocol: Protocol family or PF_UNSPEC
287 * @msgtype: rtnetlink message type
289 * Returns 0 on success or a negative error code.
291 int rtnl_unregister(int protocol, int msgtype)
293 struct rtnl_link **tab, *link;
296 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
297 msgindex = rtm_msgindex(msgtype);
300 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
306 link = tab[msgindex];
307 rcu_assign_pointer(tab[msgindex], NULL);
310 kfree_rcu(link, rcu);
314 EXPORT_SYMBOL_GPL(rtnl_unregister);
317 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
318 * @protocol : Protocol family or PF_UNSPEC
320 * Identical to calling rtnl_unregister() for all registered message types
321 * of a certain protocol family.
323 void rtnl_unregister_all(int protocol)
325 struct rtnl_link **tab, *link;
328 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
331 tab = rtnl_msg_handlers[protocol];
336 RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
337 for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
338 link = tab[msgindex];
342 rcu_assign_pointer(tab[msgindex], NULL);
343 kfree_rcu(link, rcu);
351 EXPORT_SYMBOL_GPL(rtnl_unregister_all);
353 static LIST_HEAD(link_ops);
355 static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
357 const struct rtnl_link_ops *ops;
359 list_for_each_entry(ops, &link_ops, list) {
360 if (!strcmp(ops->kind, kind))
367 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
368 * @ops: struct rtnl_link_ops * to register
370 * The caller must hold the rtnl_mutex. This function should be used
371 * by drivers that create devices during module initialization. It
372 * must be called before registering the devices.
374 * Returns 0 on success or a negative error code.
376 int __rtnl_link_register(struct rtnl_link_ops *ops)
378 if (rtnl_link_ops_get(ops->kind))
381 /* The check for setup is here because if ops
382 * does not have ->setup filled in, it cannot be used
383 * to create a device. In that case do not fill in
384 * ->dellink either, which disables rtnl_dellink().
386 if (ops->setup && !ops->dellink)
387 ops->dellink = unregister_netdevice_queue;
389 list_add_tail(&ops->list, &link_ops);
392 EXPORT_SYMBOL_GPL(__rtnl_link_register);
395 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
396 * @ops: struct rtnl_link_ops * to register
398 * Returns 0 on success or a negative error code.
400 int rtnl_link_register(struct rtnl_link_ops *ops)
404 /* Sanity-check max sizes to avoid stack buffer overflow. */
405 if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
406 ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
410 err = __rtnl_link_register(ops);
414 EXPORT_SYMBOL_GPL(rtnl_link_register);
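/* Usage sketch (illustrative): a minimal rtnl_link_ops for a software
 * device type, registered from module init; compare drivers/net/dummy.c.
 * The "foo" names are hypothetical.
 *
 *	static void foo_setup(struct net_device *dev)
 *	{
 *		ether_setup(dev);
 *		// set dev->netdev_ops, features, destructor, ...
 *	}
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind	= "foo",
 *		.setup	= foo_setup,
 *		// .dellink defaults to unregister_netdevice_queue() because
 *		// .setup is set (see __rtnl_link_register() above)
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return rtnl_link_register(&foo_link_ops);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		rtnl_link_unregister(&foo_link_ops);
 *	}
 */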
416 static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
418 struct net_device *dev;
419 LIST_HEAD(list_kill);
421 for_each_netdev(net, dev) {
422 if (dev->rtnl_link_ops == ops)
423 ops->dellink(dev, &list_kill);
425 unregister_netdevice_many(&list_kill);
429 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
430 * @ops: struct rtnl_link_ops * to unregister
432 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
433 * integrity (hold pernet_ops_rwsem for writing to close the race
434 * with setup_net() and cleanup_net()).
436 void __rtnl_link_unregister(struct rtnl_link_ops *ops)
441 __rtnl_kill_links(net, ops);
443 list_del(&ops->list);
445 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
447 /* Return with the rtnl_lock held when there are no network
448 * devices unregistering in any network namespace.
450 static void rtnl_lock_unregistering_all(void)
454 DEFINE_WAIT_FUNC(wait, woken_wake_function);
456 add_wait_queue(&netdev_unregistering_wq, &wait);
458 unregistering = false;
460 /* We hold pernet_ops_rwsem locked for writing, so parallel
461 * setup_net() and cleanup_net() are not possible.
464 if (net->dev_unreg_count > 0) {
465 unregistering = true;
473 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
475 remove_wait_queue(&netdev_unregistering_wq, &wait);
479 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
480 * @ops: struct rtnl_link_ops * to unregister
482 void rtnl_link_unregister(struct rtnl_link_ops *ops)
484 /* Close the race with setup_net() and cleanup_net() */
485 down_write(&pernet_ops_rwsem);
486 rtnl_lock_unregistering_all();
487 __rtnl_link_unregister(ops);
489 up_write(&pernet_ops_rwsem);
491 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
493 static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
495 struct net_device *master_dev;
496 const struct rtnl_link_ops *ops;
501 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
505 ops = master_dev->rtnl_link_ops;
506 if (!ops || !ops->get_slave_size)
508 /* IFLA_INFO_SLAVE_DATA + nested data */
509 size = nla_total_size(sizeof(struct nlattr)) +
510 ops->get_slave_size(master_dev, dev);
517 static size_t rtnl_link_get_size(const struct net_device *dev)
519 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
525 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
526 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
529 /* IFLA_INFO_DATA + nested data */
530 size += nla_total_size(sizeof(struct nlattr)) +
533 if (ops->get_xstats_size)
534 /* IFLA_INFO_XSTATS */
535 size += nla_total_size(ops->get_xstats_size(dev));
537 size += rtnl_link_get_slave_info_data_size(dev);
542 static LIST_HEAD(rtnl_af_ops);
544 static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
546 const struct rtnl_af_ops *ops;
548 list_for_each_entry_rcu(ops, &rtnl_af_ops, list) {
549 if (ops->family == family)
557 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
558 * @ops: struct rtnl_af_ops * to register
560 * Returns 0 on success or a negative error code.
562 void rtnl_af_register(struct rtnl_af_ops *ops)
565 list_add_tail_rcu(&ops->list, &rtnl_af_ops);
568 EXPORT_SYMBOL_GPL(rtnl_af_register);
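/* Usage sketch (illustrative): an address family hooking into IFLA_AF_SPEC
 * handling, as IPv4/IPv6 do (e.g. inet_af_ops in net/ipv4/devinet.c). The
 * foo_* callbacks and the family value are placeholders.
 *
 *	static struct rtnl_af_ops foo_af_ops __read_mostly = {
 *		.family		  = AF_INET,
 *		.fill_link_af	  = foo_fill_link_af,
 *		.get_link_af_size = foo_get_link_af_size,
 *		.validate_link_af = foo_validate_link_af,
 *		.set_link_af	  = foo_set_link_af,
 *	};
 *
 *	rtnl_af_register(&foo_af_ops);	// typically from __init code
 */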
571 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
572 * @ops: struct rtnl_af_ops * to unregister
574 void rtnl_af_unregister(struct rtnl_af_ops *ops)
577 list_del_rcu(&ops->list);
582 EXPORT_SYMBOL_GPL(rtnl_af_unregister);
584 static size_t rtnl_link_get_af_size(const struct net_device *dev,
587 struct rtnl_af_ops *af_ops;
591 size = nla_total_size(sizeof(struct nlattr));
594 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
595 if (af_ops->get_link_af_size) {
596 /* AF_* + nested data */
597 size += nla_total_size(sizeof(struct nlattr)) +
598 af_ops->get_link_af_size(dev, ext_filter_mask);
606 static bool rtnl_have_link_slave_info(const struct net_device *dev)
608 struct net_device *master_dev;
613 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
614 if (master_dev && master_dev->rtnl_link_ops)
620 static int rtnl_link_slave_info_fill(struct sk_buff *skb,
621 const struct net_device *dev)
623 struct net_device *master_dev;
624 const struct rtnl_link_ops *ops;
625 struct nlattr *slave_data;
628 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
631 ops = master_dev->rtnl_link_ops;
634 if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
636 if (ops->fill_slave_info) {
637 slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
640 err = ops->fill_slave_info(skb, master_dev, dev);
642 goto err_cancel_slave_data;
643 nla_nest_end(skb, slave_data);
647 err_cancel_slave_data:
648 nla_nest_cancel(skb, slave_data);
652 static int rtnl_link_info_fill(struct sk_buff *skb,
653 const struct net_device *dev)
655 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
661 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
663 if (ops->fill_xstats) {
664 err = ops->fill_xstats(skb, dev);
668 if (ops->fill_info) {
669 data = nla_nest_start(skb, IFLA_INFO_DATA);
672 err = ops->fill_info(skb, dev);
674 goto err_cancel_data;
675 nla_nest_end(skb, data);
680 nla_nest_cancel(skb, data);
684 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
686 struct nlattr *linkinfo;
689 linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
690 if (linkinfo == NULL)
693 err = rtnl_link_info_fill(skb, dev);
695 goto err_cancel_link;
697 err = rtnl_link_slave_info_fill(skb, dev);
699 goto err_cancel_link;
701 nla_nest_end(skb, linkinfo);
705 nla_nest_cancel(skb, linkinfo);
710 int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
712 struct sock *rtnl = net->rtnl;
715 NETLINK_CB(skb).dst_group = group;
717 refcount_inc(&skb->users);
718 netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
720 err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
724 int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
726 struct sock *rtnl = net->rtnl;
728 return nlmsg_unicast(rtnl, skb, pid);
730 EXPORT_SYMBOL(rtnl_unicast);
732 void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
733 struct nlmsghdr *nlh, gfp_t flags)
735 struct sock *rtnl = net->rtnl;
739 report = nlmsg_report(nlh);
741 nlmsg_notify(rtnl, skb, pid, group, report, flags);
743 EXPORT_SYMBOL(rtnl_notify);
745 void rtnl_set_sk_err(struct net *net, u32 group, int error)
747 struct sock *rtnl = net->rtnl;
749 netlink_set_err(rtnl, 0, group, error);
751 EXPORT_SYMBOL(rtnl_set_sk_err);
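/* Usage sketch (illustrative): the common pattern for emitting an rtnetlink
 * notification with the helpers above: allocate, fill, multicast with
 * rtnl_notify(), and report allocation failure to listeners via
 * rtnl_set_sk_err(). The group and the fill step are placeholders.
 *
 *	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	if (!skb) {
 *		rtnl_set_sk_err(net, RTNLGRP_LINK, -ENOBUFS);
 *		return;
 *	}
 *	// ... nlmsg_put() + nla_put_*() to build the message ...
 *	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
 */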
753 int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
758 mx = nla_nest_start(skb, RTA_METRICS);
762 for (i = 0; i < RTAX_MAX; i++) {
764 if (i == RTAX_CC_ALGO - 1) {
765 char tmp[TCP_CA_NAME_MAX], *name;
767 name = tcp_ca_get_name_by_key(metrics[i], tmp);
770 if (nla_put_string(skb, i + 1, name))
771 goto nla_put_failure;
772 } else if (i == RTAX_FEATURES - 1) {
773 u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
777 BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
778 if (nla_put_u32(skb, i + 1, user_features))
779 goto nla_put_failure;
781 if (nla_put_u32(skb, i + 1, metrics[i]))
782 goto nla_put_failure;
789 nla_nest_cancel(skb, mx);
793 return nla_nest_end(skb, mx);
796 nla_nest_cancel(skb, mx);
799 EXPORT_SYMBOL(rtnetlink_put_metrics);
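/* Usage sketch (illustrative): a route dump handler emitting its metrics
 * array (RTAX_MAX entries; metrics[RTAX_MTU - 1] holds the MTU, and so on)
 * as a nested RTA_METRICS attribute. "metrics" is a placeholder, e.g. a
 * fib_info's metrics.
 *
 *	if (rtnetlink_put_metrics(skb, metrics) < 0)
 *		goto nla_put_failure;
 */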
801 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
802 long expires, u32 error)
804 struct rta_cacheinfo ci = {
810 ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
811 ci.rta_used = dst->__use;
812 ci.rta_clntref = atomic_read(&dst->__refcnt);
817 clock = jiffies_to_clock_t(abs(expires));
818 clock = min_t(unsigned long, clock, INT_MAX);
819 ci.rta_expires = (expires > 0) ? clock : -clock;
821 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
823 EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
825 static void set_operstate(struct net_device *dev, unsigned char transition)
827 unsigned char operstate = dev->operstate;
829 switch (transition) {
831 if ((operstate == IF_OPER_DORMANT ||
832 operstate == IF_OPER_UNKNOWN) &&
834 operstate = IF_OPER_UP;
837 case IF_OPER_DORMANT:
838 if (operstate == IF_OPER_UP ||
839 operstate == IF_OPER_UNKNOWN)
840 operstate = IF_OPER_DORMANT;
844 if (dev->operstate != operstate) {
845 write_lock_bh(&dev_base_lock);
846 dev->operstate = operstate;
847 write_unlock_bh(&dev_base_lock);
848 netdev_state_change(dev);
852 static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
854 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
855 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
858 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
859 const struct ifinfomsg *ifm)
861 unsigned int flags = ifm->ifi_flags;
863 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
865 flags = (flags & ifm->ifi_change) |
866 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
871 static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
872 const struct rtnl_link_stats64 *b)
874 a->rx_packets = b->rx_packets;
875 a->tx_packets = b->tx_packets;
876 a->rx_bytes = b->rx_bytes;
877 a->tx_bytes = b->tx_bytes;
878 a->rx_errors = b->rx_errors;
879 a->tx_errors = b->tx_errors;
880 a->rx_dropped = b->rx_dropped;
881 a->tx_dropped = b->tx_dropped;
883 a->multicast = b->multicast;
884 a->collisions = b->collisions;
886 a->rx_length_errors = b->rx_length_errors;
887 a->rx_over_errors = b->rx_over_errors;
888 a->rx_crc_errors = b->rx_crc_errors;
889 a->rx_frame_errors = b->rx_frame_errors;
890 a->rx_fifo_errors = b->rx_fifo_errors;
891 a->rx_missed_errors = b->rx_missed_errors;
893 a->tx_aborted_errors = b->tx_aborted_errors;
894 a->tx_carrier_errors = b->tx_carrier_errors;
895 a->tx_fifo_errors = b->tx_fifo_errors;
896 a->tx_heartbeat_errors = b->tx_heartbeat_errors;
897 a->tx_window_errors = b->tx_window_errors;
899 a->rx_compressed = b->rx_compressed;
900 a->tx_compressed = b->tx_compressed;
902 a->rx_nohandler = b->rx_nohandler;
906 static inline int rtnl_vfinfo_size(const struct net_device *dev,
909 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
910 int num_vfs = dev_num_vf(dev->dev.parent);
911 size_t size = nla_total_size(0);
914 nla_total_size(sizeof(struct ifla_vf_mac)) +
915 nla_total_size(sizeof(struct ifla_vf_vlan)) +
916 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
917 nla_total_size(MAX_VLAN_LIST_LEN *
918 sizeof(struct ifla_vf_vlan_info)) +
919 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
920 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
921 nla_total_size(sizeof(struct ifla_vf_rate)) +
922 nla_total_size(sizeof(struct ifla_vf_link_state)) +
923 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
924 nla_total_size(0) + /* nest IFLA_VF_STATS */
925 /* IFLA_VF_STATS_RX_PACKETS */
926 nla_total_size_64bit(sizeof(__u64)) +
927 /* IFLA_VF_STATS_TX_PACKETS */
928 nla_total_size_64bit(sizeof(__u64)) +
929 /* IFLA_VF_STATS_RX_BYTES */
930 nla_total_size_64bit(sizeof(__u64)) +
931 /* IFLA_VF_STATS_TX_BYTES */
932 nla_total_size_64bit(sizeof(__u64)) +
933 /* IFLA_VF_STATS_BROADCAST */
934 nla_total_size_64bit(sizeof(__u64)) +
935 /* IFLA_VF_STATS_MULTICAST */
936 nla_total_size_64bit(sizeof(__u64)) +
937 /* IFLA_VF_STATS_RX_DROPPED */
938 nla_total_size_64bit(sizeof(__u64)) +
939 /* IFLA_VF_STATS_TX_DROPPED */
940 nla_total_size_64bit(sizeof(__u64)) +
941 nla_total_size(sizeof(struct ifla_vf_trust)));
947 static size_t rtnl_port_size(const struct net_device *dev,
950 size_t port_size = nla_total_size(4) /* PORT_VF */
951 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
952 + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
953 + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
954 + nla_total_size(1) /* PORT_VDP_REQUEST */
955 + nla_total_size(2); /* PORT_VDP_RESPONSE */
956 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
957 size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
959 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
962 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
963 !(ext_filter_mask & RTEXT_FILTER_VF))
965 if (dev_num_vf(dev->dev.parent))
966 return port_self_size + vf_ports_size +
967 vf_port_size * dev_num_vf(dev->dev.parent);
969 return port_self_size;
972 static size_t rtnl_xdp_size(void)
974 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
975 nla_total_size(1) + /* XDP_ATTACHED */
976 nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */
977 nla_total_size(4); /* XDP_<mode>_PROG_ID */
982 static noinline size_t if_nlmsg_size(const struct net_device *dev,
985 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
986 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
987 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
988 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
989 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
990 + nla_total_size(sizeof(struct rtnl_link_stats))
991 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
992 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
993 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
994 + nla_total_size(4) /* IFLA_TXQLEN */
995 + nla_total_size(4) /* IFLA_WEIGHT */
996 + nla_total_size(4) /* IFLA_MTU */
997 + nla_total_size(4) /* IFLA_LINK */
998 + nla_total_size(4) /* IFLA_MASTER */
999 + nla_total_size(1) /* IFLA_CARRIER */
1000 + nla_total_size(4) /* IFLA_PROMISCUITY */
1001 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
1002 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
1003 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
1004 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
1005 + nla_total_size(1) /* IFLA_OPERSTATE */
1006 + nla_total_size(1) /* IFLA_LINKMODE */
1007 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
1008 + nla_total_size(4) /* IFLA_LINK_NETNSID */
1009 + nla_total_size(4) /* IFLA_GROUP */
1010 + nla_total_size(ext_filter_mask
1011 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
1012 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
1013 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
1014 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
1015 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
1016 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
1017 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
1018 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
1019 + rtnl_xdp_size() /* IFLA_XDP */
1020 + nla_total_size(4) /* IFLA_EVENT */
1021 + nla_total_size(4) /* IFLA_NEW_NETNSID */
1022 + nla_total_size(4) /* IFLA_NEW_IFINDEX */
1023 + nla_total_size(1) /* IFLA_PROTO_DOWN */
1024 + nla_total_size(4) /* IFLA_TARGET_NETNSID */
1025 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
1026 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
1027 + nla_total_size(4) /* IFLA_MIN_MTU */
1028 + nla_total_size(4) /* IFLA_MAX_MTU */
1032 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1034 struct nlattr *vf_ports;
1035 struct nlattr *vf_port;
1039 vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
1043 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
1044 vf_port = nla_nest_start(skb, IFLA_VF_PORT);
1046 goto nla_put_failure;
1047 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
1048 goto nla_put_failure;
1049 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
1050 if (err == -EMSGSIZE)
1051 goto nla_put_failure;
1053 nla_nest_cancel(skb, vf_port);
1056 nla_nest_end(skb, vf_port);
1059 nla_nest_end(skb, vf_ports);
1064 nla_nest_cancel(skb, vf_ports);
1068 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1070 struct nlattr *port_self;
1073 port_self = nla_nest_start(skb, IFLA_PORT_SELF);
1077 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1079 nla_nest_cancel(skb, port_self);
1080 return (err == -EMSGSIZE) ? err : 0;
1083 nla_nest_end(skb, port_self);
1088 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1089 u32 ext_filter_mask)
1093 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1094 !(ext_filter_mask & RTEXT_FILTER_VF))
1097 err = rtnl_port_self_fill(skb, dev);
1101 if (dev_num_vf(dev->dev.parent)) {
1102 err = rtnl_vf_ports_fill(skb, dev);
1110 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1113 struct netdev_phys_item_id ppid;
1115 err = dev_get_phys_port_id(dev, &ppid);
1117 if (err == -EOPNOTSUPP)
1122 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1128 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1130 char name[IFNAMSIZ];
1133 err = dev_get_phys_port_name(dev, name, sizeof(name));
1135 if (err == -EOPNOTSUPP)
1140 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
1146 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1148 struct netdev_phys_item_id ppid = { };
1151 err = dev_get_port_parent_id(dev, &ppid, false);
1153 if (err == -EOPNOTSUPP)
1158 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
1164 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1165 struct net_device *dev)
1167 struct rtnl_link_stats64 *sp;
1168 struct nlattr *attr;
1170 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1171 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1175 sp = nla_data(attr);
1176 dev_get_stats(dev, sp);
1178 attr = nla_reserve(skb, IFLA_STATS,
1179 sizeof(struct rtnl_link_stats));
1183 copy_rtnl_link_stats(nla_data(attr), sp);
1188 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1189 struct net_device *dev,
1191 struct nlattr *vfinfo)
1193 struct ifla_vf_rss_query_en vf_rss_query_en;
1194 struct nlattr *vf, *vfstats, *vfvlanlist;
1195 struct ifla_vf_link_state vf_linkstate;
1196 struct ifla_vf_vlan_info vf_vlan_info;
1197 struct ifla_vf_spoofchk vf_spoofchk;
1198 struct ifla_vf_tx_rate vf_tx_rate;
1199 struct ifla_vf_stats vf_stats;
1200 struct ifla_vf_trust vf_trust;
1201 struct ifla_vf_vlan vf_vlan;
1202 struct ifla_vf_rate vf_rate;
1203 struct ifla_vf_mac vf_mac;
1204 struct ifla_vf_info ivi;
1206 memset(&ivi, 0, sizeof(ivi));
1208 /* Not all SR-IOV capable drivers support the
1209 * spoofcheck and "RSS query enable" query. Preset to
1210 * -1 so the user space tool can detect that the driver
1211 * didn't report anything.
1214 ivi.rss_query_en = -1;
1216 /* The default value for VF link state is "auto"
1217 * IFLA_VF_LINK_STATE_AUTO which equals zero
1220 /* VLAN Protocol by default is 802.1Q */
1221 ivi.vlan_proto = htons(ETH_P_8021Q);
1222 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1225 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
1234 vf_rss_query_en.vf =
1235 vf_trust.vf = ivi.vf;
1237 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1238 vf_vlan.vlan = ivi.vlan;
1239 vf_vlan.qos = ivi.qos;
1240 vf_vlan_info.vlan = ivi.vlan;
1241 vf_vlan_info.qos = ivi.qos;
1242 vf_vlan_info.vlan_proto = ivi.vlan_proto;
1243 vf_tx_rate.rate = ivi.max_tx_rate;
1244 vf_rate.min_tx_rate = ivi.min_tx_rate;
1245 vf_rate.max_tx_rate = ivi.max_tx_rate;
1246 vf_spoofchk.setting = ivi.spoofchk;
1247 vf_linkstate.link_state = ivi.linkstate;
1248 vf_rss_query_en.setting = ivi.rss_query_en;
1249 vf_trust.setting = ivi.trusted;
1250 vf = nla_nest_start(skb, IFLA_VF_INFO);
1252 goto nla_put_vfinfo_failure;
1253 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1254 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1255 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1257 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1259 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1261 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1263 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1264 sizeof(vf_rss_query_en),
1265 &vf_rss_query_en) ||
1266 nla_put(skb, IFLA_VF_TRUST,
1267 sizeof(vf_trust), &vf_trust))
1268 goto nla_put_vf_failure;
1269 vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
1271 goto nla_put_vf_failure;
1272 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1274 nla_nest_cancel(skb, vfvlanlist);
1275 goto nla_put_vf_failure;
1277 nla_nest_end(skb, vfvlanlist);
1278 memset(&vf_stats, 0, sizeof(vf_stats));
1279 if (dev->netdev_ops->ndo_get_vf_stats)
1280 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1282 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1284 goto nla_put_vf_failure;
1285 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1286 vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1287 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1288 vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1289 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1290 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1291 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1292 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1293 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1294 vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1295 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1296 vf_stats.multicast, IFLA_VF_STATS_PAD) ||
1297 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
1298 vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
1299 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
1300 vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
1301 nla_nest_cancel(skb, vfstats);
1302 goto nla_put_vf_failure;
1304 nla_nest_end(skb, vfstats);
1305 nla_nest_end(skb, vf);
1309 nla_nest_cancel(skb, vf);
1310 nla_put_vfinfo_failure:
1311 nla_nest_cancel(skb, vfinfo);
1315 static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
1316 struct net_device *dev,
1317 u32 ext_filter_mask)
1319 struct nlattr *vfinfo;
1322 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1325 num_vfs = dev_num_vf(dev->dev.parent);
1326 if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
1329 if (!dev->netdev_ops->ndo_get_vf_config)
1332 vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
1336 for (i = 0; i < num_vfs; i++) {
1337 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
1341 nla_nest_end(skb, vfinfo);
1345 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1347 struct rtnl_link_ifmap map;
1349 memset(&map, 0, sizeof(map));
1350 map.mem_start = dev->mem_start;
1351 map.mem_end = dev->mem_end;
1352 map.base_addr = dev->base_addr;
1355 map.port = dev->if_port;
1357 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1363 static u32 rtnl_xdp_prog_skb(struct net_device *dev)
1365 const struct bpf_prog *generic_xdp_prog;
1369 generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
1370 if (!generic_xdp_prog)
1372 return generic_xdp_prog->aux->id;
1375 static u32 rtnl_xdp_prog_drv(struct net_device *dev)
1377 return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, XDP_QUERY_PROG);
1380 static u32 rtnl_xdp_prog_hw(struct net_device *dev)
1382 return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf,
1386 static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
1387 u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
1388 u32 (*get_prog_id)(struct net_device *dev))
1393 curr_id = get_prog_id(dev);
1398 err = nla_put_u32(skb, attr, curr_id);
1402 if (*mode != XDP_ATTACHED_NONE)
1403 *mode = XDP_ATTACHED_MULTI;
1410 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1417 xdp = nla_nest_start(skb, IFLA_XDP);
1422 mode = XDP_ATTACHED_NONE;
1423 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
1424 IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
1427 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
1428 IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
1431 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
1432 IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
1436 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
1440 if (prog_id && mode != XDP_ATTACHED_MULTI) {
1441 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1446 nla_nest_end(skb, xdp);
1450 nla_nest_cancel(skb, xdp);
1454 static u32 rtnl_get_event(unsigned long event)
1456 u32 rtnl_event_type = IFLA_EVENT_NONE;
1460 rtnl_event_type = IFLA_EVENT_REBOOT;
1462 case NETDEV_FEAT_CHANGE:
1463 rtnl_event_type = IFLA_EVENT_FEATURES;
1465 case NETDEV_BONDING_FAILOVER:
1466 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1468 case NETDEV_NOTIFY_PEERS:
1469 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1471 case NETDEV_RESEND_IGMP:
1472 rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1474 case NETDEV_CHANGEINFODATA:
1475 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1481 return rtnl_event_type;
1484 static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1486 const struct net_device *upper_dev;
1491 upper_dev = netdev_master_upper_dev_get_rcu(dev);
1493 ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);
1499 static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev)
1501 int ifindex = dev_get_iflink(dev);
1503 if (dev->ifindex == ifindex)
1506 return nla_put_u32(skb, IFLA_LINK, ifindex);
1509 static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1510 struct net_device *dev)
1515 ret = dev_get_alias(dev, buf, sizeof(buf));
1516 return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
1519 static int rtnl_fill_link_netnsid(struct sk_buff *skb,
1520 const struct net_device *dev,
1521 struct net *src_net)
1523 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1524 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1526 if (!net_eq(dev_net(dev), link_net)) {
1527 int id = peernet2id_alloc(src_net, link_net);
1529 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1537 static int rtnl_fill_link_af(struct sk_buff *skb,
1538 const struct net_device *dev,
1539 u32 ext_filter_mask)
1541 const struct rtnl_af_ops *af_ops;
1542 struct nlattr *af_spec;
1544 af_spec = nla_nest_start(skb, IFLA_AF_SPEC);
1548 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
1552 if (!af_ops->fill_link_af)
1555 af = nla_nest_start(skb, af_ops->family);
1559 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1561 * Caller may return ENODATA to indicate that there
1562 * was no data to be dumped. This is not an error, it
1563 * means we should trim the attribute header and continue.
1566 if (err == -ENODATA)
1567 nla_nest_cancel(skb, af);
1571 nla_nest_end(skb, af);
1574 nla_nest_end(skb, af_spec);
1578 static int rtnl_fill_ifinfo(struct sk_buff *skb,
1579 struct net_device *dev, struct net *src_net,
1580 int type, u32 pid, u32 seq, u32 change,
1581 unsigned int flags, u32 ext_filter_mask,
1582 u32 event, int *new_nsid, int new_ifindex,
1585 struct ifinfomsg *ifm;
1586 struct nlmsghdr *nlh;
1589 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1593 ifm = nlmsg_data(nlh);
1594 ifm->ifi_family = AF_UNSPEC;
1596 ifm->ifi_type = dev->type;
1597 ifm->ifi_index = dev->ifindex;
1598 ifm->ifi_flags = dev_get_flags(dev);
1599 ifm->ifi_change = change;
1601 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
1602 goto nla_put_failure;
1604 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1605 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1606 nla_put_u8(skb, IFLA_OPERSTATE,
1607 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1608 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1609 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1610 nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
1611 nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
1612 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1613 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1614 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1615 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1616 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1618 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1620 nla_put_iflink(skb, dev) ||
1621 put_master_ifindex(skb, dev) ||
1622 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1624 nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
1625 nla_put_ifalias(skb, dev) ||
1626 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
1627 atomic_read(&dev->carrier_up_count) +
1628 atomic_read(&dev->carrier_down_count)) ||
1629 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down) ||
1630 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
1631 atomic_read(&dev->carrier_up_count)) ||
1632 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
1633 atomic_read(&dev->carrier_down_count)))
1634 goto nla_put_failure;
1636 if (event != IFLA_EVENT_NONE) {
1637 if (nla_put_u32(skb, IFLA_EVENT, event))
1638 goto nla_put_failure;
1641 if (rtnl_fill_link_ifmap(skb, dev))
1642 goto nla_put_failure;
1644 if (dev->addr_len) {
1645 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1646 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1647 goto nla_put_failure;
1650 if (rtnl_phys_port_id_fill(skb, dev))
1651 goto nla_put_failure;
1653 if (rtnl_phys_port_name_fill(skb, dev))
1654 goto nla_put_failure;
1656 if (rtnl_phys_switch_id_fill(skb, dev))
1657 goto nla_put_failure;
1659 if (rtnl_fill_stats(skb, dev))
1660 goto nla_put_failure;
1662 if (rtnl_fill_vf(skb, dev, ext_filter_mask))
1663 goto nla_put_failure;
1665 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1666 goto nla_put_failure;
1668 if (rtnl_xdp_fill(skb, dev))
1669 goto nla_put_failure;
1671 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1672 if (rtnl_link_fill(skb, dev) < 0)
1673 goto nla_put_failure;
1676 if (rtnl_fill_link_netnsid(skb, dev, src_net))
1677 goto nla_put_failure;
1680 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
1681 goto nla_put_failure;
1683 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
1684 goto nla_put_failure;
1688 if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
1689 goto nla_put_failure_rcu;
1692 nlmsg_end(skb, nlh);
1695 nla_put_failure_rcu:
1698 nlmsg_cancel(skb, nlh);
1702 static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1703 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1704 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1705 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1706 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
1707 [IFLA_MTU] = { .type = NLA_U32 },
1708 [IFLA_LINK] = { .type = NLA_U32 },
1709 [IFLA_MASTER] = { .type = NLA_U32 },
1710 [IFLA_CARRIER] = { .type = NLA_U8 },
1711 [IFLA_TXQLEN] = { .type = NLA_U32 },
1712 [IFLA_WEIGHT] = { .type = NLA_U32 },
1713 [IFLA_OPERSTATE] = { .type = NLA_U8 },
1714 [IFLA_LINKMODE] = { .type = NLA_U8 },
1715 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1716 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1717 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1718 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1719 * allow 0-length string (needed to remove an alias).
1721 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1722 [IFLA_VFINFO_LIST] = { .type = NLA_NESTED },
1723 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1724 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
1725 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
1726 [IFLA_EXT_MASK] = { .type = NLA_U32 },
1727 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1728 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
1729 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
1730 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 },
1731 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 },
1732 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1733 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
1734 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1735 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
1736 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
1737 [IFLA_XDP] = { .type = NLA_NESTED },
1738 [IFLA_EVENT] = { .type = NLA_U32 },
1739 [IFLA_GROUP] = { .type = NLA_U32 },
1740 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 },
1741 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
1742 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
1743 [IFLA_MIN_MTU] = { .type = NLA_U32 },
1744 [IFLA_MAX_MTU] = { .type = NLA_U32 },
1747 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1748 [IFLA_INFO_KIND] = { .type = NLA_STRING },
1749 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
1750 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
1751 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
1754 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1755 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
1756 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
1757 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
1758 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
1759 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
1760 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
1761 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
1762 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
1763 [IFLA_VF_STATS] = { .type = NLA_NESTED },
1764 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
1765 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1766 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1769 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
1770 [IFLA_PORT_VF] = { .type = NLA_U32 },
1771 [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
1772 .len = PORT_PROFILE_MAX },
1773 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
1774 .len = PORT_UUID_MAX },
1775 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
1776 .len = PORT_UUID_MAX },
1777 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
1778 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
1780 /* Unused, but we need to keep it here since user space could
1781 * fill it. It's also broken with regard to NLA_BINARY use in
1782 * combination with structs.
1784 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
1785 .len = sizeof(struct ifla_port_vsi) },
1788 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
1789 [IFLA_XDP_FD] = { .type = NLA_S32 },
1790 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
1791 [IFLA_XDP_FLAGS] = { .type = NLA_U32 },
1792 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
1795 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
1797 const struct rtnl_link_ops *ops = NULL;
1798 struct nlattr *linfo[IFLA_INFO_MAX + 1];
1800 if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla,
1801 ifla_info_policy, NULL) < 0)
1804 if (linfo[IFLA_INFO_KIND]) {
1805 char kind[MODULE_NAME_LEN];
1807 nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
1808 ops = rtnl_link_ops_get(kind);
1814 static bool link_master_filtered(struct net_device *dev, int master_idx)
1816 struct net_device *master;
1821 master = netdev_master_upper_dev_get(dev);
1822 if (!master || master->ifindex != master_idx)
1828 static bool link_kind_filtered(const struct net_device *dev,
1829 const struct rtnl_link_ops *kind_ops)
1831 if (kind_ops && dev->rtnl_link_ops != kind_ops)
1837 static bool link_dump_filtered(struct net_device *dev,
1839 const struct rtnl_link_ops *kind_ops)
1841 if (link_master_filtered(dev, master_idx) ||
1842 link_kind_filtered(dev, kind_ops))
1849 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
1850 * @sk: netlink socket
1851 * @netnsid: network namespace identifier
1853 * Returns the network namespace identified by netnsid on success or an error
1854 * pointer on failure.
1856 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
1860 net = get_net_ns_by_id(sock_net(sk), netnsid);
1862 return ERR_PTR(-EINVAL);
1864 /* For now, the caller is required to have CAP_NET_ADMIN in
1865 * the user namespace owning the target net ns.
1867 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
1869 return ERR_PTR(-EACCES);
1873 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
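/* Usage sketch (illustrative): how a handler resolves IFLA_TARGET_NETNSID
 * into a namespace reference (this mirrors rtnl_dump_ifinfo() below); the
 * reference must be dropped with put_net() when done.
 *
 *	if (tb[IFLA_TARGET_NETNSID]) {
 *		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
 *		tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
 *		if (IS_ERR(tgt_net))
 *			return PTR_ERR(tgt_net);
 *	}
 *	// ... use tgt_net ...
 *	if (netnsid >= 0)
 *		put_net(tgt_net);
 */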
1875 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
1876 bool strict_check, struct nlattr **tb,
1877 struct netlink_ext_ack *extack)
1882 struct ifinfomsg *ifm;
1884 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
1885 NL_SET_ERR_MSG(extack, "Invalid header for link dump");
1889 ifm = nlmsg_data(nlh);
1890 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
1892 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
1895 if (ifm->ifi_index) {
1896 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
1900 return nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
1901 ifla_policy, extack);
1904 /* A hack to preserve kernel<->userspace interface.
1905 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
1906 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
1907 * what iproute2 < v3.9.0 used.
1908 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
1909 * attribute, its netlink message is shorter than struct ifinfomsg.
1911 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
1912 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
1914 return nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, extack);
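/* Userspace sketch (illustrative, not kernel code): the modern form of a
 * link dump request carries a struct ifinfomsg header (optionally followed
 * by IFLA_EXT_MASK); iproute2 older than v3.9.0 sent only the one-byte
 * rtgenmsg, which the hdrlen heuristic above still accepts in the
 * non-strict path. nl_fd is an assumed NETLINK_ROUTE socket.
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct ifinfomsg ifm;
 *	} req = {
 *		.nlh.nlmsg_len	 = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
 *		.nlh.nlmsg_type	 = RTM_GETLINK,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		.ifm.ifi_family	 = AF_UNSPEC,
 *	};
 *
 *	send(nl_fd, &req, req.nlh.nlmsg_len, 0);
 */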
1917 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1919 struct netlink_ext_ack *extack = cb->extack;
1920 const struct nlmsghdr *nlh = cb->nlh;
1921 struct net *net = sock_net(skb->sk);
1922 struct net *tgt_net = net;
1925 struct net_device *dev;
1926 struct hlist_head *head;
1927 struct nlattr *tb[IFLA_MAX+1];
1928 u32 ext_filter_mask = 0;
1929 const struct rtnl_link_ops *kind_ops = NULL;
1930 unsigned int flags = NLM_F_MULTI;
1936 s_idx = cb->args[1];
1938 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
1940 if (cb->strict_check)
1946 for (i = 0; i <= IFLA_MAX; ++i) {
1950 /* new attributes should only be added with strict checking */
1952 case IFLA_TARGET_NETNSID:
1953 netnsid = nla_get_s32(tb[i]);
1954 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
1955 if (IS_ERR(tgt_net)) {
1956 NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
1957 return PTR_ERR(tgt_net);
1961 ext_filter_mask = nla_get_u32(tb[i]);
1964 master_idx = nla_get_u32(tb[i]);
1967 kind_ops = linkinfo_to_kind_ops(tb[i]);
1970 if (cb->strict_check) {
1971 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
1977 if (master_idx || kind_ops)
1978 flags |= NLM_F_DUMP_FILTERED;
1981 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1983 head = &tgt_net->dev_index_head[h];
1984 hlist_for_each_entry(dev, head, index_hlist) {
1985 if (link_dump_filtered(dev, master_idx, kind_ops))
1989 err = rtnl_fill_ifinfo(skb, dev, net,
1991 NETLINK_CB(cb->skb).portid,
1992 nlh->nlmsg_seq, 0, flags,
1993 ext_filter_mask, 0, NULL, 0,
1997 if (likely(skb->len))
2011 cb->seq = net->dev_base_seq;
2012 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2019 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
2020 struct netlink_ext_ack *exterr)
2022 return nla_parse(tb, IFLA_MAX, head, len, ifla_policy, exterr);
2024 EXPORT_SYMBOL(rtnl_nla_parse_ifla);
2026 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2029 /* Examine the link attributes and figure out which
2030 * network namespace we are talking about.
2032 if (tb[IFLA_NET_NS_PID])
2033 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
2034 else if (tb[IFLA_NET_NS_FD])
2035 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
2037 net = get_net(src_net);
2040 EXPORT_SYMBOL(rtnl_link_get_net);
2042 /* Figure out which network namespace we are talking about by
2043 * examining the link attributes in the following order:
2045 * 1. IFLA_NET_NS_PID
2046 * 2. IFLA_NET_NS_FD
2047 * 3. IFLA_TARGET_NETNSID
2049 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2050 struct nlattr *tb[])
2054 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2055 return rtnl_link_get_net(src_net, tb);
2057 if (!tb[IFLA_TARGET_NETNSID])
2058 return get_net(src_net);
2060 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
2062 return ERR_PTR(-EINVAL);
2067 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2068 struct net *src_net,
2069 struct nlattr *tb[], int cap)
2073 net = rtnl_link_get_net_by_nlattr(src_net, tb);
2077 if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2079 return ERR_PTR(-EPERM);
2085 /* Verify that rtnetlink requests do not pass additional properties
2086 * potentially referring to different network namespaces.
2088 static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2089 struct netlink_ext_ack *extack,
2093 if (netns_id_only) {
2094 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2097 NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2101 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
2104 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
2107 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
2113 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2117 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
2120 if (tb[IFLA_ADDRESS] &&
2121 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2124 if (tb[IFLA_BROADCAST] &&
2125 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2129 if (tb[IFLA_AF_SPEC]) {
2133 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2134 const struct rtnl_af_ops *af_ops;
2137 af_ops = rtnl_af_lookup(nla_type(af));
2140 return -EAFNOSUPPORT;
2143 if (!af_ops->set_link_af) {
2148 if (af_ops->validate_link_af) {
2149 err = af_ops->validate_link_af(dev, af);
2163 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2166 const struct net_device_ops *ops = dev->netdev_ops;
2168 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2171 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2173 if (dev->type != ARPHRD_INFINIBAND)
2176 return handle_infiniband_guid(dev, ivt, guid_type);
2179 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2181 const struct net_device_ops *ops = dev->netdev_ops;
2184 if (tb[IFLA_VF_MAC]) {
2185 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
2188 if (ops->ndo_set_vf_mac)
2189 err = ops->ndo_set_vf_mac(dev, ivm->vf,
2195 if (tb[IFLA_VF_VLAN]) {
2196 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2199 if (ops->ndo_set_vf_vlan)
2200 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2202 htons(ETH_P_8021Q));
2207 if (tb[IFLA_VF_VLAN_LIST]) {
2208 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2209 struct nlattr *attr;
2213 if (!ops->ndo_set_vf_vlan)
2216 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2217 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2218 nla_len(attr) < NLA_HDRLEN) {
2221 if (len >= MAX_VLAN_LIST_LEN)
2223 ivvl[len] = nla_data(attr);
2230 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2231 ivvl[0]->qos, ivvl[0]->vlan_proto);
2236 if (tb[IFLA_VF_TX_RATE]) {
2237 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2238 struct ifla_vf_info ivf;
2241 if (ops->ndo_get_vf_config)
2242 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2247 if (ops->ndo_set_vf_rate)
2248 err = ops->ndo_set_vf_rate(dev, ivt->vf,
2255 if (tb[IFLA_VF_RATE]) {
2256 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2259 if (ops->ndo_set_vf_rate)
2260 err = ops->ndo_set_vf_rate(dev, ivt->vf,
2267 if (tb[IFLA_VF_SPOOFCHK]) {
2268 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2271 if (ops->ndo_set_vf_spoofchk)
2272 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2278 if (tb[IFLA_VF_LINK_STATE]) {
2279 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2282 if (ops->ndo_set_vf_link_state)
2283 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2289 if (tb[IFLA_VF_RSS_QUERY_EN]) {
2290 struct ifla_vf_rss_query_en *ivrssq_en;
2293 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
2294 if (ops->ndo_set_vf_rss_query_en)
2295 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2296 ivrssq_en->setting);
2301 if (tb[IFLA_VF_TRUST]) {
2302 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2305 if (ops->ndo_set_vf_trust)
2306 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2311 if (tb[IFLA_VF_IB_NODE_GUID]) {
2312 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2314 if (!ops->ndo_set_vf_guid)
2317 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2320 if (tb[IFLA_VF_IB_PORT_GUID]) {
2321 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2323 if (!ops->ndo_set_vf_guid)
2326 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2332 static int do_set_master(struct net_device *dev, int ifindex,
2333 struct netlink_ext_ack *extack)
2335 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2336 const struct net_device_ops *ops;
2340 if (upper_dev->ifindex == ifindex)
2342 ops = upper_dev->netdev_ops;
2343 if (ops->ndo_del_slave) {
2344 err = ops->ndo_del_slave(upper_dev, dev);
2353 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2356 ops = upper_dev->netdev_ops;
2357 if (ops->ndo_add_slave) {
2358 err = ops->ndo_add_slave(upper_dev, dev, extack);
2368 #define DO_SETLINK_MODIFIED 0x01
2369 /* notify flag means notify + modified. */
2370 #define DO_SETLINK_NOTIFY 0x03
2371 static int do_setlink(const struct sk_buff *skb,
2372 struct net_device *dev, struct ifinfomsg *ifm,
2373 struct netlink_ext_ack *extack,
2374 struct nlattr **tb, char *ifname, int status)
2376 const struct net_device_ops *ops = dev->netdev_ops;
2379 err = validate_linkmsg(dev, tb);
2383 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
2384 struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
2391 err = dev_change_net_namespace(dev, net, ifname);
2395 status |= DO_SETLINK_MODIFIED;
2399 struct rtnl_link_ifmap *u_map;
2402 if (!ops->ndo_set_config) {
2407 if (!netif_device_present(dev)) {
2412 u_map = nla_data(tb[IFLA_MAP]);
2413 k_map.mem_start = (unsigned long) u_map->mem_start;
2414 k_map.mem_end = (unsigned long) u_map->mem_end;
2415 k_map.base_addr = (unsigned short) u_map->base_addr;
2416 k_map.irq = (unsigned char) u_map->irq;
2417 k_map.dma = (unsigned char) u_map->dma;
2418 k_map.port = (unsigned char) u_map->port;
2420 err = ops->ndo_set_config(dev, &k_map);
2424 status |= DO_SETLINK_NOTIFY;
2427 if (tb[IFLA_ADDRESS]) {
2428 struct sockaddr *sa;
2431 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2433 sa = kmalloc(len, GFP_KERNEL);
2438 sa->sa_family = dev->type;
2439 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2441 err = dev_set_mac_address(dev, sa, extack);
2445 status |= DO_SETLINK_MODIFIED;
2449 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2452 status |= DO_SETLINK_MODIFIED;
2455 if (tb[IFLA_GROUP]) {
2456 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2457 status |= DO_SETLINK_NOTIFY;
2461 * Interface selected by interface index but interface
2462 * name provided implies that a name change has been requested.
2465 if (ifm->ifi_index > 0 && ifname[0]) {
2466 err = dev_change_name(dev, ifname);
2469 status |= DO_SETLINK_MODIFIED;
2472 if (tb[IFLA_IFALIAS]) {
2473 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2474 nla_len(tb[IFLA_IFALIAS]));
2477 status |= DO_SETLINK_NOTIFY;
2480 if (tb[IFLA_BROADCAST]) {
2481 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2482 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2485 if (ifm->ifi_flags || ifm->ifi_change) {
2486 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2492 if (tb[IFLA_MASTER]) {
2493 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2496 status |= DO_SETLINK_MODIFIED;
2499 if (tb[IFLA_CARRIER]) {
2500 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2503 status |= DO_SETLINK_MODIFIED;
2506 if (tb[IFLA_TXQLEN]) {
2507 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2509 err = dev_change_tx_queue_len(dev, value);
2512 status |= DO_SETLINK_MODIFIED;
2515 if (tb[IFLA_GSO_MAX_SIZE]) {
2516 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2518 if (max_size > GSO_MAX_SIZE) {
2523 if (dev->gso_max_size ^ max_size) {
2524 netif_set_gso_max_size(dev, max_size);
2525 status |= DO_SETLINK_MODIFIED;
2529 if (tb[IFLA_GSO_MAX_SEGS]) {
2530 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2532 if (max_segs > GSO_MAX_SEGS) {
2537 if (dev->gso_max_segs ^ max_segs) {
2538 dev->gso_max_segs = max_segs;
2539 status |= DO_SETLINK_MODIFIED;
2543 if (tb[IFLA_OPERSTATE])
2544 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2546 if (tb[IFLA_LINKMODE]) {
2547 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2549 write_lock_bh(&dev_base_lock);
2550 if (dev->link_mode ^ value)
2551 status |= DO_SETLINK_NOTIFY;
2552 dev->link_mode = value;
2553 write_unlock_bh(&dev_base_lock);
2556 if (tb[IFLA_VFINFO_LIST]) {
2557 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2558 struct nlattr *attr;
2561 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2562 if (nla_type(attr) != IFLA_VF_INFO ||
2563 nla_len(attr) < NLA_HDRLEN) {
2567 err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
2568 ifla_vf_policy, NULL);
2571 err = do_setvfinfo(dev, vfinfo);
2574 status |= DO_SETLINK_NOTIFY;
2579 if (tb[IFLA_VF_PORTS]) {
2580 struct nlattr *port[IFLA_PORT_MAX+1];
2581 struct nlattr *attr;
2586 if (!ops->ndo_set_vf_port)
2589 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2590 if (nla_type(attr) != IFLA_VF_PORT ||
2591 nla_len(attr) < NLA_HDRLEN) {
2595 err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
2596 ifla_port_policy, NULL);
2599 if (!port[IFLA_PORT_VF]) {
2603 vf = nla_get_u32(port[IFLA_PORT_VF]);
2604 err = ops->ndo_set_vf_port(dev, vf, port);
2607 status |= DO_SETLINK_NOTIFY;
2612 if (tb[IFLA_PORT_SELF]) {
2613 struct nlattr *port[IFLA_PORT_MAX+1];
2615 err = nla_parse_nested(port, IFLA_PORT_MAX,
2616 tb[IFLA_PORT_SELF], ifla_port_policy,
2622 if (ops->ndo_set_vf_port)
2623 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
2626 status |= DO_SETLINK_NOTIFY;
2629 if (tb[IFLA_AF_SPEC]) {
2633 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2634 const struct rtnl_af_ops *af_ops;
2638 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
2640 err = af_ops->set_link_af(dev, af);
2647 status |= DO_SETLINK_NOTIFY;
2652 if (tb[IFLA_PROTO_DOWN]) {
2653 err = dev_change_proto_down(dev,
2654 nla_get_u8(tb[IFLA_PROTO_DOWN]));
2657 status |= DO_SETLINK_NOTIFY;
2661 struct nlattr *xdp[IFLA_XDP_MAX + 1];
2664 err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
2665 ifla_xdp_policy, NULL);
2669 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
2674 if (xdp[IFLA_XDP_FLAGS]) {
2675 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
2676 if (xdp_flags & ~XDP_FLAGS_MASK) {
2680 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
2686 if (xdp[IFLA_XDP_FD]) {
2687 err = dev_change_xdp_fd(dev, extack,
2688 nla_get_s32(xdp[IFLA_XDP_FD]),
2692 status |= DO_SETLINK_NOTIFY;
2697 if (status & DO_SETLINK_MODIFIED) {
2698 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
2699 netdev_state_change(dev);
2702 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
2709 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2710 struct netlink_ext_ack *extack)
2712 struct net *net = sock_net(skb->sk);
2713 struct ifinfomsg *ifm;
2714 struct net_device *dev;
2716 struct nlattr *tb[IFLA_MAX+1];
2717 char ifname[IFNAMSIZ];
2719 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
2724 err = rtnl_ensure_unique_netns(tb, extack, false);
2728 if (tb[IFLA_IFNAME])
2729 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2734 ifm = nlmsg_data(nlh);
2735 if (ifm->ifi_index > 0)
2736 dev = __dev_get_by_index(net, ifm->ifi_index);
2737 else if (tb[IFLA_IFNAME])
2738 dev = __dev_get_by_name(net, ifname);
2747 err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
2752 static int rtnl_group_dellink(const struct net *net, int group)
2754 struct net_device *dev, *aux;
2755 LIST_HEAD(list_kill);
2761 for_each_netdev(net, dev) {
2762 if (dev->group == group) {
2763 const struct rtnl_link_ops *ops;
2766 ops = dev->rtnl_link_ops;
2767 if (!ops || !ops->dellink)
2775 for_each_netdev_safe(net, dev, aux) {
2776 if (dev->group == group) {
2777 const struct rtnl_link_ops *ops;
2779 ops = dev->rtnl_link_ops;
2780 ops->dellink(dev, &list_kill);
2783 unregister_netdevice_many(&list_kill);
2788 int rtnl_delete_link(struct net_device *dev)
2790 const struct rtnl_link_ops *ops;
2791 LIST_HEAD(list_kill);
2793 ops = dev->rtnl_link_ops;
2794 if (!ops || !ops->dellink)
2797 ops->dellink(dev, &list_kill);
2798 unregister_netdevice_many(&list_kill);
2802 EXPORT_SYMBOL_GPL(rtnl_delete_link);
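/* rtnl_dellink - handle RTM_DELLINK requests.
 *
 * The target is selected by ifindex, by IFLA_IFNAME or, failing both, by
 * IFLA_GROUP (which removes every device in that group). An optional
 * IFLA_TARGET_NETNSID redirects the lookup to another network namespace.
 */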
2804 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
2805 struct netlink_ext_ack *extack)
2807 struct net *net = sock_net(skb->sk);
2808 struct net *tgt_net = net;
2809 struct net_device *dev = NULL;
2810 struct ifinfomsg *ifm;
2811 char ifname[IFNAMSIZ];
2812 struct nlattr *tb[IFLA_MAX+1];
2816 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
2820 err = rtnl_ensure_unique_netns(tb, extack, true);
2824 if (tb[IFLA_IFNAME])
2825 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2827 if (tb[IFLA_TARGET_NETNSID]) {
2828 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
2829 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
2830 if (IS_ERR(tgt_net))
2831 return PTR_ERR(tgt_net);
2835 ifm = nlmsg_data(nlh);
2836 if (ifm->ifi_index > 0)
2837 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
2838 else if (tb[IFLA_IFNAME])
2839 dev = __dev_get_by_name(tgt_net, ifname);
2840 else if (tb[IFLA_GROUP])
2841 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
2846 if (tb[IFLA_IFNAME] || ifm->ifi_index > 0)
2852 err = rtnl_delete_link(dev);
2861 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
2863 unsigned int old_flags;
2866 old_flags = dev->flags;
2867 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
2868 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2874 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
2875 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
2877 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
2878 __dev_notify_flags(dev, old_flags, ~0U);
2882 EXPORT_SYMBOL(rtnl_configure_link);
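/* rtnl_create_link - allocate and pre-configure a new net_device from
 * netlink attributes.
 *
 * Queue counts come from IFLA_NUM_TX_QUEUES/IFLA_NUM_RX_QUEUES or the
 * rtnl_link_ops callbacks and must be in the 1..4096 range. The device is
 * allocated with alloc_netdev_mqs(), bound to @net and @ops, and seeded
 * from the remaining attributes (address, MTU, operstate, link mode, GSO
 * limits, ...); registration is left to the caller.
 */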
2884 struct net_device *rtnl_create_link(struct net *net, const char *ifname,
2885 unsigned char name_assign_type,
2886 const struct rtnl_link_ops *ops,
2887 struct nlattr *tb[],
2888 struct netlink_ext_ack *extack)
2890 struct net_device *dev;
2891 unsigned int num_tx_queues = 1;
2892 unsigned int num_rx_queues = 1;
2894 if (tb[IFLA_NUM_TX_QUEUES])
2895 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
2896 else if (ops->get_num_tx_queues)
2897 num_tx_queues = ops->get_num_tx_queues();
2899 if (tb[IFLA_NUM_RX_QUEUES])
2900 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
2901 else if (ops->get_num_rx_queues)
2902 num_rx_queues = ops->get_num_rx_queues();
2904 if (num_tx_queues < 1 || num_tx_queues > 4096) {
2905 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
2906 return ERR_PTR(-EINVAL);
2909 if (num_rx_queues < 1 || num_rx_queues > 4096) {
2910 NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
2911 return ERR_PTR(-EINVAL);
2914 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
2915 ops->setup, num_tx_queues, num_rx_queues);
2917 return ERR_PTR(-ENOMEM);
2919 dev_net_set(dev, net);
2920 dev->rtnl_link_ops = ops;
2921 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
2924 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
2925 if (tb[IFLA_ADDRESS]) {
2926 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
2927 nla_len(tb[IFLA_ADDRESS]));
2928 dev->addr_assign_type = NET_ADDR_SET;
2930 if (tb[IFLA_BROADCAST])
2931 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
2932 nla_len(tb[IFLA_BROADCAST]));
2933 if (tb[IFLA_TXQLEN])
2934 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
2935 if (tb[IFLA_OPERSTATE])
2936 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2937 if (tb[IFLA_LINKMODE])
2938 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
2940 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2941 if (tb[IFLA_GSO_MAX_SIZE])
2942 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
2943 if (tb[IFLA_GSO_MAX_SEGS])
2944 dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2948 EXPORT_SYMBOL(rtnl_create_link);
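/* Apply one do_setlink() request to every device belonging to @group. */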
2950 static int rtnl_group_changelink(const struct sk_buff *skb,
2951 struct net *net, int group,
2952 struct ifinfomsg *ifm,
2953 struct netlink_ext_ack *extack,
2956 struct net_device *dev, *aux;
2959 for_each_netdev_safe(net, dev, aux) {
2960 if (dev->group == group) {
2961 err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
2970 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2971 struct nlattr **attr, struct netlink_ext_ack *extack)
2973 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
2974 unsigned char name_assign_type = NET_NAME_USER;
2975 struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
2976 const struct rtnl_link_ops *m_ops = NULL;
2977 struct net_device *master_dev = NULL;
2978 struct net *net = sock_net(skb->sk);
2979 const struct rtnl_link_ops *ops;
2980 struct nlattr *tb[IFLA_MAX + 1];
2981 struct net *dest_net, *link_net;
2982 struct nlattr **slave_data;
2983 char kind[MODULE_NAME_LEN];
2984 struct net_device *dev;
2985 struct ifinfomsg *ifm;
2986 char ifname[IFNAMSIZ];
2987 struct nlattr **data;
2990 #ifdef CONFIG_MODULES
2993 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
2997 err = rtnl_ensure_unique_netns(tb, extack, false);
3001 if (tb[IFLA_IFNAME])
3002 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3006 ifm = nlmsg_data(nlh);
3007 if (ifm->ifi_index > 0)
3008 dev = __dev_get_by_index(net, ifm->ifi_index);
3011 dev = __dev_get_by_name(net, ifname);
3017 master_dev = netdev_master_upper_dev_get(dev);
3019 m_ops = master_dev->rtnl_link_ops;
3022 err = validate_linkmsg(dev, tb);
3026 if (tb[IFLA_LINKINFO]) {
3027 err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
3028 tb[IFLA_LINKINFO], ifla_info_policy,
3033 memset(linkinfo, 0, sizeof(linkinfo));
3035 if (linkinfo[IFLA_INFO_KIND]) {
3036 nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3037 ops = rtnl_link_ops_get(kind);
3045 if (ops->maxtype > RTNL_MAX_TYPE)
3048 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3049 err = nla_parse_nested(attr, ops->maxtype,
3050 linkinfo[IFLA_INFO_DATA],
3051 ops->policy, extack);
3056 if (ops->validate) {
3057 err = ops->validate(tb, data, extack);
3065 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3068 if (m_ops->slave_maxtype &&
3069 linkinfo[IFLA_INFO_SLAVE_DATA]) {
3070 err = nla_parse_nested(slave_attr, m_ops->slave_maxtype,
3071 linkinfo[IFLA_INFO_SLAVE_DATA],
3072 m_ops->slave_policy, extack);
3075 slave_data = slave_attr;
3082 if (nlh->nlmsg_flags & NLM_F_EXCL)
3084 if (nlh->nlmsg_flags & NLM_F_REPLACE)
3087 if (linkinfo[IFLA_INFO_DATA]) {
3088 if (!ops || ops != dev->rtnl_link_ops ||
3092 err = ops->changelink(dev, tb, data, extack);
3095 status |= DO_SETLINK_NOTIFY;
3098 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3099 if (!m_ops || !m_ops->slave_changelink)
3102 err = m_ops->slave_changelink(master_dev, dev, tb,
3103 slave_data, extack);
3106 status |= DO_SETLINK_NOTIFY;
3109 return do_setlink(skb, dev, ifm, extack, tb, ifname, status);
3112 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3113 if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
3114 return rtnl_group_changelink(skb, net,
3115 nla_get_u32(tb[IFLA_GROUP]),
3120 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3124 #ifdef CONFIG_MODULES
3127 request_module("rtnl-link-%s", kind);
3129 ops = rtnl_link_ops_get(kind);
3134 NL_SET_ERR_MSG(extack, "Unknown device type");
3142 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3143 name_assign_type = NET_NAME_ENUM;
3146 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3147 if (IS_ERR(dest_net))
3148 return PTR_ERR(dest_net);
3150 if (tb[IFLA_LINK_NETNSID]) {
3151 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3153 link_net = get_net_ns_by_id(dest_net, id);
3155 NL_SET_ERR_MSG(extack, "Unknown network namespace id");
3160 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3166 dev = rtnl_create_link(link_net ? : dest_net, ifname,
3167 name_assign_type, ops, tb, extack);
3173 dev->ifindex = ifm->ifi_index;
3176 err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3177 /* Drivers should call free_netdev() in ->destructor
3178 * and unregister the device on failure after registration,
3179 * so that it can finally be freed in rtnl_unlock().
3182 /* If device is not registered at all, free it now */
3183 if (dev->reg_state == NETREG_UNINITIALIZED)
3188 err = register_netdevice(dev);
3194 err = rtnl_configure_link(dev, ifm);
3196 goto out_unregister;
3198 err = dev_change_net_namespace(dev, dest_net, ifname);
3200 goto out_unregister;
3202 if (tb[IFLA_MASTER]) {
3203 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3205 goto out_unregister;
3214 LIST_HEAD(list_kill);
3216 ops->dellink(dev, &list_kill);
3217 unregister_netdevice_many(&list_kill);
3219 unregister_netdevice(dev);
3224 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3225 struct netlink_ext_ack *extack)
3227 struct nlattr **attr;
3230 attr = kmalloc_array(RTNL_MAX_TYPE + 1, sizeof(*attr), GFP_KERNEL);
3234 ret = __rtnl_newlink(skb, nlh, attr, extack);
3239 static int rtnl_valid_getlink_req(struct sk_buff *skb,
3240 const struct nlmsghdr *nlh,
3242 struct netlink_ext_ack *extack)
3244 struct ifinfomsg *ifm;
3247 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
3248 NL_SET_ERR_MSG(extack, "Invalid header for get link");
3252 if (!netlink_strict_get_check(skb))
3253 return nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
3256 ifm = nlmsg_data(nlh);
3257 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3259 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
3263 err = nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
3268 for (i = 0; i <= IFLA_MAX; i++) {
3275 case IFLA_TARGET_NETNSID:
3278 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
3286 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3287 struct netlink_ext_ack *extack)
3289 struct net *net = sock_net(skb->sk);
3290 struct net *tgt_net = net;
3291 struct ifinfomsg *ifm;
3292 char ifname[IFNAMSIZ];
3293 struct nlattr *tb[IFLA_MAX+1];
3294 struct net_device *dev = NULL;
3295 struct sk_buff *nskb;
3298 u32 ext_filter_mask = 0;
3300 err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
3304 err = rtnl_ensure_unique_netns(tb, extack, true);
3308 if (tb[IFLA_TARGET_NETNSID]) {
3309 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3310 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3311 if (IS_ERR(tgt_net))
3312 return PTR_ERR(tgt_net);
3315 if (tb[IFLA_IFNAME])
3316 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3318 if (tb[IFLA_EXT_MASK])
3319 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3322 ifm = nlmsg_data(nlh);
3323 if (ifm->ifi_index > 0)
3324 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3325 else if (tb[IFLA_IFNAME])
3326 dev = __dev_get_by_name(tgt_net, ifname);
3335 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
3339 err = rtnl_fill_ifinfo(nskb, dev, net,
3340 RTM_NEWLINK, NETLINK_CB(skb).portid,
3341 nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3342 0, NULL, 0, netnsid);
3344 /* -EMSGSIZE implies BUG in if_nlmsg_size */
3345 WARN_ON(err == -EMSGSIZE);
3348 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
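/* Compute the minimum per-message allocation size hint for an RTM_GETLINK
 * dump, taking a requested IFLA_EXT_MASK into account.
 */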
3356 static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
3358 struct net *net = sock_net(skb->sk);
3359 struct net_device *dev;
3360 struct nlattr *tb[IFLA_MAX+1];
3361 u32 ext_filter_mask = 0;
3362 u16 min_ifinfo_dump_size = 0;
3365 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3366 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3367 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
3369 if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
3370 if (tb[IFLA_EXT_MASK])
3371 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3374 if (!ext_filter_mask)
3375 return NLMSG_GOODSIZE;
3377 /* traverse the list of net devices and compute the minimum
3378 * buffer size based upon the filter mask. */
3381 for_each_netdev_rcu(net, dev) {
3382 min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
3388 return nlmsg_total_size(min_ifinfo_dump_size);
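/* Fallback dump handler (used e.g. for GETADDR/GETROUTE/GETNETCONF): walk
 * every registered protocol family starting at cb->family and invoke the
 * dumpit() handler registered for the requested message type.
 */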
3391 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
3394 int s_idx = cb->family;
3395 int type = cb->nlh->nlmsg_type - RTM_BASE;
3401 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
3402 struct rtnl_link **tab;
3403 struct rtnl_link *link;
3404 rtnl_dumpit_func dumpit;
3406 if (idx < s_idx || idx == PF_PACKET)
3409 if (type < 0 || type >= RTM_NR_MSGTYPES)
3412 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
3420 dumpit = link->dumpit;
3425 memset(&cb->args[0], 0, sizeof(cb->args));
3429 ret = dumpit(skb, cb);
3435 return skb->len ? : ret;
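/* Build, but do not send, a link notification skb of the given @type for
 * @dev. On failure the error is recorded against RTNLGRP_LINK and NULL is
 * returned.
 */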
3438 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3439 unsigned int change,
3440 u32 event, gfp_t flags, int *new_nsid,
3443 struct net *net = dev_net(dev);
3444 struct sk_buff *skb;
3446 size_t if_info_size;
3448 skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
3452 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3453 type, 0, 0, change, 0, 0, event,
3454 new_nsid, new_ifindex, -1);
3456 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
3457 WARN_ON(err == -EMSGSIZE);
3464 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3468 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
3470 struct net *net = dev_net(dev);
3472 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
3475 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
3476 unsigned int change, u32 event,
3477 gfp_t flags, int *new_nsid, int new_ifindex)
3479 struct sk_buff *skb;
3481 if (dev->reg_state != NETREG_REGISTERED)
3484 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
3487 rtmsg_ifinfo_send(skb, dev, flags);
3490 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
3493 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3497 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
3498 gfp_t flags, int *new_nsid, int new_ifindex)
3500 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3501 new_nsid, new_ifindex);
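/* Fill a single neighbour (FDB) message - an AF_BRIDGE ndmsg followed by
 * NDA_LLADDR and NDA_VLAN attributes - into @skb.
 */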
3504 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
3505 struct net_device *dev,
3506 u8 *addr, u16 vid, u32 pid, u32 seq,
3507 int type, unsigned int flags,
3508 int nlflags, u16 ndm_state)
3510 struct nlmsghdr *nlh;
3513 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
3517 ndm = nlmsg_data(nlh);
3518 ndm->ndm_family = AF_BRIDGE;
3521 ndm->ndm_flags = flags;
3523 ndm->ndm_ifindex = dev->ifindex;
3524 ndm->ndm_state = ndm_state;
3526 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
3527 goto nla_put_failure;
3529 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
3530 goto nla_put_failure;
3532 nlmsg_end(skb, nlh);
3536 nlmsg_cancel(skb, nlh);
3540 static inline size_t rtnl_fdb_nlmsg_size(void)
3542 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
3543 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
3544 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
3548 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
3551 struct net *net = dev_net(dev);
3552 struct sk_buff *skb;
3555 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
3559 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
3560 0, 0, type, NTF_SELF, 0, ndm_state);
3566 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3569 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3573 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
3575 int ndo_dflt_fdb_add(struct ndmsg *ndm,
3576 struct nlattr *tb[],
3577 struct net_device *dev,
3578 const unsigned char *addr, u16 vid,
3583 /* If aging addresses are supported, the device will need
3584 * to implement its own handler for this.
3586 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
3587 pr_info("%s: FDB only supports static addresses\n", dev->name);
3592 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
3596 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3597 err = dev_uc_add_excl(dev, addr);
3598 else if (is_multicast_ether_addr(addr))
3599 err = dev_mc_add_excl(dev, addr);
3601 /* Only return duplicate errors if NLM_F_EXCL is set */
3602 if (err == -EEXIST && !(flags & NLM_F_EXCL))
3607 EXPORT_SYMBOL(ndo_dflt_fdb_add);
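/* Validate an optional NDA_VLAN attribute and extract the VLAN id;
 * ids of 0 or >= VLAN_VID_MASK are rejected with an extack message.
 */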
3609 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
3610 struct netlink_ext_ack *extack)
3615 if (nla_len(vlan_attr) != sizeof(u16)) {
3616 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
3620 vid = nla_get_u16(vlan_attr);
3622 if (!vid || vid >= VLAN_VID_MASK) {
3623 NL_SET_ERR_MSG(extack, "invalid vlan id");
3631 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
3632 struct netlink_ext_ack *extack)
3634 struct net *net = sock_net(skb->sk);
3636 struct nlattr *tb[NDA_MAX+1];
3637 struct net_device *dev;
3642 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
3646 ndm = nlmsg_data(nlh);
3647 if (ndm->ndm_ifindex == 0) {
3648 NL_SET_ERR_MSG(extack, "invalid ifindex");
3652 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
3654 NL_SET_ERR_MSG(extack, "unknown ifindex");
3658 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3659 NL_SET_ERR_MSG(extack, "invalid address");
3663 if (dev->type != ARPHRD_ETHER) {
3664 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
3668 addr = nla_data(tb[NDA_LLADDR]);
3670 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
3676 /* Support fdb on the master device - the net/bridge default case */
3677 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
3678 (dev->priv_flags & IFF_BRIDGE_PORT)) {
3679 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3680 const struct net_device_ops *ops = br_dev->netdev_ops;
3682 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
3683 nlh->nlmsg_flags, extack);
3687 ndm->ndm_flags &= ~NTF_MASTER;
3690 /* Embedded bridge, macvlan, and any other device support */
3691 if ((ndm->ndm_flags & NTF_SELF)) {
3692 if (dev->netdev_ops->ndo_fdb_add)
3693 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
3698 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
3702 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
3704 ndm->ndm_flags &= ~NTF_SELF;
3712 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
3714 int ndo_dflt_fdb_del(struct ndmsg *ndm,
3715 struct nlattr *tb[],
3716 struct net_device *dev,
3717 const unsigned char *addr, u16 vid)
3721 /* If aging addresses are supported, the device will need
3722 * to implement its own handler for this.
3724 if (!(ndm->ndm_state & NUD_PERMANENT)) {
3725 pr_info("%s: FDB only supports static addresses\n", dev->name);
3729 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3730 err = dev_uc_del(dev, addr);
3731 else if (is_multicast_ether_addr(addr))
3732 err = dev_mc_del(dev, addr);
3736 EXPORT_SYMBOL(ndo_dflt_fdb_del);
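/* rtnl_fdb_del - handle RTM_DELNEIGH for AF_BRIDGE (FDB entry removal).
 *
 * Requires CAP_NET_ADMIN and mirrors rtnl_fdb_add(): the request is offered
 * to the bridge master first (NTF_MASTER, the default) and then to the
 * device itself (NTF_SELF), falling back to ndo_dflt_fdb_del(). A
 * successful NTF_SELF removal triggers an RTM_DELNEIGH notification.
 */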
3738 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
3739 struct netlink_ext_ack *extack)
3741 struct net *net = sock_net(skb->sk);
3743 struct nlattr *tb[NDA_MAX+1];
3744 struct net_device *dev;
3749 if (!netlink_capable(skb, CAP_NET_ADMIN))
3752 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
3756 ndm = nlmsg_data(nlh);
3757 if (ndm->ndm_ifindex == 0) {
3758 NL_SET_ERR_MSG(extack, "invalid ifindex");
3762 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
3764 NL_SET_ERR_MSG(extack, "unknown ifindex");
3768 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3769 NL_SET_ERR_MSG(extack, "invalid address");
3773 if (dev->type != ARPHRD_ETHER) {
3774 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
3778 addr = nla_data(tb[NDA_LLADDR]);
3780 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
3786 /* Support fdb on the master device - the net/bridge default case */
3787 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
3788 (dev->priv_flags & IFF_BRIDGE_PORT)) {
3789 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3790 const struct net_device_ops *ops = br_dev->netdev_ops;
3792 if (ops->ndo_fdb_del)
3793 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
3798 ndm->ndm_flags &= ~NTF_MASTER;
3801 /* Embedded bridge, macvlan, and any other device support */
3802 if (ndm->ndm_flags & NTF_SELF) {
3803 if (dev->netdev_ops->ndo_fdb_del)
3804 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
3807 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
3810 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
3812 ndm->ndm_flags &= ~NTF_SELF;
3819 static int nlmsg_populate_fdb(struct sk_buff *skb,
3820 struct netlink_callback *cb,
3821 struct net_device *dev,
3823 struct netdev_hw_addr_list *list)
3825 struct netdev_hw_addr *ha;
3829 portid = NETLINK_CB(cb->skb).portid;
3830 seq = cb->nlh->nlmsg_seq;
3832 list_for_each_entry(ha, &list->list, list) {
3833 if (*idx < cb->args[2])
3836 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
3838 RTM_NEWNEIGH, NTF_SELF,
3839 NLM_F_MULTI, NUD_PERMANENT);
3849 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
3850 * @skb: socket buffer in which the dumped entries are stored
3853 * Default netdevice operation to dump the existing unicast and multicast
3854 * address lists; the number of entries dumped is added to *idx.
3856 int ndo_dflt_fdb_dump(struct sk_buff *skb,
3857 struct netlink_callback *cb,
3858 struct net_device *dev,
3859 struct net_device *filter_dev,
3864 if (dev->type != ARPHRD_ETHER)
3867 netif_addr_lock_bh(dev);
3868 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
3871 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
3873 netif_addr_unlock_bh(dev);
3876 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
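/* Strict validation of an FDB dump request: the ndmsg header must have all
 * unused fields zeroed and only NDA_IFINDEX and NDA_MASTER (both u32) are
 * accepted, selecting the bridge port and/or bridge device to filter on.
 */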
3878 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
3879 int *br_idx, int *brport_idx,
3880 struct netlink_ext_ack *extack)
3882 struct nlattr *tb[NDA_MAX + 1];
3886 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
3887 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
3891 ndm = nlmsg_data(nlh);
3892 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
3893 ndm->ndm_flags || ndm->ndm_type) {
3894 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
3898 err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
3903 *brport_idx = ndm->ndm_ifindex;
3904 for (i = 0; i <= NDA_MAX; ++i) {
3910 if (nla_len(tb[i]) != sizeof(u32)) {
3911 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
3914 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
3917 if (nla_len(tb[i]) != sizeof(u32)) {
3918 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
3921 *br_idx = nla_get_u32(tb[NDA_MASTER]);
3924 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
3932 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
3933 int *br_idx, int *brport_idx,
3934 struct netlink_ext_ack *extack)
3936 struct nlattr *tb[IFLA_MAX+1];
3939 /* A hack to preserve kernel<->userspace interface.
3940 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
3941 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
3942 * So, check for ndmsg with an optional u32 attribute (not used here).
3943 * Fortunately these sizes don't conflict with the size of ifinfomsg
3944 * with an optional attribute.
3946 if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
3947 (nlmsg_len(nlh) != sizeof(struct ndmsg) +
3948 nla_attr_size(sizeof(u32)))) {
3949 struct ifinfomsg *ifm;
3951 err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
3952 ifla_policy, extack);
3955 } else if (err == 0) {
3956 if (tb[IFLA_MASTER])
3957 *br_idx = nla_get_u32(tb[IFLA_MASTER]);
3960 ifm = nlmsg_data(nlh);
3961 *brport_idx = ifm->ifi_index;
3966 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
3968 struct net_device *dev;
3969 struct net_device *br_dev = NULL;
3970 const struct net_device_ops *ops = NULL;
3971 const struct net_device_ops *cops = NULL;
3972 struct net *net = sock_net(skb->sk);
3973 struct hlist_head *head;
3981 if (cb->strict_check)
3982 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
3985 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
3991 br_dev = __dev_get_by_index(net, br_idx);
3995 ops = br_dev->netdev_ops;
3999 s_idx = cb->args[1];
4001 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4003 head = &net->dev_index_head[h];
4004 hlist_for_each_entry(dev, head, index_hlist) {
4006 if (brport_idx && (dev->ifindex != brport_idx))
4009 if (!br_idx) { /* user did not specify a specific bridge */
4010 if (dev->priv_flags & IFF_BRIDGE_PORT) {
4011 br_dev = netdev_master_upper_dev_get(dev);
4012 cops = br_dev->netdev_ops;
4015 if (dev != br_dev &&
4016 !(dev->priv_flags & IFF_BRIDGE_PORT))
4019 if (br_dev != netdev_master_upper_dev_get(dev) &&
4020 !(dev->priv_flags & IFF_EBRIDGE))
4028 if (dev->priv_flags & IFF_BRIDGE_PORT) {
4029 if (cops && cops->ndo_fdb_dump) {
4030 err = cops->ndo_fdb_dump(skb, cb,
4033 if (err == -EMSGSIZE)
4038 if (dev->netdev_ops->ndo_fdb_dump)
4039 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4043 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4045 if (err == -EMSGSIZE)
4050 /* reset fdb offset to 0 for the rest of the interfaces */
4066 static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
4067 struct nlattr **tb, u8 *ndm_flags,
4068 int *br_idx, int *brport_idx, u8 **addr,
4069 u16 *vid, struct netlink_ext_ack *extack)
4074 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4075 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
4079 ndm = nlmsg_data(nlh);
4080 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4082 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
4086 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
4087 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
4091 err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
4092 nda_policy, extack);
4096 *ndm_flags = ndm->ndm_flags;
4097 *brport_idx = ndm->ndm_ifindex;
4098 for (i = 0; i <= NDA_MAX; ++i) {
4104 *br_idx = nla_get_u32(tb[i]);
4107 if (nla_len(tb[i]) != ETH_ALEN) {
4108 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
4111 *addr = nla_data(tb[i]);
4114 err = fdb_vid_parse(tb[i], vid, extack);
4121 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
4129 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4130 struct netlink_ext_ack *extack)
4132 struct net_device *dev = NULL, *br_dev = NULL;
4133 const struct net_device_ops *ops = NULL;
4134 struct net *net = sock_net(in_skb->sk);
4135 struct nlattr *tb[NDA_MAX + 1];
4136 struct sk_buff *skb;
4144 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
4145 &brport_idx, &addr, &vid, extack);
4150 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
4155 dev = __dev_get_by_index(net, brport_idx);
4157 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
4164 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
4168 br_dev = __dev_get_by_index(net, br_idx);
4170 NL_SET_ERR_MSG(extack, "Invalid master ifindex");
4173 ops = br_dev->netdev_ops;
4177 if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
4178 if (!(dev->priv_flags & IFF_BRIDGE_PORT)) {
4179 NL_SET_ERR_MSG(extack, "Device is not a bridge port");
4182 br_dev = netdev_master_upper_dev_get(dev);
4184 NL_SET_ERR_MSG(extack, "Master of device not found");
4187 ops = br_dev->netdev_ops;
4189 if (!(ndm_flags & NTF_SELF)) {
4190 NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
4193 ops = dev->netdev_ops;
4197 if (!br_dev && !dev) {
4198 NL_SET_ERR_MSG(extack, "No device specified");
4202 if (!ops || !ops->ndo_fdb_get) {
4203 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
4207 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4213 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4214 NETLINK_CB(in_skb).portid,
4215 nlh->nlmsg_seq, extack);
4219 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
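/* Emit a bridge port flag as a u8 attribute, but only when the caller's
 * mask selects it; used by ndo_dflt_bridge_getlink() to fill the
 * IFLA_PROTINFO nest below.
 */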
4225 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
4226 unsigned int attrnum, unsigned int flag)
4229 return nla_put_u8(skb, attrnum, !!(flags & flag));
4233 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4234 struct net_device *dev, u16 mode,
4235 u32 flags, u32 mask, int nlflags,
4237 int (*vlan_fill)(struct sk_buff *skb,
4238 struct net_device *dev,
4241 struct nlmsghdr *nlh;
4242 struct ifinfomsg *ifm;
4243 struct nlattr *br_afspec;
4244 struct nlattr *protinfo;
4245 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
4246 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4249 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
4253 ifm = nlmsg_data(nlh);
4254 ifm->ifi_family = AF_BRIDGE;
4256 ifm->ifi_type = dev->type;
4257 ifm->ifi_index = dev->ifindex;
4258 ifm->ifi_flags = dev_get_flags(dev);
4259 ifm->ifi_change = 0;
4262 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4263 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4264 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
4266 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
4268 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4269 (dev->ifindex != dev_get_iflink(dev) &&
4270 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4271 goto nla_put_failure;
4273 br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
4275 goto nla_put_failure;
4277 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
4278 nla_nest_cancel(skb, br_afspec);
4279 goto nla_put_failure;
4282 if (mode != BRIDGE_MODE_UNDEF) {
4283 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
4284 nla_nest_cancel(skb, br_afspec);
4285 goto nla_put_failure;
4289 err = vlan_fill(skb, dev, filter_mask);
4291 nla_nest_cancel(skb, br_afspec);
4292 goto nla_put_failure;
4295 nla_nest_end(skb, br_afspec);
4297 protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
4299 goto nla_put_failure;
4301 if (brport_nla_put_flag(skb, flags, mask,
4302 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
4303 brport_nla_put_flag(skb, flags, mask,
4304 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
4305 brport_nla_put_flag(skb, flags, mask,
4306 IFLA_BRPORT_FAST_LEAVE,
4307 BR_MULTICAST_FAST_LEAVE) ||
4308 brport_nla_put_flag(skb, flags, mask,
4309 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
4310 brport_nla_put_flag(skb, flags, mask,
4311 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
4312 brport_nla_put_flag(skb, flags, mask,
4313 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
4314 brport_nla_put_flag(skb, flags, mask,
4315 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
4316 brport_nla_put_flag(skb, flags, mask,
4317 IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
4318 nla_nest_cancel(skb, protinfo);
4319 goto nla_put_failure;
4322 nla_nest_end(skb, protinfo);
4324 nlmsg_end(skb, nlh);
4327 nlmsg_cancel(skb, nlh);
4328 return err ? err : -EMSGSIZE;
4330 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
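/* Validate a bridge RTM_GETLINK dump request. Under strict checking the
 * ifinfomsg header must be zeroed and only IFLA_EXT_MASK is accepted,
 * supplying *filter_mask; legacy requests are parsed permissively.
 */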
4332 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
4333 bool strict_check, u32 *filter_mask,
4334 struct netlink_ext_ack *extack)
4336 struct nlattr *tb[IFLA_MAX+1];
4340 struct ifinfomsg *ifm;
4342 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
4343 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
4347 ifm = nlmsg_data(nlh);
4348 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
4349 ifm->ifi_change || ifm->ifi_index) {
4350 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
4354 err = nlmsg_parse_strict(nlh, sizeof(struct ifinfomsg), tb,
4355 IFLA_MAX, ifla_policy, extack);
4357 err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb,
4358 IFLA_MAX, ifla_policy, extack);
4363 /* new attributes should only be added with strict checking */
4364 for (i = 0; i <= IFLA_MAX; ++i) {
4370 *filter_mask = nla_get_u32(tb[i]);
4374 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
4383 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
4385 const struct nlmsghdr *nlh = cb->nlh;
4386 struct net *net = sock_net(skb->sk);
4387 struct net_device *dev;
4389 u32 portid = NETLINK_CB(cb->skb).portid;
4390 u32 seq = nlh->nlmsg_seq;
4391 u32 filter_mask = 0;
4394 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
4396 if (err < 0 && cb->strict_check)
4400 for_each_netdev_rcu(net, dev) {
4401 const struct net_device_ops *ops = dev->netdev_ops;
4402 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4404 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
4405 if (idx >= cb->args[0]) {
4406 err = br_dev->netdev_ops->ndo_bridge_getlink(
4407 skb, portid, seq, dev,
4408 filter_mask, NLM_F_MULTI);
4409 if (err < 0 && err != -EOPNOTSUPP) {
4410 if (likely(skb->len))
4419 if (ops->ndo_bridge_getlink) {
4420 if (idx >= cb->args[0]) {
4421 err = ops->ndo_bridge_getlink(skb, portid,
4425 if (err < 0 && err != -EOPNOTSUPP) {
4426 if (likely(skb->len))
4443 static inline size_t bridge_nlmsg_size(void)
4445 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
4446 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
4447 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
4448 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
4449 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
4450 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
4451 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
4452 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
4453 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
4454 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
4455 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
4458 static int rtnl_bridge_notify(struct net_device *dev)
4460 struct net *net = dev_net(dev);
4461 struct sk_buff *skb;
4462 int err = -EOPNOTSUPP;
4464 if (!dev->netdev_ops->ndo_bridge_getlink)
4467 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
4473 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
4480 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
4483 WARN_ON(err == -EMSGSIZE);
4486 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
4490 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
4491 struct netlink_ext_ack *extack)
4493 struct net *net = sock_net(skb->sk);
4494 struct ifinfomsg *ifm;
4495 struct net_device *dev;
4496 struct nlattr *br_spec, *attr = NULL;
4497 int rem, err = -EOPNOTSUPP;
4499 bool have_flags = false;
4501 if (nlmsg_len(nlh) < sizeof(*ifm))
4504 ifm = nlmsg_data(nlh);
4505 if (ifm->ifi_family != AF_BRIDGE)
4506 return -EPFNOSUPPORT;
4508 dev = __dev_get_by_index(net, ifm->ifi_index);
4510 NL_SET_ERR_MSG(extack, "unknown ifindex");
4514 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4516 nla_for_each_nested(attr, br_spec, rem) {
4517 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
4518 if (nla_len(attr) < sizeof(flags))
4522 flags = nla_get_u16(attr);
4528 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
4529 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4531 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
4536 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
4541 flags &= ~BRIDGE_FLAGS_MASTER;
4544 if ((flags & BRIDGE_FLAGS_SELF)) {
4545 if (!dev->netdev_ops->ndo_bridge_setlink)
4548 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
4552 flags &= ~BRIDGE_FLAGS_SELF;
4554 /* Generate event to notify upper layer of bridge change */
4557 err = rtnl_bridge_notify(dev);
4562 memcpy(nla_data(attr), &flags, sizeof(flags));
4567 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
4568 struct netlink_ext_ack *extack)
4570 struct net *net = sock_net(skb->sk);
4571 struct ifinfomsg *ifm;
4572 struct net_device *dev;
4573 struct nlattr *br_spec, *attr = NULL;
4574 int rem, err = -EOPNOTSUPP;
4576 bool have_flags = false;
4578 if (nlmsg_len(nlh) < sizeof(*ifm))
4581 ifm = nlmsg_data(nlh);
4582 if (ifm->ifi_family != AF_BRIDGE)
4583 return -EPFNOSUPPORT;
4585 dev = __dev_get_by_index(net, ifm->ifi_index);
4587 NL_SET_ERR_MSG(extack, "unknown ifindex");
4591 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4593 nla_for_each_nested(attr, br_spec, rem) {
4594 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
4595 if (nla_len(attr) < sizeof(flags))
4599 flags = nla_get_u16(attr);
4605 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
4606 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4608 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
4613 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
4617 flags &= ~BRIDGE_FLAGS_MASTER;
4620 if ((flags & BRIDGE_FLAGS_SELF)) {
4621 if (!dev->netdev_ops->ndo_bridge_dellink)
4624 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
4628 flags &= ~BRIDGE_FLAGS_SELF;
4630 /* Generate event to notify upper layer of bridge change */
4633 err = rtnl_bridge_notify(dev);
4638 memcpy(nla_data(attr), &flags, sizeof(flags));
4643 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
4645 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
4646 (!idxattr || idxattr == attrid);
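/* Helpers for the IFLA_STATS_LINK_OFFLOAD_XSTATS nest: per-attribute sizes
 * and the filling of hardware-offloaded statistics obtained through the
 * driver's ndo_has_offload_stats()/ndo_get_offload_stats() callbacks.
 */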
4649 #define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
4650 static int rtnl_get_offload_stats_attr_size(int attr_id)
4653 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
4654 return sizeof(struct rtnl_link_stats64);
4660 static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
4663 struct nlattr *attr = NULL;
4668 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
4669 dev->netdev_ops->ndo_get_offload_stats))
4672 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
4673 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
4674 if (attr_id < *prividx)
4677 size = rtnl_get_offload_stats_attr_size(attr_id);
4681 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
4684 attr = nla_reserve_64bit(skb, attr_id, size,
4685 IFLA_OFFLOAD_XSTATS_UNSPEC);
4687 goto nla_put_failure;
4689 attr_data = nla_data(attr);
4690 memset(attr_data, 0, size);
4691 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
4694 goto get_offload_stats_failure;
4705 get_offload_stats_failure:
4710 static int rtnl_get_offload_stats_size(const struct net_device *dev)
4716 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
4717 dev->netdev_ops->ndo_get_offload_stats))
4720 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
4721 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
4722 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
4724 size = rtnl_get_offload_stats_attr_size(attr_id);
4725 nla_size += nla_total_size_64bit(size);
4729 nla_size += nla_total_size(0);
4734 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
4735 int type, u32 pid, u32 seq, u32 change,
4736 unsigned int flags, unsigned int filter_mask,
4737 int *idxattr, int *prividx)
4739 struct if_stats_msg *ifsm;
4740 struct nlmsghdr *nlh;
4741 struct nlattr *attr;
4742 int s_prividx = *prividx;
4747 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
4751 ifsm = nlmsg_data(nlh);
4752 ifsm->family = PF_UNSPEC;
4755 ifsm->ifindex = dev->ifindex;
4756 ifsm->filter_mask = filter_mask;
4758 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
4759 struct rtnl_link_stats64 *sp;
4761 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
4762 sizeof(struct rtnl_link_stats64),
4765 goto nla_put_failure;
4767 sp = nla_data(attr);
4768 dev_get_stats(dev, sp);
4771 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
4772 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
4774 if (ops && ops->fill_linkxstats) {
4775 *idxattr = IFLA_STATS_LINK_XSTATS;
4776 attr = nla_nest_start(skb,
4777 IFLA_STATS_LINK_XSTATS);
4779 goto nla_put_failure;
4781 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
4782 nla_nest_end(skb, attr);
4784 goto nla_put_failure;
4789 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
4791 const struct rtnl_link_ops *ops = NULL;
4792 const struct net_device *master;
4794 master = netdev_master_upper_dev_get(dev);
4796 ops = master->rtnl_link_ops;
4797 if (ops && ops->fill_linkxstats) {
4798 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
4799 attr = nla_nest_start(skb,
4800 IFLA_STATS_LINK_XSTATS_SLAVE);
4802 goto nla_put_failure;
4804 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
4805 nla_nest_end(skb, attr);
4807 goto nla_put_failure;
4812 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
4814 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
4815 attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
4817 goto nla_put_failure;
4819 err = rtnl_get_offload_stats(skb, dev, prividx);
4820 if (err == -ENODATA)
4821 nla_nest_cancel(skb, attr);
4823 nla_nest_end(skb, attr);
4825 if (err && err != -ENODATA)
4826 goto nla_put_failure;
4830 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
4831 struct rtnl_af_ops *af_ops;
4833 *idxattr = IFLA_STATS_AF_SPEC;
4834 attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC);
4836 goto nla_put_failure;
4839 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
4840 if (af_ops->fill_stats_af) {
4844 af = nla_nest_start(skb, af_ops->family);
4847 goto nla_put_failure;
4849 err = af_ops->fill_stats_af(skb, dev);
4851 if (err == -ENODATA) {
4852 nla_nest_cancel(skb, af);
4853 } else if (err < 0) {
4855 goto nla_put_failure;
4858 nla_nest_end(skb, af);
4863 nla_nest_end(skb, attr);
4868 nlmsg_end(skb, nlh);
4873 /* not a multi message or no progress means a real error */
4874 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
4875 nlmsg_cancel(skb, nlh);
4877 nlmsg_end(skb, nlh);
4882 static size_t if_nlmsg_stats_size(const struct net_device *dev,
4887 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
4888 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
4890 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
4891 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
4892 int attr = IFLA_STATS_LINK_XSTATS;
4894 if (ops && ops->get_linkxstats_size) {
4895 size += nla_total_size(ops->get_linkxstats_size(dev,
4897 /* for IFLA_STATS_LINK_XSTATS */
4898 size += nla_total_size(0);
4902 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
4903 struct net_device *_dev = (struct net_device *)dev;
4904 const struct rtnl_link_ops *ops = NULL;
4905 const struct net_device *master;
4907 /* netdev_master_upper_dev_get can't take const */
4908 master = netdev_master_upper_dev_get(_dev);
4910 ops = master->rtnl_link_ops;
4911 if (ops && ops->get_linkxstats_size) {
4912 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
4914 size += nla_total_size(ops->get_linkxstats_size(dev,
4916 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
4917 size += nla_total_size(0);
4921 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
4922 size += rtnl_get_offload_stats_size(dev);
4924 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
4925 struct rtnl_af_ops *af_ops;
4927 /* for IFLA_STATS_AF_SPEC */
4928 size += nla_total_size(0);
4931 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
4932 if (af_ops->get_stats_af_size) {
4933 size += nla_total_size(
4934 af_ops->get_stats_af_size(dev));
4937 size += nla_total_size(0);
4946 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
4947 bool is_dump, struct netlink_ext_ack *extack)
4949 struct if_stats_msg *ifsm;
4951 if (nlh->nlmsg_len < sizeof(*ifsm)) {
4952 NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
4959 ifsm = nlmsg_data(nlh);
4961 /* only requests using strict checks can pass data to influence
4962 * the dump. The legacy exception is filter_mask.
4964 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
4965 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
4968 if (nlmsg_attrlen(nlh, sizeof(*ifsm))) {
4969 NL_SET_ERR_MSG(extack, "Invalid attributes after stats header");
4972 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
4973 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
4980 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
4981 struct netlink_ext_ack *extack)
4983 struct net *net = sock_net(skb->sk);
4984 struct net_device *dev = NULL;
4985 int idxattr = 0, prividx = 0;
4986 struct if_stats_msg *ifsm;
4987 struct sk_buff *nskb;
4991 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
4996 ifsm = nlmsg_data(nlh);
4997 if (ifsm->ifindex > 0)
4998 dev = __dev_get_by_index(net, ifsm->ifindex);
5005 filter_mask = ifsm->filter_mask;
5009 nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
5013 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
5014 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
5015 0, filter_mask, &idxattr, &prividx);
5017 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
5018 WARN_ON(err == -EMSGSIZE);
5021 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
5027 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
5029 struct netlink_ext_ack *extack = cb->extack;
5030 int h, s_h, err, s_idx, s_idxattr, s_prividx;
5031 struct net *net = sock_net(skb->sk);
5032 unsigned int flags = NLM_F_MULTI;
5033 struct if_stats_msg *ifsm;
5034 struct hlist_head *head;
5035 struct net_device *dev;
5036 u32 filter_mask = 0;
5040 s_idx = cb->args[1];
5041 s_idxattr = cb->args[2];
5042 s_prividx = cb->args[3];
5044 cb->seq = net->dev_base_seq;
5046 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
5050 ifsm = nlmsg_data(cb->nlh);
5051 filter_mask = ifsm->filter_mask;
5053 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
5057 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5059 head = &net->dev_index_head[h];
5060 hlist_for_each_entry(dev, head, index_hlist) {
5063 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
5064 NETLINK_CB(cb->skb).portid,
5065 cb->nlh->nlmsg_seq, 0,
5067 &s_idxattr, &s_prividx);
5068 /* If we ran out of room on the first message, we're in trouble. */
5071 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
5077 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5083 cb->args[3] = s_prividx;
5084 cb->args[2] = s_idxattr;
5091 /* Process one rtnetlink message. */
5093 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
5094 struct netlink_ext_ack *extack)
5096 struct net *net = sock_net(skb->sk);
5097 struct rtnl_link *link;
5098 struct module *owner;
5099 int err = -EOPNOTSUPP;
5100 rtnl_doit_func doit;
5106 type = nlh->nlmsg_type;
5112 /* All messages must be at least 1 byte long */
5113 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
5116 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
5119 if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
5123 if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
5125 rtnl_dumpit_func dumpit;
5126 u16 min_dump_alloc = 0;
5128 link = rtnl_get_link(family, type);
5129 if (!link || !link->dumpit) {
5131 link = rtnl_get_link(family, type);
5132 if (!link || !link->dumpit)
5135 owner = link->owner;
5136 dumpit = link->dumpit;
5138 if (type == RTM_GETLINK - RTM_BASE)
5139 min_dump_alloc = rtnl_calcit(skb, nlh);
5142 /* need to do this before rcu_read_unlock() */
5143 if (!try_module_get(owner))
5144 err = -EPROTONOSUPPORT;
5150 struct netlink_dump_control c = {
5152 .min_dump_alloc = min_dump_alloc,
5155 err = netlink_dump_start(rtnl, skb, nlh, &c);
5156 /* netlink_dump_start() will keep a reference on
5157 * the module if the dump is still in progress.
5164 link = rtnl_get_link(family, type);
5165 if (!link || !link->doit) {
5167 link = rtnl_get_link(PF_UNSPEC, type);
5168 if (!link || !link->doit)
5172 owner = link->owner;
5173 if (!try_module_get(owner)) {
5174 err = -EPROTONOSUPPORT;
5178 flags = link->flags;
5179 if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
5183 err = doit(skb, nlh, extack);
5190 link = rtnl_get_link(family, type);
5191 if (link && link->doit)
5192 err = link->doit(skb, nlh, extack);
5208 static void rtnetlink_rcv(struct sk_buff *skb)
5210 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
5213 static int rtnetlink_bind(struct net *net, int group)
5216 case RTNLGRP_IPV4_MROUTE_R:
5217 case RTNLGRP_IPV6_MROUTE_R:
5218 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
5225 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
5227 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5231 case NETDEV_CHANGEMTU:
5232 case NETDEV_CHANGEADDR:
5233 case NETDEV_CHANGENAME:
5234 case NETDEV_FEAT_CHANGE:
5235 case NETDEV_BONDING_FAILOVER:
5236 case NETDEV_POST_TYPE_CHANGE:
5237 case NETDEV_NOTIFY_PEERS:
5238 case NETDEV_CHANGEUPPER:
5239 case NETDEV_RESEND_IGMP:
5240 case NETDEV_CHANGEINFODATA:
5241 case NETDEV_CHANGELOWERSTATE:
5242 case NETDEV_CHANGE_TX_QUEUE_LEN:
5243 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
5244 GFP_KERNEL, NULL, 0);
5252 static struct notifier_block rtnetlink_dev_notifier = {
5253 .notifier_call = rtnetlink_event,
5257 static int __net_init rtnetlink_net_init(struct net *net)
5260 struct netlink_kernel_cfg cfg = {
5261 .groups = RTNLGRP_MAX,
5262 .input = rtnetlink_rcv,
5263 .cb_mutex = &rtnl_mutex,
5264 .flags = NL_CFG_F_NONROOT_RECV,
5265 .bind = rtnetlink_bind,
5268 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
5275 static void __net_exit rtnetlink_net_exit(struct net *net)
5277 netlink_kernel_release(net->rtnl);
5281 static struct pernet_operations rtnetlink_net_ops = {
5282 .init = rtnetlink_net_init,
5283 .exit = rtnetlink_net_exit,
5286 void __init rtnetlink_init(void)
5288 if (register_pernet_subsys(&rtnetlink_net_ops))
5289 panic("rtnetlink_init: cannot initialize rtnetlink\n");
5291 register_netdevice_notifier(&rtnetlink_dev_notifier);
5293 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
5294 rtnl_dump_ifinfo, 0);
5295 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
5296 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
5297 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
5299 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
5300 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
5301 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
5303 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
5304 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
5305 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
5307 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
5308 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
5309 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
5311 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,