// SPDX-License-Identifier: GPL-2.0-only
/*
 * Monitoring code for network dropped packet alerts
 *
 * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/netlink.h>
#include <linux/net_dropmon.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/drop_monitor.h>
#include <net/genetlink.h>
#include <net/netevent.h>
#include <net/flow_offload.h>

#include <trace/events/skb.h>
#include <trace/events/napi.h>

#include <asm/unaligned.h>
#define TRACE_ON 1
#define TRACE_OFF 0

/*
 * Globals, our netlink socket pointer
 * and the work handle that will send up
 * netlink alerts
 */
static int trace_state = TRACE_OFF;
static bool monitor_hw;

/* net_dm_mutex
 *
 * An overall lock guarding every operation coming from userspace.
 * It also guards the global 'hw_stats_list' list.
 */
static DEFINE_MUTEX(net_dm_mutex);
struct net_dm_stats {
	u64 dropped;
	struct u64_stats_sync syncp;
};

#define NET_DM_MAX_HW_TRAP_NAME_LEN 40

struct net_dm_hw_entry {
	char trap_name[NET_DM_MAX_HW_TRAP_NAME_LEN];
	u32 count;
};

struct net_dm_hw_entries {
	u32 num_entries;
	struct net_dm_hw_entry entries[];
};

struct per_cpu_dm_data {
	spinlock_t		lock;	/* Protects 'skb', 'hw_entries' and
					 * 'send_timer'
					 */
	union {
		struct sk_buff			*skb;
		struct net_dm_hw_entries	*hw_entries;
	};
	struct sk_buff_head	drop_queue;
	struct work_struct	dm_alert_work;
	struct timer_list	send_timer;
	struct net_dm_stats	stats;
};

struct dm_hw_stat_delta {
	struct net_device *dev;
	unsigned long last_rx;
	struct list_head list;
	struct rcu_head rcu;
	unsigned long last_drop_val;
};
static struct genl_family net_drop_monitor_family;

static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_hw_cpu_data);
static int dm_hit_limit = 64;
static int dm_delay = 1;
static unsigned long dm_hw_check_delta = 2*HZ;
static LIST_HEAD(hw_stats_list);

static enum net_dm_alert_mode net_dm_alert_mode = NET_DM_ALERT_MODE_SUMMARY;
static u32 net_dm_trunc_len;
static u32 net_dm_queue_len = 1000;
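/* dm_hit_limit bounds the number of distinct drop points (or hardware trap
 * names) recorded per per-CPU buffer, dm_delay is the hysteresis in seconds
 * before a pending alert is sent, and net_dm_queue_len caps how many dropped
 * packets may be queued towards user space in packet alert mode.
 */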
struct net_dm_alert_ops {
	void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb,
				void *location);
	void (*napi_poll_probe)(void *ignore, struct napi_struct *napi,
				int work, int budget);
	void (*work_item_func)(struct work_struct *work);
	void (*hw_work_item_func)(struct work_struct *work);
	void (*hw_probe)(struct sk_buff *skb,
			 const struct net_dm_hw_metadata *hw_metadata);
};

struct net_dm_skb_cb {
	union {
		struct net_dm_hw_metadata *hw_metadata;
		void *pc;
	};
};

#define NET_DM_SKB_CB(__skb) ((struct net_dm_skb_cb *)&((__skb)->cb[0]))
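/* Drop monitor keeps its per-packet state in the skb control buffer: software
 * drops record the program counter of the drop site ('pc'), while hardware
 * drops record a pointer to cloned trap metadata. The two cases never coexist
 * on the same skb, hence the union above.
 */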
static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
{
	size_t al;
	struct net_dm_alert_msg *msg;
	struct nlattr *nla;
	struct sk_buff *skb;
	unsigned long flags;
	void *msg_header;

	al = sizeof(struct net_dm_alert_msg);
	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
	al += sizeof(struct nlattr);

	skb = genlmsg_new(al, GFP_KERNEL);

	if (!skb)
		goto err;

	msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
				 0, NET_DM_CMD_ALERT);
	if (!msg_header) {
		nlmsg_free(skb);
		skb = NULL;
		goto err;
	}
	nla = nla_reserve(skb, NLA_UNSPEC,
			  sizeof(struct net_dm_alert_msg));
	if (!nla) {
		nlmsg_free(skb);
		skb = NULL;
		goto err;
	}
	msg = nla_data(nla);
	memset(msg, 0, al);
	goto out;

err:
	mod_timer(&data->send_timer, jiffies + HZ / 10);
out:
	spin_lock_irqsave(&data->lock, flags);
	swap(data->skb, skb);
	spin_unlock_irqrestore(&data->lock, flags);

	if (skb) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
		struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);

		genlmsg_end(skb, genlmsg_data(gnlh));
	}

	return skb;
}
static const struct genl_multicast_group dropmon_mcgrps[] = {
	{ .name = "events", },
};

static void send_dm_alert(struct work_struct *work)
{
	struct sk_buff *skb;
	struct per_cpu_dm_data *data;

	data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

	skb = reset_per_cpu_data(data);

	if (skb)
		genlmsg_multicast(&net_drop_monitor_family, skb, 0,
				  0, GFP_KERNEL);
}

/*
 * This is the timer function to delay the sending of an alert
 * in the event that more drops will arrive during the
 * hysteresis period.
 */
static void sched_send_work(struct timer_list *t)
{
	struct per_cpu_dm_data *data = from_timer(data, t, send_timer);

	schedule_work(&data->dm_alert_work);
}
static void trace_drop_common(struct sk_buff *skb, void *location)
{
	struct net_dm_alert_msg *msg;
	struct net_dm_drop_point *point;
	struct nlmsghdr *nlh;
	struct nlattr *nla;
	int i;
	struct sk_buff *dskb;
	struct per_cpu_dm_data *data;
	unsigned long flags;

	local_irq_save(flags);
	data = this_cpu_ptr(&dm_cpu_data);
	spin_lock(&data->lock);
	dskb = data->skb;

	if (!dskb)
		goto out;

	nlh = (struct nlmsghdr *)dskb->data;
	nla = genlmsg_data(nlmsg_data(nlh));
	msg = nla_data(nla);
	point = msg->points;
	for (i = 0; i < msg->entries; i++) {
		if (!memcmp(&location, &point->pc, sizeof(void *))) {
			point->count++;
			goto out;
		}
		point++;
	}
	if (msg->entries == dm_hit_limit)
		goto out;
	/*
	 * We need to create a new entry
	 */
	__nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
	nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
	memcpy(point->pc, &location, sizeof(void *));
	point->count = 1;
	msg->entries++;

	if (!timer_pending(&data->send_timer)) {
		data->send_timer.expires = jiffies + dm_delay * HZ;
		add_timer(&data->send_timer);
	}

out:
	spin_unlock_irqrestore(&data->lock, flags);
}
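/* The summary alert built above reaches user space as a single NLA_UNSPEC
 * attribute: a struct net_dm_alert_msg followed by 'entries' instances of
 * struct net_dm_drop_point, i.e. one (pc, count) pair per drop location.
 */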
static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
{
	trace_drop_common(skb, location);
}

static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi,
				int work, int budget)
{
	struct dm_hw_stat_delta *new_stat;

	/*
	 * Don't check napi structures with no associated device
	 */
	if (!napi->dev)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
		/*
		 * only add a note to our monitor buffer if:
		 * 1) this is the dev we received on
		 * 2) it's after the last_rx delta
		 * 3) our rx_dropped count has changed
		 */
		if ((new_stat->dev == napi->dev) &&
		    (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
		    (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
			trace_drop_common(NULL, NULL);
			new_stat->last_drop_val = napi->dev->stats.rx_dropped;
			new_stat->last_rx = jiffies;
			break;
		}
	}
	rcu_read_unlock();
}
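/* The rx_dropped heuristic above is deliberately coarse: each device is
 * re-examined at most once per dm_hw_check_delta, and a NULL location is
 * recorded because these drops happened in hardware, where no kernel
 * program counter can be attributed.
 */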
static struct net_dm_hw_entries *
net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
{
	struct net_dm_hw_entries *hw_entries;
	unsigned long flags;

	hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit),
			     GFP_KERNEL);
	if (!hw_entries) {
		/* If the memory allocation failed, we try to perform another
		 * allocation in 1/10 second. Otherwise, the probe function
		 * will constantly bail out.
		 */
		mod_timer(&hw_data->send_timer, jiffies + HZ / 10);
	}

	spin_lock_irqsave(&hw_data->lock, flags);
	swap(hw_data->hw_entries, hw_entries);
	spin_unlock_irqrestore(&hw_data->lock, flags);

	return hw_entries;
}
static int net_dm_hw_entry_put(struct sk_buff *msg,
			       const struct net_dm_hw_entry *hw_entry)
{
	struct nlattr *attr;

	attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRY);
	if (!attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME, hw_entry->trap_name))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_HW_TRAP_COUNT, hw_entry->count))
		goto nla_put_failure;

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}

static int net_dm_hw_entries_put(struct sk_buff *msg,
				 const struct net_dm_hw_entries *hw_entries)
{
	struct nlattr *attr;
	int i;

	attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRIES);
	if (!attr)
		return -EMSGSIZE;

	for (i = 0; i < hw_entries->num_entries; i++) {
		int rc;

		rc = net_dm_hw_entry_put(msg, &hw_entries->entries[i]);
		if (rc)
			goto nla_put_failure;
	}

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}
static int
net_dm_hw_summary_report_fill(struct sk_buff *msg,
			      const struct net_dm_hw_entries *hw_entries)
{
	struct net_dm_alert_msg anc_hdr = { 0 };
	void *hdr;
	int rc;

	hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
			  NET_DM_CMD_ALERT);
	if (!hdr)
		return -EMSGSIZE;

	/* We need to put the ancillary header in order not to break user
	 * space.
	 */
	if (nla_put(msg, NLA_UNSPEC, sizeof(anc_hdr), &anc_hdr))
		goto nla_put_failure;

	rc = net_dm_hw_entries_put(msg, hw_entries);
	if (rc)
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

static void net_dm_hw_summary_work(struct work_struct *work)
{
	struct net_dm_hw_entries *hw_entries;
	struct per_cpu_dm_data *hw_data;
	struct sk_buff *msg;
	int rc;

	hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

	hw_entries = net_dm_hw_reset_per_cpu_data(hw_data);
	if (!hw_entries)
		return;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		goto out;

	rc = net_dm_hw_summary_report_fill(msg, hw_entries);
	if (rc) {
		nlmsg_free(msg);
		goto out;
	}

	genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);

out:
	kfree(hw_entries);
}
static void
net_dm_hw_summary_probe(struct sk_buff *skb,
			const struct net_dm_hw_metadata *hw_metadata)
{
	struct net_dm_hw_entries *hw_entries;
	struct net_dm_hw_entry *hw_entry;
	struct per_cpu_dm_data *hw_data;
	unsigned long flags;
	int i;

	hw_data = this_cpu_ptr(&dm_hw_cpu_data);
	spin_lock_irqsave(&hw_data->lock, flags);
	hw_entries = hw_data->hw_entries;

	if (!hw_entries)
		goto out;

	for (i = 0; i < hw_entries->num_entries; i++) {
		hw_entry = &hw_entries->entries[i];
		if (!strncmp(hw_entry->trap_name, hw_metadata->trap_name,
			     NET_DM_MAX_HW_TRAP_NAME_LEN - 1)) {
			hw_entry->count++;
			goto out;
		}
	}
	if (WARN_ON_ONCE(hw_entries->num_entries == dm_hit_limit))
		goto out;

	hw_entry = &hw_entries->entries[hw_entries->num_entries];
	strlcpy(hw_entry->trap_name, hw_metadata->trap_name,
		NET_DM_MAX_HW_TRAP_NAME_LEN - 1);
	hw_entry->count = 1;
	hw_entries->num_entries++;

	if (!timer_pending(&hw_data->send_timer)) {
		hw_data->send_timer.expires = jiffies + dm_delay * HZ;
		add_timer(&hw_data->send_timer);
	}

out:
	spin_unlock_irqrestore(&hw_data->lock, flags);
}
static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
	.kfree_skb_probe	= trace_kfree_skb_hit,
	.napi_poll_probe	= trace_napi_poll_hit,
	.work_item_func		= send_dm_alert,
	.hw_work_item_func	= net_dm_hw_summary_work,
	.hw_probe		= net_dm_hw_summary_probe,
};
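/* In summary mode only aggregated counters leave the kernel. The packet mode
 * ops below additionally clone every dropped packet and attach its (possibly
 * truncated) payload to the alert.
 */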
static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
					      struct sk_buff *skb,
					      void *location)
{
	ktime_t tstamp = ktime_get_real();
	struct per_cpu_dm_data *data;
	struct sk_buff *nskb;
	unsigned long flags;

	if (!skb_mac_header_was_set(skb))
		return;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return;

	NET_DM_SKB_CB(nskb)->pc = location;
	/* Override the timestamp because we care about the time when the
	 * packet was dropped.
	 */
	nskb->tstamp = tstamp;

	data = this_cpu_ptr(&dm_cpu_data);

	spin_lock_irqsave(&data->drop_queue.lock, flags);
	if (skb_queue_len(&data->drop_queue) < net_dm_queue_len)
		__skb_queue_tail(&data->drop_queue, nskb);
	else
		goto unlock_free;
	spin_unlock_irqrestore(&data->drop_queue.lock, flags);

	schedule_work(&data->dm_alert_work);

	return;

unlock_free:
	spin_unlock_irqrestore(&data->drop_queue.lock, flags);
	u64_stats_update_begin(&data->stats.syncp);
	data->stats.dropped++;
	u64_stats_update_end(&data->stats.syncp);
	consume_skb(nskb);
}
static void net_dm_packet_trace_napi_poll_hit(void *ignore,
					      struct napi_struct *napi,
					      int work, int budget)
{
}

static size_t net_dm_in_port_size(void)
{
	       /* NET_DM_ATTR_IN_PORT nest */
	return nla_total_size(0) +
	       /* NET_DM_ATTR_PORT_NETDEV_IFINDEX */
	       nla_total_size(sizeof(u32)) +
	       /* NET_DM_ATTR_PORT_NETDEV_NAME */
	       nla_total_size(IFNAMSIZ + 1);
}
#define NET_DM_MAX_SYMBOL_LEN 40

static size_t net_dm_packet_report_size(size_t payload_len)
{
	size_t size;

	size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize);

	return NLMSG_ALIGN(size) +
	       /* NET_DM_ATTR_ORIGIN */
	       nla_total_size(sizeof(u16)) +
	       /* NET_DM_ATTR_PC */
	       nla_total_size(sizeof(u64)) +
	       /* NET_DM_ATTR_SYMBOL */
	       nla_total_size(NET_DM_MAX_SYMBOL_LEN + 1) +
	       /* NET_DM_ATTR_IN_PORT */
	       net_dm_in_port_size() +
	       /* NET_DM_ATTR_TIMESTAMP */
	       nla_total_size(sizeof(u64)) +
	       /* NET_DM_ATTR_ORIG_LEN */
	       nla_total_size(sizeof(u32)) +
	       /* NET_DM_ATTR_PROTO */
	       nla_total_size(sizeof(u16)) +
	       /* NET_DM_ATTR_PAYLOAD */
	       nla_total_size(payload_len);
}
static int net_dm_packet_report_in_port_put(struct sk_buff *msg, int ifindex,
					    const char *name)
{
	struct nlattr *attr;

	attr = nla_nest_start(msg, NET_DM_ATTR_IN_PORT);
	if (!attr)
		return -EMSGSIZE;

	if (ifindex &&
	    nla_put_u32(msg, NET_DM_ATTR_PORT_NETDEV_IFINDEX, ifindex))
		goto nla_put_failure;

	if (name && nla_put_string(msg, NET_DM_ATTR_PORT_NETDEV_NAME, name))
		goto nla_put_failure;

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}
static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb,
				     size_t payload_len)
{
	u64 pc = (u64)(uintptr_t) NET_DM_SKB_CB(skb)->pc;
	char buf[NET_DM_MAX_SYMBOL_LEN];
	struct nlattr *attr;
	void *hdr;
	int rc;

	hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
			  NET_DM_CMD_PACKET_ALERT);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_SW))
		goto nla_put_failure;

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_PC, pc, NET_DM_ATTR_PAD))
		goto nla_put_failure;

	snprintf(buf, sizeof(buf), "%pS", NET_DM_SKB_CB(skb)->pc);
	if (nla_put_string(msg, NET_DM_ATTR_SYMBOL, buf))
		goto nla_put_failure;

	rc = net_dm_packet_report_in_port_put(msg, skb->skb_iif, NULL);
	if (rc)
		goto nla_put_failure;

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
			      ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
		goto nla_put_failure;

	if (!payload_len)
		goto out;

	if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
		goto nla_put_failure;

	attr = skb_put(msg, nla_total_size(payload_len));
	attr->nla_type = NET_DM_ATTR_PAYLOAD;
	attr->nla_len = nla_attr_size(payload_len);
	if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
		goto nla_put_failure;

out:
	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}
#define NET_DM_MAX_PACKET_SIZE (0xffff - NLA_HDRLEN - NLA_ALIGNTO)
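/* A netlink attribute length is a u16, so the payload carried in
 * NET_DM_ATTR_PAYLOAD can never exceed 0xffff bytes minus the attribute
 * header and alignment padding.
 */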
static void net_dm_packet_report(struct sk_buff *skb)
{
	struct sk_buff *msg;
	size_t payload_len;
	int rc;

	/* Make sure we start copying the packet from the MAC header */
	if (skb->data > skb_mac_header(skb))
		skb_push(skb, skb->data - skb_mac_header(skb));
	else
		skb_pull(skb, skb_mac_header(skb) - skb->data);

	/* Ensure packet fits inside a single netlink attribute */
	payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
	if (net_dm_trunc_len)
		payload_len = min_t(size_t, net_dm_trunc_len, payload_len);

	msg = nlmsg_new(net_dm_packet_report_size(payload_len), GFP_KERNEL);
	if (!msg)
		goto out;

	rc = net_dm_packet_report_fill(msg, skb, payload_len);
	if (rc) {
		nlmsg_free(msg);
		goto out;
	}

	genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);

out:
	consume_skb(skb);
}

static void net_dm_packet_work(struct work_struct *work)
{
	struct per_cpu_dm_data *data;
	struct sk_buff_head list;
	struct sk_buff *skb;
	unsigned long flags;

	data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

	__skb_queue_head_init(&list);

	spin_lock_irqsave(&data->drop_queue.lock, flags);
	skb_queue_splice_tail_init(&data->drop_queue, &list);
	spin_unlock_irqrestore(&data->drop_queue.lock, flags);

	while ((skb = __skb_dequeue(&list)))
		net_dm_packet_report(skb);
}
static size_t
net_dm_flow_action_cookie_size(const struct net_dm_hw_metadata *hw_metadata)
{
	return hw_metadata->fa_cookie ?
	       nla_total_size(hw_metadata->fa_cookie->cookie_len) : 0;
}

static size_t
net_dm_hw_packet_report_size(size_t payload_len,
			     const struct net_dm_hw_metadata *hw_metadata)
{
	size_t size;

	size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize);

	return NLMSG_ALIGN(size) +
	       /* NET_DM_ATTR_ORIGIN */
	       nla_total_size(sizeof(u16)) +
	       /* NET_DM_ATTR_HW_TRAP_GROUP_NAME */
	       nla_total_size(strlen(hw_metadata->trap_group_name) + 1) +
	       /* NET_DM_ATTR_HW_TRAP_NAME */
	       nla_total_size(strlen(hw_metadata->trap_name) + 1) +
	       /* NET_DM_ATTR_IN_PORT */
	       net_dm_in_port_size() +
	       /* NET_DM_ATTR_FLOW_ACTION_COOKIE */
	       net_dm_flow_action_cookie_size(hw_metadata) +
	       /* NET_DM_ATTR_TIMESTAMP */
	       nla_total_size(sizeof(u64)) +
	       /* NET_DM_ATTR_ORIG_LEN */
	       nla_total_size(sizeof(u32)) +
	       /* NET_DM_ATTR_PROTO */
	       nla_total_size(sizeof(u16)) +
	       /* NET_DM_ATTR_PAYLOAD */
	       nla_total_size(payload_len);
}
static int net_dm_hw_packet_report_fill(struct sk_buff *msg,
					struct sk_buff *skb, size_t payload_len)
{
	struct net_dm_hw_metadata *hw_metadata;
	struct nlattr *attr;
	void *hdr;

	hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;

	hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
			  NET_DM_CMD_PACKET_ALERT);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_HW))
		goto nla_put_failure;

	if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_GROUP_NAME,
			   hw_metadata->trap_group_name))
		goto nla_put_failure;

	if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME,
			   hw_metadata->trap_name))
		goto nla_put_failure;

	if (hw_metadata->input_dev) {
		struct net_device *dev = hw_metadata->input_dev;
		int rc;

		rc = net_dm_packet_report_in_port_put(msg, dev->ifindex,
						      dev->name);
		if (rc)
			goto nla_put_failure;
	}

	if (hw_metadata->fa_cookie &&
	    nla_put(msg, NET_DM_ATTR_FLOW_ACTION_COOKIE,
		    hw_metadata->fa_cookie->cookie_len,
		    hw_metadata->fa_cookie->cookie))
		goto nla_put_failure;

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
			      ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
		goto nla_put_failure;

	if (!payload_len)
		goto out;

	if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
		goto nla_put_failure;

	attr = skb_put(msg, nla_total_size(payload_len));
	attr->nla_type = NET_DM_ATTR_PAYLOAD;
	attr->nla_len = nla_attr_size(payload_len);
	if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
		goto nla_put_failure;

out:
	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}
static struct net_dm_hw_metadata *
net_dm_hw_metadata_clone(const struct net_dm_hw_metadata *hw_metadata)
{
	const struct flow_action_cookie *fa_cookie;
	struct net_dm_hw_metadata *n_hw_metadata;
	const char *trap_group_name;
	const char *trap_name;

	n_hw_metadata = kzalloc(sizeof(*hw_metadata), GFP_ATOMIC);
	if (!n_hw_metadata)
		return NULL;

	trap_group_name = kstrdup(hw_metadata->trap_group_name, GFP_ATOMIC);
	if (!trap_group_name)
		goto free_hw_metadata;
	n_hw_metadata->trap_group_name = trap_group_name;

	trap_name = kstrdup(hw_metadata->trap_name, GFP_ATOMIC);
	if (!trap_name)
		goto free_trap_group;
	n_hw_metadata->trap_name = trap_name;

	if (hw_metadata->fa_cookie) {
		size_t cookie_size = sizeof(*fa_cookie) +
				     hw_metadata->fa_cookie->cookie_len;

		fa_cookie = kmemdup(hw_metadata->fa_cookie, cookie_size,
				    GFP_ATOMIC);
		if (!fa_cookie)
			goto free_trap_name;
		n_hw_metadata->fa_cookie = fa_cookie;
	}

	n_hw_metadata->input_dev = hw_metadata->input_dev;
	if (n_hw_metadata->input_dev)
		dev_hold(n_hw_metadata->input_dev);

	return n_hw_metadata;

free_trap_name:
	kfree(trap_name);
free_trap_group:
	kfree(trap_group_name);
free_hw_metadata:
	kfree(n_hw_metadata);
	return NULL;
}
static void
net_dm_hw_metadata_free(const struct net_dm_hw_metadata *hw_metadata)
{
	if (hw_metadata->input_dev)
		dev_put(hw_metadata->input_dev);
	kfree(hw_metadata->fa_cookie);
	kfree(hw_metadata->trap_name);
	kfree(hw_metadata->trap_group_name);
	kfree(hw_metadata);
}

static void net_dm_hw_packet_report(struct sk_buff *skb)
{
	struct net_dm_hw_metadata *hw_metadata;
	struct sk_buff *msg;
	size_t payload_len;
	int rc;

	if (skb->data > skb_mac_header(skb))
		skb_push(skb, skb->data - skb_mac_header(skb));
	else
		skb_pull(skb, skb_mac_header(skb) - skb->data);

	payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
	if (net_dm_trunc_len)
		payload_len = min_t(size_t, net_dm_trunc_len, payload_len);

	hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
	msg = nlmsg_new(net_dm_hw_packet_report_size(payload_len, hw_metadata),
			GFP_KERNEL);
	if (!msg)
		goto out;

	rc = net_dm_hw_packet_report_fill(msg, skb, payload_len);
	if (rc) {
		nlmsg_free(msg);
		goto out;
	}

	genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);

out:
	net_dm_hw_metadata_free(NET_DM_SKB_CB(skb)->hw_metadata);
	consume_skb(skb);
}
static void net_dm_hw_packet_work(struct work_struct *work)
{
	struct per_cpu_dm_data *hw_data;
	struct sk_buff_head list;
	struct sk_buff *skb;
	unsigned long flags;

	hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

	__skb_queue_head_init(&list);

	spin_lock_irqsave(&hw_data->drop_queue.lock, flags);
	skb_queue_splice_tail_init(&hw_data->drop_queue, &list);
	spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);

	while ((skb = __skb_dequeue(&list)))
		net_dm_hw_packet_report(skb);
}

static void
net_dm_hw_packet_probe(struct sk_buff *skb,
		       const struct net_dm_hw_metadata *hw_metadata)
{
	struct net_dm_hw_metadata *n_hw_metadata;
	ktime_t tstamp = ktime_get_real();
	struct per_cpu_dm_data *hw_data;
	struct sk_buff *nskb;
	unsigned long flags;

	if (!skb_mac_header_was_set(skb))
		return;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return;

	n_hw_metadata = net_dm_hw_metadata_clone(hw_metadata);
	if (!n_hw_metadata)
		goto free;

	NET_DM_SKB_CB(nskb)->hw_metadata = n_hw_metadata;
	nskb->tstamp = tstamp;

	hw_data = this_cpu_ptr(&dm_hw_cpu_data);

	spin_lock_irqsave(&hw_data->drop_queue.lock, flags);
	if (skb_queue_len(&hw_data->drop_queue) < net_dm_queue_len)
		__skb_queue_tail(&hw_data->drop_queue, nskb);
	else
		goto unlock_free;
	spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);

	schedule_work(&hw_data->dm_alert_work);

	return;

unlock_free:
	spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);
	u64_stats_update_begin(&hw_data->stats.syncp);
	hw_data->stats.dropped++;
	u64_stats_update_end(&hw_data->stats.syncp);
	net_dm_hw_metadata_free(n_hw_metadata);
free:
	consume_skb(nskb);
}
static const struct net_dm_alert_ops net_dm_alert_packet_ops = {
	.kfree_skb_probe	= net_dm_packet_trace_kfree_skb_hit,
	.napi_poll_probe	= net_dm_packet_trace_napi_poll_hit,
	.work_item_func		= net_dm_packet_work,
	.hw_work_item_func	= net_dm_hw_packet_work,
	.hw_probe		= net_dm_hw_packet_probe,
};

static const struct net_dm_alert_ops *net_dm_alert_ops_arr[] = {
	[NET_DM_ALERT_MODE_SUMMARY] = &net_dm_alert_summary_ops,
	[NET_DM_ALERT_MODE_PACKET] = &net_dm_alert_packet_ops,
};
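/* All probes and work items are dispatched through this array so that
 * changing net_dm_alert_mode via NET_DM_CMD_CONFIG transparently selects
 * the matching implementation.
 */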
void net_dm_hw_report(struct sk_buff *skb,
		      const struct net_dm_hw_metadata *hw_metadata)
{
	rcu_read_lock();

	if (!monitor_hw)
		goto out;

	net_dm_alert_ops_arr[net_dm_alert_mode]->hw_probe(skb, hw_metadata);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(net_dm_hw_report);
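/* net_dm_hw_report() is the entry point for hardware-originated drops; the
 * devlink trap infrastructure, for example, forwards trapped packets here
 * along with their trap name, trap group and ingress port metadata.
 */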
static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack)
{
	const struct net_dm_alert_ops *ops;
	int cpu;

	if (monitor_hw) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already enabled");
		return -EAGAIN;
	}

	ops = net_dm_alert_ops_arr[net_dm_alert_mode];

	if (!try_module_get(THIS_MODULE)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module");
		return -ENODEV;
	}

	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
		struct net_dm_hw_entries *hw_entries;

		INIT_WORK(&hw_data->dm_alert_work, ops->hw_work_item_func);
		timer_setup(&hw_data->send_timer, sched_send_work, 0);
		hw_entries = net_dm_hw_reset_per_cpu_data(hw_data);
		kfree(hw_entries);
	}

	monitor_hw = true;

	return 0;
}

static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack)
{
	int cpu;

	if (!monitor_hw) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled");
		return;
	}

	monitor_hw = false;

	/* After this call returns we are guaranteed that no CPU is processing
	 * any hardware drops.
	 */
	synchronize_rcu();

	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
		struct sk_buff *skb;

		del_timer_sync(&hw_data->send_timer);
		cancel_work_sync(&hw_data->dm_alert_work);
		while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
			struct net_dm_hw_metadata *hw_metadata;

			hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
			net_dm_hw_metadata_free(hw_metadata);
			consume_skb(skb);
		}
	}

	module_put(THIS_MODULE);
}
static int net_dm_trace_on_set(struct netlink_ext_ack *extack)
{
	const struct net_dm_alert_ops *ops;
	int cpu, rc;

	ops = net_dm_alert_ops_arr[net_dm_alert_mode];

	if (!try_module_get(THIS_MODULE)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module");
		return -ENODEV;
	}

	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
		struct sk_buff *skb;

		INIT_WORK(&data->dm_alert_work, ops->work_item_func);
		timer_setup(&data->send_timer, sched_send_work, 0);
		/* Allocate a new per-CPU skb for the summary alert message and
		 * free the old one which might contain stale data from
		 * previous tracing.
		 */
		skb = reset_per_cpu_data(data);
		consume_skb(skb);
	}

	rc = register_trace_kfree_skb(ops->kfree_skb_probe, NULL);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to kfree_skb() tracepoint");
		goto err_module_put;
	}

	rc = register_trace_napi_poll(ops->napi_poll_probe, NULL);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to napi_poll() tracepoint");
		goto err_unregister_trace;
	}

	return 0;

err_unregister_trace:
	unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);
err_module_put:
	module_put(THIS_MODULE);
	return rc;
}
static void net_dm_trace_off_set(void)
{
	struct dm_hw_stat_delta *new_stat, *temp;
	const struct net_dm_alert_ops *ops;
	int cpu;

	ops = net_dm_alert_ops_arr[net_dm_alert_mode];

	unregister_trace_napi_poll(ops->napi_poll_probe, NULL);
	unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);

	tracepoint_synchronize_unregister();

	/* Make sure we do not send notifications to user space after request
	 * to stop tracing returns.
	 */
	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
		struct sk_buff *skb;

		del_timer_sync(&data->send_timer);
		cancel_work_sync(&data->dm_alert_work);
		while ((skb = __skb_dequeue(&data->drop_queue)))
			consume_skb(skb);
	}

	list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) {
		if (new_stat->dev == NULL) {
			list_del_rcu(&new_stat->list);
			kfree_rcu(new_stat, rcu);
		}
	}

	module_put(THIS_MODULE);
}
static int set_all_monitor_traces(int state, struct netlink_ext_ack *extack)
{
	int rc = 0;

	if (state == trace_state) {
		NL_SET_ERR_MSG_MOD(extack, "Trace state already set to requested state");
		return -EAGAIN;
	}

	switch (state) {
	case TRACE_ON:
		rc = net_dm_trace_on_set(extack);
		break;
	case TRACE_OFF:
		net_dm_trace_off_set();
		break;
	default:
		rc = 1;
		break;
	}

	if (!rc)
		trace_state = state;
	else
		rc = -EINPROGRESS;

	return rc;
}

static bool net_dm_is_monitoring(void)
{
	return trace_state == TRACE_ON || monitor_hw;
}
static int net_dm_alert_mode_get_from_info(struct genl_info *info,
					   enum net_dm_alert_mode *p_alert_mode)
{
	u8 val;

	val = nla_get_u8(info->attrs[NET_DM_ATTR_ALERT_MODE]);

	switch (val) {
	case NET_DM_ALERT_MODE_SUMMARY: /* fall-through */
	case NET_DM_ALERT_MODE_PACKET:
		*p_alert_mode = val;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int net_dm_alert_mode_set(struct genl_info *info)
{
	struct netlink_ext_ack *extack = info->extack;
	enum net_dm_alert_mode alert_mode;
	int rc;

	if (!info->attrs[NET_DM_ATTR_ALERT_MODE])
		return 0;

	rc = net_dm_alert_mode_get_from_info(info, &alert_mode);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid alert mode");
		return rc;
	}

	net_dm_alert_mode = alert_mode;

	return 0;
}

static void net_dm_trunc_len_set(struct genl_info *info)
{
	if (!info->attrs[NET_DM_ATTR_TRUNC_LEN])
		return;

	net_dm_trunc_len = nla_get_u32(info->attrs[NET_DM_ATTR_TRUNC_LEN]);
}

static void net_dm_queue_len_set(struct genl_info *info)
{
	if (!info->attrs[NET_DM_ATTR_QUEUE_LEN])
		return;

	net_dm_queue_len = nla_get_u32(info->attrs[NET_DM_ATTR_QUEUE_LEN]);
}
static int net_dm_cmd_config(struct sk_buff *skb,
			     struct genl_info *info)
{
	struct netlink_ext_ack *extack = info->extack;
	int rc;

	if (net_dm_is_monitoring()) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot configure drop monitor during monitoring");
		return -EBUSY;
	}

	rc = net_dm_alert_mode_set(info);
	if (rc)
		return rc;

	net_dm_trunc_len_set(info);

	net_dm_queue_len_set(info);

	return 0;
}

static int net_dm_monitor_start(bool set_sw, bool set_hw,
				struct netlink_ext_ack *extack)
{
	bool sw_set = false;
	int rc;

	if (set_sw) {
		rc = set_all_monitor_traces(TRACE_ON, extack);
		if (rc)
			return rc;
		sw_set = true;
	}

	if (set_hw) {
		rc = net_dm_hw_monitor_start(extack);
		if (rc)
			goto err_monitor_hw;
	}

	return 0;

err_monitor_hw:
	if (sw_set)
		set_all_monitor_traces(TRACE_OFF, extack);
	return rc;
}

static void net_dm_monitor_stop(bool set_sw, bool set_hw,
				struct netlink_ext_ack *extack)
{
	if (set_hw)
		net_dm_hw_monitor_stop(extack);
	if (set_sw)
		set_all_monitor_traces(TRACE_OFF, extack);
}
static int net_dm_cmd_trace(struct sk_buff *skb,
			    struct genl_info *info)
{
	bool set_sw = !!info->attrs[NET_DM_ATTR_SW_DROPS];
	bool set_hw = !!info->attrs[NET_DM_ATTR_HW_DROPS];
	struct netlink_ext_ack *extack = info->extack;

	/* To maintain backward compatibility, we start / stop monitoring of
	 * software drops if no flag is specified.
	 */
	if (!set_sw && !set_hw)
		set_sw = true;

	switch (info->genlhdr->cmd) {
	case NET_DM_CMD_START:
		return net_dm_monitor_start(set_sw, set_hw, extack);
	case NET_DM_CMD_STOP:
		net_dm_monitor_stop(set_sw, set_hw, extack);
		return 0;
	}

	return -EOPNOTSUPP;
}
static int net_dm_config_fill(struct sk_buff *msg, struct genl_info *info)
{
	void *hdr;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &net_drop_monitor_family, 0, NET_DM_CMD_CONFIG_NEW);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u8(msg, NET_DM_ATTR_ALERT_MODE, net_dm_alert_mode))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_TRUNC_LEN, net_dm_trunc_len))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_QUEUE_LEN, net_dm_queue_len))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

static int net_dm_cmd_config_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	int rc;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rc = net_dm_config_fill(msg, info);
	if (rc)
		goto free_msg;

	return genlmsg_reply(msg, info);

free_msg:
	nlmsg_free(msg);
	return rc;
}
static void net_dm_stats_read(struct net_dm_stats *stats)
{
	int cpu;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
		struct net_dm_stats *cpu_stats = &data->stats;
		unsigned int start;
		u64 dropped;

		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			dropped = cpu_stats->dropped;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->dropped += dropped;
	}
}

static int net_dm_stats_put(struct sk_buff *msg)
{
	struct net_dm_stats stats;
	struct nlattr *attr;

	net_dm_stats_read(&stats);

	attr = nla_nest_start(msg, NET_DM_ATTR_STATS);
	if (!attr)
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
			      stats.dropped, NET_DM_ATTR_PAD))
		goto nla_put_failure;

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}
static void net_dm_hw_stats_read(struct net_dm_stats *stats)
{
	int cpu;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
		struct net_dm_stats *cpu_stats = &hw_data->stats;
		unsigned int start;
		u64 dropped;

		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			dropped = cpu_stats->dropped;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->dropped += dropped;
	}
}

static int net_dm_hw_stats_put(struct sk_buff *msg)
{
	struct net_dm_stats stats;
	struct nlattr *attr;

	net_dm_hw_stats_read(&stats);

	attr = nla_nest_start(msg, NET_DM_ATTR_HW_STATS);
	if (!attr)
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
			      stats.dropped, NET_DM_ATTR_PAD))
		goto nla_put_failure;

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}
static int net_dm_stats_fill(struct sk_buff *msg, struct genl_info *info)
{
	void *hdr;
	int rc;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &net_drop_monitor_family, 0, NET_DM_CMD_STATS_NEW);
	if (!hdr)
		return -EMSGSIZE;

	rc = net_dm_stats_put(msg);
	if (rc)
		goto nla_put_failure;

	rc = net_dm_hw_stats_put(msg);
	if (rc)
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

static int net_dm_cmd_stats_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	int rc;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rc = net_dm_stats_fill(msg, info);
	if (rc)
		goto free_msg;

	return genlmsg_reply(msg, info);

free_msg:
	nlmsg_free(msg);
	return rc;
}
static int dropmon_net_event(struct notifier_block *ev_block,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dm_hw_stat_delta *new_stat = NULL;
	struct dm_hw_stat_delta *tmp;

	switch (event) {
	case NETDEV_REGISTER:
		new_stat = kzalloc(sizeof(struct dm_hw_stat_delta), GFP_KERNEL);

		if (!new_stat)
			goto out;

		new_stat->dev = dev;
		new_stat->last_rx = jiffies;
		mutex_lock(&net_dm_mutex);
		list_add_rcu(&new_stat->list, &hw_stats_list);
		mutex_unlock(&net_dm_mutex);
		break;
	case NETDEV_UNREGISTER:
		mutex_lock(&net_dm_mutex);
		list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
			if (new_stat->dev == dev) {
				new_stat->dev = NULL;
				if (trace_state == TRACE_OFF) {
					list_del_rcu(&new_stat->list);
					kfree_rcu(new_stat, rcu);
					break;
				}
			}
		}
		mutex_unlock(&net_dm_mutex);
		break;
	}
out:
	return NOTIFY_DONE;
}
static const struct nla_policy net_dm_nl_policy[NET_DM_ATTR_MAX + 1] = {
	[NET_DM_ATTR_UNSPEC] = { .strict_start_type = NET_DM_ATTR_UNSPEC + 1 },
	[NET_DM_ATTR_ALERT_MODE] = { .type = NLA_U8 },
	[NET_DM_ATTR_TRUNC_LEN] = { .type = NLA_U32 },
	[NET_DM_ATTR_QUEUE_LEN] = { .type = NLA_U32 },
	[NET_DM_ATTR_SW_DROPS] = { .type = NLA_FLAG },
	[NET_DM_ATTR_HW_DROPS] = { .type = NLA_FLAG },
};
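/* Attributes from NET_DM_ATTR_UNSPEC + 1 onwards are subject to strict
 * validation; unknown or malformed attributes in that range are rejected
 * instead of being silently ignored.
 */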
static const struct genl_ops dropmon_ops[] = {
	{
		.cmd = NET_DM_CMD_CONFIG,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = net_dm_cmd_config,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = NET_DM_CMD_START,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = net_dm_cmd_trace,
	},
	{
		.cmd = NET_DM_CMD_STOP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = net_dm_cmd_trace,
	},
	{
		.cmd = NET_DM_CMD_CONFIG_GET,
		.doit = net_dm_cmd_config_get,
	},
	{
		.cmd = NET_DM_CMD_STATS_GET,
		.doit = net_dm_cmd_stats_get,
	},
};

static int net_dm_nl_pre_doit(const struct genl_ops *ops,
			      struct sk_buff *skb, struct genl_info *info)
{
	mutex_lock(&net_dm_mutex);

	return 0;
}

static void net_dm_nl_post_doit(const struct genl_ops *ops,
				struct sk_buff *skb, struct genl_info *info)
{
	mutex_unlock(&net_dm_mutex);
}
static struct genl_family net_drop_monitor_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= "NET_DM",
	.version	= 2,
	.maxattr	= NET_DM_ATTR_MAX,
	.policy		= net_dm_nl_policy,
	.pre_doit	= net_dm_nl_pre_doit,
	.post_doit	= net_dm_nl_post_doit,
	.module		= THIS_MODULE,
	.ops		= dropmon_ops,
	.n_ops		= ARRAY_SIZE(dropmon_ops),
	.mcgrps		= dropmon_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(dropmon_mcgrps),
};
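/* User space drives this family by resolving the "NET_DM" generic netlink
 * name and joining the "events" multicast group; the dropwatch tool, for
 * instance, issues NET_DM_CMD_START/STOP and listens for NET_DM_CMD_ALERT
 * messages.
 */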
static struct notifier_block dropmon_net_notifier = {
	.notifier_call = dropmon_net_event
};

static void __net_dm_cpu_data_init(struct per_cpu_dm_data *data)
{
	spin_lock_init(&data->lock);
	skb_queue_head_init(&data->drop_queue);
	u64_stats_init(&data->stats.syncp);
}

static void __net_dm_cpu_data_fini(struct per_cpu_dm_data *data)
{
	WARN_ON(!skb_queue_empty(&data->drop_queue));
}

static void net_dm_cpu_data_init(int cpu)
{
	struct per_cpu_dm_data *data;

	data = &per_cpu(dm_cpu_data, cpu);
	__net_dm_cpu_data_init(data);
}

static void net_dm_cpu_data_fini(int cpu)
{
	struct per_cpu_dm_data *data;

	data = &per_cpu(dm_cpu_data, cpu);
	/* At this point, we should have exclusive access
	 * to this struct and can free the skb inside it.
	 */
	consume_skb(data->skb);
	__net_dm_cpu_data_fini(data);
}

static void net_dm_hw_cpu_data_init(int cpu)
{
	struct per_cpu_dm_data *hw_data;

	hw_data = &per_cpu(dm_hw_cpu_data, cpu);
	__net_dm_cpu_data_init(hw_data);
}

static void net_dm_hw_cpu_data_fini(int cpu)
{
	struct per_cpu_dm_data *hw_data;

	hw_data = &per_cpu(dm_hw_cpu_data, cpu);
	kfree(hw_data->hw_entries);
	__net_dm_cpu_data_fini(hw_data);
}
static int __init init_net_drop_monitor(void)
{
	int cpu, rc;

	pr_info("Initializing network drop monitor service\n");

	if (sizeof(void *) > 8) {
		pr_err("Unable to store program counters on this arch, Drop monitor failed\n");
		return -ENOSPC;
	}

	rc = genl_register_family(&net_drop_monitor_family);
	if (rc) {
		pr_err("Could not create drop monitor netlink family\n");
		return rc;
	}
	WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);

	rc = register_netdevice_notifier(&dropmon_net_notifier);
	if (rc < 0) {
		pr_crit("Failed to register netdevice notifier\n");
		goto out_unreg;
	}

	rc = 0;

	for_each_possible_cpu(cpu) {
		net_dm_cpu_data_init(cpu);
		net_dm_hw_cpu_data_init(cpu);
	}

	goto out;

out_unreg:
	genl_unregister_family(&net_drop_monitor_family);
out:
	return rc;
}
static void exit_net_drop_monitor(void)
{
	int cpu;

	BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));

	/*
	 * Because of the module_get/put we do in the trace state change path
	 * we are guaranteed not to have any current users when we get here
	 */

	for_each_possible_cpu(cpu) {
		net_dm_hw_cpu_data_fini(cpu);
		net_dm_cpu_data_fini(cpu);
	}

	BUG_ON(genl_unregister_family(&net_drop_monitor_family));
}

module_init(init_net_drop_monitor);
module_exit(exit_net_drop_monitor);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
MODULE_ALIAS_GENL_FAMILY("NET_DM");
MODULE_DESCRIPTION("Monitoring code for network dropped packet alerts");