// SPDX-License-Identifier: GPL-2.0-only
/*
 * Monitoring code for network dropped packet alerts
 *
 * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/netlink.h>
#include <linux/net_dropmon.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/drop_monitor.h>
#include <net/genetlink.h>
#include <net/netevent.h>
#include <net/flow_offload.h>

#include <trace/events/skb.h>
#include <trace/events/napi.h>

#include <asm/unaligned.h>

#define TRACE_ON 1
#define TRACE_OFF 0

/*
 * Globals: the current trace state for software drops and whether
 * hardware drops are monitored. The netlink alerts themselves are
 * sent from the per-CPU work items defined further below.
 */
static int trace_state = TRACE_OFF;
static bool monitor_hw;

/* net_dm_mutex
 *
 * An overall lock guarding every operation coming from userspace.
 * It also guards the global 'hw_stats_list' list.
 */
static DEFINE_MUTEX(net_dm_mutex);

struct net_dm_stats {
        u64 dropped;
        struct u64_stats_sync syncp;
};

#define NET_DM_MAX_HW_TRAP_NAME_LEN 40

struct net_dm_hw_entry {
        char trap_name[NET_DM_MAX_HW_TRAP_NAME_LEN];
        u32 count;
};

struct net_dm_hw_entries {
        u32 num_entries;
        struct net_dm_hw_entry entries[];
};

struct per_cpu_dm_data {
        spinlock_t              lock;   /* Protects 'skb', 'hw_entries' and
                                         * 'send_timer'
                                         */
        union {
                struct sk_buff                  *skb;
                struct net_dm_hw_entries        *hw_entries;
        };
        struct sk_buff_head     drop_queue;
        struct work_struct      dm_alert_work;
        struct timer_list       send_timer;
        struct net_dm_stats     stats;
};

struct dm_hw_stat_delta {
        struct net_device *dev;
        unsigned long last_rx;
        struct list_head list;
        struct rcu_head rcu;
        unsigned long last_drop_val;
};

static struct genl_family net_drop_monitor_family;

static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_hw_cpu_data);

static int dm_hit_limit = 64;
static int dm_delay = 1;
static unsigned long dm_hw_check_delta = 2*HZ;
static LIST_HEAD(hw_stats_list);

static enum net_dm_alert_mode net_dm_alert_mode = NET_DM_ALERT_MODE_SUMMARY;
static u32 net_dm_trunc_len;
static u32 net_dm_queue_len = 1000;

struct net_dm_alert_ops {
        void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb,
                                void *location);
        void (*napi_poll_probe)(void *ignore, struct napi_struct *napi,
                                int work, int budget);
        void (*work_item_func)(struct work_struct *work);
        void (*hw_work_item_func)(struct work_struct *work);
        void (*hw_probe)(struct sk_buff *skb,
                         const struct net_dm_hw_metadata *hw_metadata);
};

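/* Control block stashed in skb->cb of each queued clone: packet-mode
 * software drops record the program counter of the drop site, while
 * hardware drops carry a deep copy of the trap metadata.
 */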
struct net_dm_skb_cb {
        union {
                struct net_dm_hw_metadata *hw_metadata;
                void *pc;
        };
};

#define NET_DM_SKB_CB(__skb) ((struct net_dm_skb_cb *)&((__skb)->cb[0]))

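/* Allocate a fresh summary-alert skb, pre-sized for up to 'dm_hit_limit'
 * drop points, and swap it into 'data->skb' under the lock. The previous
 * skb is finalized and returned to the caller (NULL on allocation
 * failure, in which case the send timer is re-armed to retry in 1/10
 * second).
 */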
static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
{
        size_t al;
        struct net_dm_alert_msg *msg;
        struct nlattr *nla;
        struct sk_buff *skb;
        unsigned long flags;
        void *msg_header;

        al = sizeof(struct net_dm_alert_msg);
        al += dm_hit_limit * sizeof(struct net_dm_drop_point);
        al += sizeof(struct nlattr);

        skb = genlmsg_new(al, GFP_KERNEL);

        if (!skb)
                goto err;

        msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
                                 0, NET_DM_CMD_ALERT);
        if (!msg_header) {
                nlmsg_free(skb);
                skb = NULL;
                goto err;
        }
        nla = nla_reserve(skb, NLA_UNSPEC,
                          sizeof(struct net_dm_alert_msg));
        if (!nla) {
                nlmsg_free(skb);
                skb = NULL;
                goto err;
        }
        msg = nla_data(nla);
        memset(msg, 0, al);
        goto out;

err:
        mod_timer(&data->send_timer, jiffies + HZ / 10);
out:
        spin_lock_irqsave(&data->lock, flags);
        swap(data->skb, skb);
        spin_unlock_irqrestore(&data->lock, flags);

        if (skb) {
                struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
                struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);

                genlmsg_end(skb, genlmsg_data(gnlh));
        }

        return skb;
}

static const struct genl_multicast_group dropmon_mcgrps[] = {
        { .name = "events", },
};

static void send_dm_alert(struct work_struct *work)
{
        struct sk_buff *skb;
        struct per_cpu_dm_data *data;

        data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

        skb = reset_per_cpu_data(data);

        if (skb)
                genlmsg_multicast(&net_drop_monitor_family, skb, 0,
                                  0, GFP_KERNEL);
}

/*
 * This is the timer function to delay the sending of an alert
 * in the event that more drops will arrive during the
 * hysteresis period.
 */
static void sched_send_work(struct timer_list *t)
{
        struct per_cpu_dm_data *data = from_timer(data, t, send_timer);

        schedule_work(&data->dm_alert_work);
}

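/* Summary-mode software probe: aggregate drops by program counter in the
 * pre-allocated per-CPU alert skb. A repeated location only bumps its
 * count; a new location appends a drop point until 'dm_hit_limit' is
 * reached. Adding an entry arms the hysteresis timer ('dm_delay'
 * seconds) unless it is already pending.
 */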
static void trace_drop_common(struct sk_buff *skb, void *location)
{
        struct net_dm_alert_msg *msg;
        struct net_dm_drop_point *point;
        struct nlmsghdr *nlh;
        struct nlattr *nla;
        int i;
        struct sk_buff *dskb;
        struct per_cpu_dm_data *data;
        unsigned long flags;

        local_irq_save(flags);
        data = this_cpu_ptr(&dm_cpu_data);
        spin_lock(&data->lock);
        dskb = data->skb;

        if (!dskb)
                goto out;

        nlh = (struct nlmsghdr *)dskb->data;
        nla = genlmsg_data(nlmsg_data(nlh));
        msg = nla_data(nla);
        point = msg->points;
        for (i = 0; i < msg->entries; i++) {
                if (!memcmp(&location, &point->pc, sizeof(void *))) {
                        point->count++;
                        goto out;
                }
                point++;
        }
        if (msg->entries == dm_hit_limit)
                goto out;
        /*
         * We need to create a new entry
         */
        __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
        nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
        memcpy(point->pc, &location, sizeof(void *));
        point->count = 1;
        msg->entries++;

        if (!timer_pending(&data->send_timer)) {
                data->send_timer.expires = jiffies + dm_delay * HZ;
                add_timer(&data->send_timer);
        }

out:
        spin_unlock_irqrestore(&data->lock, flags);
}

static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
{
        trace_drop_common(skb, location);
}

static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi,
                                int work, int budget)
{
        struct dm_hw_stat_delta *new_stat;

        /*
         * Don't check napi structures with no associated device
         */
        if (!napi->dev)
                return;

        rcu_read_lock();
        list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
                /*
                 * Only add a note to our monitor buffer if:
                 * 1) this is the dev we received on
                 * 2) at least 'dm_hw_check_delta' has elapsed since last_rx
                 * 3) our rx_dropped count has gone up
                 */
                if ((new_stat->dev == napi->dev)  &&
                    (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
                    (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
                        trace_drop_common(NULL, NULL);
                        new_stat->last_drop_val = napi->dev->stats.rx_dropped;
                        new_stat->last_rx = jiffies;
                        break;
                }
        }
        rcu_read_unlock();
}

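/* Hardware counterpart of reset_per_cpu_data(): swap in a zeroed array
 * sized for 'dm_hit_limit' trap entries and hand the old one back to the
 * caller for reporting (and freeing).
 */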
static struct net_dm_hw_entries *
net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
{
        struct net_dm_hw_entries *hw_entries;
        unsigned long flags;

        hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit),
                             GFP_KERNEL);
        if (!hw_entries) {
                /* If the allocation failed, retry in 1/10 second. Without
                 * the retry, the probe function would constantly bail out
                 * on the NULL 'hw_entries' and nothing would ever be
                 * reported.
                 */
                mod_timer(&hw_data->send_timer, jiffies + HZ / 10);
        }

        spin_lock_irqsave(&hw_data->lock, flags);
        swap(hw_data->hw_entries, hw_entries);
        spin_unlock_irqrestore(&hw_data->lock, flags);

        return hw_entries;
}

static int net_dm_hw_entry_put(struct sk_buff *msg,
                               const struct net_dm_hw_entry *hw_entry)
{
        struct nlattr *attr;

        attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRY);
        if (!attr)
                return -EMSGSIZE;

        if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME, hw_entry->trap_name))
                goto nla_put_failure;

        if (nla_put_u32(msg, NET_DM_ATTR_HW_TRAP_COUNT, hw_entry->count))
                goto nla_put_failure;

        nla_nest_end(msg, attr);

        return 0;

nla_put_failure:
        nla_nest_cancel(msg, attr);
        return -EMSGSIZE;
}

static int net_dm_hw_entries_put(struct sk_buff *msg,
                                 const struct net_dm_hw_entries *hw_entries)
{
        struct nlattr *attr;
        int i;

        attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRIES);
        if (!attr)
                return -EMSGSIZE;

        for (i = 0; i < hw_entries->num_entries; i++) {
                int rc;

                rc = net_dm_hw_entry_put(msg, &hw_entries->entries[i]);
                if (rc)
                        goto nla_put_failure;
        }

        nla_nest_end(msg, attr);

        return 0;

nla_put_failure:
        nla_nest_cancel(msg, attr);
        return -EMSGSIZE;
}

static int
net_dm_hw_summary_report_fill(struct sk_buff *msg,
                              const struct net_dm_hw_entries *hw_entries)
{
        struct net_dm_alert_msg anc_hdr = { 0 };
        void *hdr;
        int rc;

        hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
                          NET_DM_CMD_ALERT);
        if (!hdr)
                return -EMSGSIZE;

        /* We need to put the ancillary header in order not to break user
         * space.
         */
        if (nla_put(msg, NLA_UNSPEC, sizeof(anc_hdr), &anc_hdr))
                goto nla_put_failure;

        rc = net_dm_hw_entries_put(msg, hw_entries);
        if (rc)
                goto nla_put_failure;

        genlmsg_end(msg, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(msg, hdr);
        return -EMSGSIZE;
}

static void net_dm_hw_summary_work(struct work_struct *work)
{
        struct net_dm_hw_entries *hw_entries;
        struct per_cpu_dm_data *hw_data;
        struct sk_buff *msg;
        int rc;

        hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

        hw_entries = net_dm_hw_reset_per_cpu_data(hw_data);
        if (!hw_entries)
                return;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                goto out;

        rc = net_dm_hw_summary_report_fill(msg, hw_entries);
        if (rc) {
                nlmsg_free(msg);
                goto out;
        }

        genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);

out:
        kfree(hw_entries);
}

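/* Summary-mode hardware probe: aggregate trapped packets by trap name in
 * the per-CPU entries array, mirroring what trace_drop_common() does for
 * software drops.
 */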
static void
net_dm_hw_summary_probe(struct sk_buff *skb,
                        const struct net_dm_hw_metadata *hw_metadata)
{
        struct net_dm_hw_entries *hw_entries;
        struct net_dm_hw_entry *hw_entry;
        struct per_cpu_dm_data *hw_data;
        unsigned long flags;
        int i;

        hw_data = this_cpu_ptr(&dm_hw_cpu_data);
        spin_lock_irqsave(&hw_data->lock, flags);
        hw_entries = hw_data->hw_entries;

        if (!hw_entries)
                goto out;

        for (i = 0; i < hw_entries->num_entries; i++) {
                hw_entry = &hw_entries->entries[i];
                if (!strncmp(hw_entry->trap_name, hw_metadata->trap_name,
                             NET_DM_MAX_HW_TRAP_NAME_LEN - 1)) {
                        hw_entry->count++;
                        goto out;
                }
        }
        if (WARN_ON_ONCE(hw_entries->num_entries == dm_hit_limit))
                goto out;

        hw_entry = &hw_entries->entries[hw_entries->num_entries];
        strlcpy(hw_entry->trap_name, hw_metadata->trap_name,
                NET_DM_MAX_HW_TRAP_NAME_LEN - 1);
        hw_entry->count = 1;
        hw_entries->num_entries++;

        if (!timer_pending(&hw_data->send_timer)) {
                hw_data->send_timer.expires = jiffies + dm_delay * HZ;
                add_timer(&hw_data->send_timer);
        }

out:
        spin_unlock_irqrestore(&hw_data->lock, flags);
}

static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
        .kfree_skb_probe        = trace_kfree_skb_hit,
        .napi_poll_probe        = trace_napi_poll_hit,
        .work_item_func         = send_dm_alert,
        .hw_work_item_func      = net_dm_hw_summary_work,
        .hw_probe               = net_dm_hw_summary_probe,
};

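/* Packet-mode software probe: clone the dropped skb in atomic context,
 * stash the drop location and timestamp on the clone and queue it for
 * the per-CPU worker. Clones beyond 'net_dm_queue_len' are discarded and
 * only counted in the per-CPU stats.
 */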
static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
                                              struct sk_buff *skb,
                                              void *location)
{
        ktime_t tstamp = ktime_get_real();
        struct per_cpu_dm_data *data;
        struct sk_buff *nskb;
        unsigned long flags;

        if (!skb_mac_header_was_set(skb))
                return;

        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
                return;

        NET_DM_SKB_CB(nskb)->pc = location;
        /* Override the timestamp because we care about the time when the
         * packet was dropped.
         */
        nskb->tstamp = tstamp;

        data = this_cpu_ptr(&dm_cpu_data);

        spin_lock_irqsave(&data->drop_queue.lock, flags);
        if (skb_queue_len(&data->drop_queue) < net_dm_queue_len)
                __skb_queue_tail(&data->drop_queue, nskb);
        else
                goto unlock_free;
        spin_unlock_irqrestore(&data->drop_queue.lock, flags);

        schedule_work(&data->dm_alert_work);

        return;

unlock_free:
        spin_unlock_irqrestore(&data->drop_queue.lock, flags);
        u64_stats_update_begin(&data->stats.syncp);
        data->stats.dropped++;
        u64_stats_update_end(&data->stats.syncp);
        consume_skb(nskb);
}

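/* Packet mode does not make use of the NAPI poll tracepoint; the stub
 * only exists so that both alert modes can share the same tracepoint
 * registration code in net_dm_trace_on_set().
 */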
static void net_dm_packet_trace_napi_poll_hit(void *ignore,
                                              struct napi_struct *napi,
                                              int work, int budget)
{
}

static size_t net_dm_in_port_size(void)
{
               /* NET_DM_ATTR_IN_PORT nest */
        return nla_total_size(0) +
               /* NET_DM_ATTR_PORT_NETDEV_IFINDEX */
               nla_total_size(sizeof(u32)) +
               /* NET_DM_ATTR_PORT_NETDEV_NAME */
               nla_total_size(IFNAMSIZ + 1);
}

#define NET_DM_MAX_SYMBOL_LEN 40

static size_t net_dm_packet_report_size(size_t payload_len)
{
        size_t size;

        size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize);

        return NLMSG_ALIGN(size) +
               /* NET_DM_ATTR_ORIGIN */
               nla_total_size(sizeof(u16)) +
               /* NET_DM_ATTR_PC */
               nla_total_size(sizeof(u64)) +
               /* NET_DM_ATTR_SYMBOL */
               nla_total_size(NET_DM_MAX_SYMBOL_LEN + 1) +
               /* NET_DM_ATTR_IN_PORT */
               net_dm_in_port_size() +
               /* NET_DM_ATTR_TIMESTAMP */
               nla_total_size(sizeof(u64)) +
               /* NET_DM_ATTR_ORIG_LEN */
               nla_total_size(sizeof(u32)) +
               /* NET_DM_ATTR_PROTO */
               nla_total_size(sizeof(u16)) +
               /* NET_DM_ATTR_PAYLOAD */
               nla_total_size(payload_len);
}

static int net_dm_packet_report_in_port_put(struct sk_buff *msg, int ifindex,
                                            const char *name)
{
        struct nlattr *attr;

        attr = nla_nest_start(msg, NET_DM_ATTR_IN_PORT);
        if (!attr)
                return -EMSGSIZE;

        if (ifindex &&
            nla_put_u32(msg, NET_DM_ATTR_PORT_NETDEV_IFINDEX, ifindex))
                goto nla_put_failure;

        if (name && nla_put_string(msg, NET_DM_ATTR_PORT_NETDEV_NAME, name))
                goto nla_put_failure;

        nla_nest_end(msg, attr);

        return 0;

nla_put_failure:
        nla_nest_cancel(msg, attr);
        return -EMSGSIZE;
}

static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb,
                                     size_t payload_len)
{
        u64 pc = (u64)(uintptr_t) NET_DM_SKB_CB(skb)->pc;
        char buf[NET_DM_MAX_SYMBOL_LEN];
        struct nlattr *attr;
        void *hdr;
        int rc;

        hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
                          NET_DM_CMD_PACKET_ALERT);
        if (!hdr)
                return -EMSGSIZE;

        if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_SW))
                goto nla_put_failure;

        if (nla_put_u64_64bit(msg, NET_DM_ATTR_PC, pc, NET_DM_ATTR_PAD))
                goto nla_put_failure;

        snprintf(buf, sizeof(buf), "%pS", NET_DM_SKB_CB(skb)->pc);
        if (nla_put_string(msg, NET_DM_ATTR_SYMBOL, buf))
                goto nla_put_failure;

        rc = net_dm_packet_report_in_port_put(msg, skb->skb_iif, NULL);
        if (rc)
                goto nla_put_failure;

        if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
                              ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
                goto nla_put_failure;

        if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
                goto nla_put_failure;

        if (!payload_len)
                goto out;

        if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
                goto nla_put_failure;

        attr = skb_put(msg, nla_total_size(payload_len));
        attr->nla_type = NET_DM_ATTR_PAYLOAD;
        attr->nla_len = nla_attr_size(payload_len);
        if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
                goto nla_put_failure;

out:
        genlmsg_end(msg, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(msg, hdr);
        return -EMSGSIZE;
}

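/* The payload must fit in a single netlink attribute, whose length field
 * is a u16; leave room for the attribute header and alignment padding.
 */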
#define NET_DM_MAX_PACKET_SIZE (0xffff - NLA_HDRLEN - NLA_ALIGNTO)

static void net_dm_packet_report(struct sk_buff *skb)
{
        struct sk_buff *msg;
        size_t payload_len;
        int rc;

        /* Make sure we start copying the packet from the MAC header */
        if (skb->data > skb_mac_header(skb))
                skb_push(skb, skb->data - skb_mac_header(skb));
        else
                skb_pull(skb, skb_mac_header(skb) - skb->data);

        /* Ensure packet fits inside a single netlink attribute */
        payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
        if (net_dm_trunc_len)
                payload_len = min_t(size_t, net_dm_trunc_len, payload_len);

        msg = nlmsg_new(net_dm_packet_report_size(payload_len), GFP_KERNEL);
        if (!msg)
                goto out;

        rc = net_dm_packet_report_fill(msg, skb, payload_len);
        if (rc) {
                nlmsg_free(msg);
                goto out;
        }

        genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);

out:
        consume_skb(skb);
}

static void net_dm_packet_work(struct work_struct *work)
{
        struct per_cpu_dm_data *data;
        struct sk_buff_head list;
        struct sk_buff *skb;
        unsigned long flags;

        data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

        __skb_queue_head_init(&list);

        spin_lock_irqsave(&data->drop_queue.lock, flags);
        skb_queue_splice_tail_init(&data->drop_queue, &list);
        spin_unlock_irqrestore(&data->drop_queue.lock, flags);

        while ((skb = __skb_dequeue(&list)))
                net_dm_packet_report(skb);
}

static size_t
net_dm_flow_action_cookie_size(const struct net_dm_hw_metadata *hw_metadata)
{
        return hw_metadata->fa_cookie ?
               nla_total_size(hw_metadata->fa_cookie->cookie_len) : 0;
}

static size_t
net_dm_hw_packet_report_size(size_t payload_len,
                             const struct net_dm_hw_metadata *hw_metadata)
{
        size_t size;

        size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize);

        return NLMSG_ALIGN(size) +
               /* NET_DM_ATTR_ORIGIN */
               nla_total_size(sizeof(u16)) +
               /* NET_DM_ATTR_HW_TRAP_GROUP_NAME */
               nla_total_size(strlen(hw_metadata->trap_group_name) + 1) +
               /* NET_DM_ATTR_HW_TRAP_NAME */
               nla_total_size(strlen(hw_metadata->trap_name) + 1) +
               /* NET_DM_ATTR_IN_PORT */
               net_dm_in_port_size() +
               /* NET_DM_ATTR_FLOW_ACTION_COOKIE */
               net_dm_flow_action_cookie_size(hw_metadata) +
               /* NET_DM_ATTR_TIMESTAMP */
               nla_total_size(sizeof(u64)) +
               /* NET_DM_ATTR_ORIG_LEN */
               nla_total_size(sizeof(u32)) +
               /* NET_DM_ATTR_PROTO */
               nla_total_size(sizeof(u16)) +
               /* NET_DM_ATTR_PAYLOAD */
               nla_total_size(payload_len);
}

static int net_dm_hw_packet_report_fill(struct sk_buff *msg,
                                        struct sk_buff *skb, size_t payload_len)
{
        struct net_dm_hw_metadata *hw_metadata;
        struct nlattr *attr;
        void *hdr;

        hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;

        hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
                          NET_DM_CMD_PACKET_ALERT);
        if (!hdr)
                return -EMSGSIZE;

        if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_HW))
                goto nla_put_failure;

        if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_GROUP_NAME,
                           hw_metadata->trap_group_name))
                goto nla_put_failure;

        if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME,
                           hw_metadata->trap_name))
                goto nla_put_failure;

        if (hw_metadata->input_dev) {
                struct net_device *dev = hw_metadata->input_dev;
                int rc;

                rc = net_dm_packet_report_in_port_put(msg, dev->ifindex,
                                                      dev->name);
                if (rc)
                        goto nla_put_failure;
        }

        if (hw_metadata->fa_cookie &&
            nla_put(msg, NET_DM_ATTR_FLOW_ACTION_COOKIE,
                    hw_metadata->fa_cookie->cookie_len,
                    hw_metadata->fa_cookie->cookie))
                goto nla_put_failure;

        if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
                              ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
                goto nla_put_failure;

        if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
                goto nla_put_failure;

        if (!payload_len)
                goto out;

        if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
                goto nla_put_failure;

        attr = skb_put(msg, nla_total_size(payload_len));
        attr->nla_type = NET_DM_ATTR_PAYLOAD;
        attr->nla_len = nla_attr_size(payload_len);
        if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
                goto nla_put_failure;

out:
        genlmsg_end(msg, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(msg, hdr);
        return -EMSGSIZE;
}

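/* Deep-copy the trap metadata in atomic context so that it can outlive
 * the probe and be parsed later by the per-CPU worker. A reference is
 * taken on the input netdevice; everything is released again via
 * net_dm_hw_metadata_free().
 */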
static struct net_dm_hw_metadata *
net_dm_hw_metadata_clone(const struct net_dm_hw_metadata *hw_metadata)
{
        const struct flow_action_cookie *fa_cookie;
        struct net_dm_hw_metadata *n_hw_metadata;
        const char *trap_group_name;
        const char *trap_name;

        n_hw_metadata = kzalloc(sizeof(*hw_metadata), GFP_ATOMIC);
        if (!n_hw_metadata)
                return NULL;

        trap_group_name = kstrdup(hw_metadata->trap_group_name, GFP_ATOMIC);
        if (!trap_group_name)
                goto free_hw_metadata;
        n_hw_metadata->trap_group_name = trap_group_name;

        trap_name = kstrdup(hw_metadata->trap_name, GFP_ATOMIC);
        if (!trap_name)
                goto free_trap_group;
        n_hw_metadata->trap_name = trap_name;

        if (hw_metadata->fa_cookie) {
                size_t cookie_size = sizeof(*fa_cookie) +
                                     hw_metadata->fa_cookie->cookie_len;

                fa_cookie = kmemdup(hw_metadata->fa_cookie, cookie_size,
                                    GFP_ATOMIC);
                if (!fa_cookie)
                        goto free_trap_name;
                n_hw_metadata->fa_cookie = fa_cookie;
        }

        n_hw_metadata->input_dev = hw_metadata->input_dev;
        if (n_hw_metadata->input_dev)
                dev_hold(n_hw_metadata->input_dev);

        return n_hw_metadata;

free_trap_name:
        kfree(trap_name);
free_trap_group:
        kfree(trap_group_name);
free_hw_metadata:
        kfree(n_hw_metadata);
        return NULL;
}

static void
net_dm_hw_metadata_free(const struct net_dm_hw_metadata *hw_metadata)
{
        if (hw_metadata->input_dev)
                dev_put(hw_metadata->input_dev);
        kfree(hw_metadata->fa_cookie);
        kfree(hw_metadata->trap_name);
        kfree(hw_metadata->trap_group_name);
        kfree(hw_metadata);
}

static void net_dm_hw_packet_report(struct sk_buff *skb)
{
        struct net_dm_hw_metadata *hw_metadata;
        struct sk_buff *msg;
        size_t payload_len;
        int rc;

        if (skb->data > skb_mac_header(skb))
                skb_push(skb, skb->data - skb_mac_header(skb));
        else
                skb_pull(skb, skb_mac_header(skb) - skb->data);

        payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
        if (net_dm_trunc_len)
                payload_len = min_t(size_t, net_dm_trunc_len, payload_len);

        hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
        msg = nlmsg_new(net_dm_hw_packet_report_size(payload_len, hw_metadata),
                        GFP_KERNEL);
        if (!msg)
                goto out;

        rc = net_dm_hw_packet_report_fill(msg, skb, payload_len);
        if (rc) {
                nlmsg_free(msg);
                goto out;
        }

        genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);

out:
        net_dm_hw_metadata_free(NET_DM_SKB_CB(skb)->hw_metadata);
        consume_skb(skb);
}

static void net_dm_hw_packet_work(struct work_struct *work)
{
        struct per_cpu_dm_data *hw_data;
        struct sk_buff_head list;
        struct sk_buff *skb;
        unsigned long flags;

        hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

        __skb_queue_head_init(&list);

        spin_lock_irqsave(&hw_data->drop_queue.lock, flags);
        skb_queue_splice_tail_init(&hw_data->drop_queue, &list);
        spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);

        while ((skb = __skb_dequeue(&list)))
                net_dm_hw_packet_report(skb);
}

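/* Packet-mode hardware probe: like its software sibling, but the queued
 * clone also carries a copy of the trap metadata for the report.
 */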
static void
net_dm_hw_packet_probe(struct sk_buff *skb,
                       const struct net_dm_hw_metadata *hw_metadata)
{
        struct net_dm_hw_metadata *n_hw_metadata;
        ktime_t tstamp = ktime_get_real();
        struct per_cpu_dm_data *hw_data;
        struct sk_buff *nskb;
        unsigned long flags;

        if (!skb_mac_header_was_set(skb))
                return;

        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
                return;

        n_hw_metadata = net_dm_hw_metadata_clone(hw_metadata);
        if (!n_hw_metadata)
                goto free;

        NET_DM_SKB_CB(nskb)->hw_metadata = n_hw_metadata;
        nskb->tstamp = tstamp;

        hw_data = this_cpu_ptr(&dm_hw_cpu_data);

        spin_lock_irqsave(&hw_data->drop_queue.lock, flags);
        if (skb_queue_len(&hw_data->drop_queue) < net_dm_queue_len)
                __skb_queue_tail(&hw_data->drop_queue, nskb);
        else
                goto unlock_free;
        spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);

        schedule_work(&hw_data->dm_alert_work);

        return;

unlock_free:
        spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);
        u64_stats_update_begin(&hw_data->stats.syncp);
        hw_data->stats.dropped++;
        u64_stats_update_end(&hw_data->stats.syncp);
        net_dm_hw_metadata_free(n_hw_metadata);
free:
        consume_skb(nskb);
}

static const struct net_dm_alert_ops net_dm_alert_packet_ops = {
        .kfree_skb_probe        = net_dm_packet_trace_kfree_skb_hit,
        .napi_poll_probe        = net_dm_packet_trace_napi_poll_hit,
        .work_item_func         = net_dm_packet_work,
        .hw_work_item_func      = net_dm_hw_packet_work,
        .hw_probe               = net_dm_hw_packet_probe,
};

static const struct net_dm_alert_ops *net_dm_alert_ops_arr[] = {
        [NET_DM_ALERT_MODE_SUMMARY]     = &net_dm_alert_summary_ops,
        [NET_DM_ALERT_MODE_PACKET]      = &net_dm_alert_packet_ops,
};

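/* Entry point used by the drop reporting infrastructure (e.g., devlink
 * trap reports) to hand a hardware-dropped packet to the configured
 * alert mode. The RCU read-side section pairs with the synchronize_rcu()
 * in net_dm_hw_monitor_stop(), which guarantees no probe is still
 * running once monitoring is disabled.
 */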
void net_dm_hw_report(struct sk_buff *skb,
                      const struct net_dm_hw_metadata *hw_metadata)
{
        rcu_read_lock();

        if (!monitor_hw)
                goto out;

        net_dm_alert_ops_arr[net_dm_alert_mode]->hw_probe(skb, hw_metadata);

out:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(net_dm_hw_report);

static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack)
{
        const struct net_dm_alert_ops *ops;
        int cpu;

        if (monitor_hw) {
                NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already enabled");
                return -EAGAIN;
        }

        ops = net_dm_alert_ops_arr[net_dm_alert_mode];

        if (!try_module_get(THIS_MODULE)) {
                NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module");
                return -ENODEV;
        }

        for_each_possible_cpu(cpu) {
                struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
                struct net_dm_hw_entries *hw_entries;

                INIT_WORK(&hw_data->dm_alert_work, ops->hw_work_item_func);
                timer_setup(&hw_data->send_timer, sched_send_work, 0);
                hw_entries = net_dm_hw_reset_per_cpu_data(hw_data);
                kfree(hw_entries);
        }

        monitor_hw = true;

        return 0;
}

static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack)
{
        int cpu;

        if (!monitor_hw) {
                NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled");
                return;
        }

        monitor_hw = false;

        /* After this call returns we are guaranteed that no CPU is processing
         * any hardware drops.
         */
        synchronize_rcu();

        for_each_possible_cpu(cpu) {
                struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
                struct sk_buff *skb;

                del_timer_sync(&hw_data->send_timer);
                cancel_work_sync(&hw_data->dm_alert_work);
                while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
                        struct net_dm_hw_metadata *hw_metadata;

                        hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
                        net_dm_hw_metadata_free(hw_metadata);
                        consume_skb(skb);
                }
        }

        module_put(THIS_MODULE);
}

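/* Enable software drop monitoring: initialize the per-CPU state for the
 * current alert mode and attach the probes to the kfree_skb and
 * napi_poll tracepoints.
 */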
static int net_dm_trace_on_set(struct netlink_ext_ack *extack)
{
        const struct net_dm_alert_ops *ops;
        int cpu, rc;

        ops = net_dm_alert_ops_arr[net_dm_alert_mode];

        if (!try_module_get(THIS_MODULE)) {
                NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module");
                return -ENODEV;
        }

        for_each_possible_cpu(cpu) {
                struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
                struct sk_buff *skb;

                INIT_WORK(&data->dm_alert_work, ops->work_item_func);
                timer_setup(&data->send_timer, sched_send_work, 0);
                /* Allocate a new per-CPU skb for the summary alert message and
                 * free the old one which might contain stale data from
                 * previous tracing.
                 */
                skb = reset_per_cpu_data(data);
                consume_skb(skb);
        }

        rc = register_trace_kfree_skb(ops->kfree_skb_probe, NULL);
        if (rc) {
                NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to kfree_skb() tracepoint");
                goto err_module_put;
        }

        rc = register_trace_napi_poll(ops->napi_poll_probe, NULL);
        if (rc) {
                NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to napi_poll() tracepoint");
                goto err_unregister_trace;
        }

        return 0;

err_unregister_trace:
        unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);
err_module_put:
        module_put(THIS_MODULE);
        return rc;
}

static void net_dm_trace_off_set(void)
{
        struct dm_hw_stat_delta *new_stat, *temp;
        const struct net_dm_alert_ops *ops;
        int cpu;

        ops = net_dm_alert_ops_arr[net_dm_alert_mode];

        unregister_trace_napi_poll(ops->napi_poll_probe, NULL);
        unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);

        tracepoint_synchronize_unregister();

        /* Make sure we do not send notifications to user space after the
         * request to stop tracing has returned.
         */
        for_each_possible_cpu(cpu) {
                struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
                struct sk_buff *skb;

                del_timer_sync(&data->send_timer);
                cancel_work_sync(&data->dm_alert_work);
                while ((skb = __skb_dequeue(&data->drop_queue)))
                        consume_skb(skb);
        }

        list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) {
                if (new_stat->dev == NULL) {
                        list_del_rcu(&new_stat->list);
                        kfree_rcu(new_stat, rcu);
                }
        }

        module_put(THIS_MODULE);
}

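/* Toggle software drop tracing. Called with 'net_dm_mutex' held, since
 * every doit handler runs between net_dm_nl_pre_doit() and
 * net_dm_nl_post_doit().
 */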
static int set_all_monitor_traces(int state, struct netlink_ext_ack *extack)
{
        int rc = 0;

        if (state == trace_state) {
                NL_SET_ERR_MSG_MOD(extack, "Trace state already set to requested state");
                return -EAGAIN;
        }

        switch (state) {
        case TRACE_ON:
                rc = net_dm_trace_on_set(extack);
                break;
        case TRACE_OFF:
                net_dm_trace_off_set();
                break;
        default:
                rc = 1;
                break;
        }

        if (!rc)
                trace_state = state;
        else
                rc = -EINPROGRESS;

        return rc;
}

static bool net_dm_is_monitoring(void)
{
        return trace_state == TRACE_ON || monitor_hw;
}

static int net_dm_alert_mode_get_from_info(struct genl_info *info,
                                           enum net_dm_alert_mode *p_alert_mode)
{
        u8 val;

        val = nla_get_u8(info->attrs[NET_DM_ATTR_ALERT_MODE]);

        switch (val) {
        case NET_DM_ALERT_MODE_SUMMARY:
        case NET_DM_ALERT_MODE_PACKET:
                *p_alert_mode = val;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int net_dm_alert_mode_set(struct genl_info *info)
{
        struct netlink_ext_ack *extack = info->extack;
        enum net_dm_alert_mode alert_mode;
        int rc;

        if (!info->attrs[NET_DM_ATTR_ALERT_MODE])
                return 0;

        rc = net_dm_alert_mode_get_from_info(info, &alert_mode);
        if (rc) {
                NL_SET_ERR_MSG_MOD(extack, "Invalid alert mode");
                return -EINVAL;
        }

        net_dm_alert_mode = alert_mode;

        return 0;
}

static void net_dm_trunc_len_set(struct genl_info *info)
{
        if (!info->attrs[NET_DM_ATTR_TRUNC_LEN])
                return;

        net_dm_trunc_len = nla_get_u32(info->attrs[NET_DM_ATTR_TRUNC_LEN]);
}

static void net_dm_queue_len_set(struct genl_info *info)
{
        if (!info->attrs[NET_DM_ATTR_QUEUE_LEN])
                return;

        net_dm_queue_len = nla_get_u32(info->attrs[NET_DM_ATTR_QUEUE_LEN]);
}

static int net_dm_cmd_config(struct sk_buff *skb,
                        struct genl_info *info)
{
        struct netlink_ext_ack *extack = info->extack;
        int rc;

        if (net_dm_is_monitoring()) {
                NL_SET_ERR_MSG_MOD(extack, "Cannot configure drop monitor during monitoring");
                return -EBUSY;
        }

        rc = net_dm_alert_mode_set(info);
        if (rc)
                return rc;

        net_dm_trunc_len_set(info);

        net_dm_queue_len_set(info);

        return 0;
}

static int net_dm_monitor_start(bool set_sw, bool set_hw,
                                struct netlink_ext_ack *extack)
{
        bool sw_set = false;
        int rc;

        if (set_sw) {
                rc = set_all_monitor_traces(TRACE_ON, extack);
                if (rc)
                        return rc;
                sw_set = true;
        }

        if (set_hw) {
                rc = net_dm_hw_monitor_start(extack);
                if (rc)
                        goto err_monitor_hw;
        }

        return 0;

err_monitor_hw:
        if (sw_set)
                set_all_monitor_traces(TRACE_OFF, extack);
        return rc;
}

static void net_dm_monitor_stop(bool set_sw, bool set_hw,
                                struct netlink_ext_ack *extack)
{
        if (set_hw)
                net_dm_hw_monitor_stop(extack);
        if (set_sw)
                set_all_monitor_traces(TRACE_OFF, extack);
}

static int net_dm_cmd_trace(struct sk_buff *skb,
                        struct genl_info *info)
{
        bool set_sw = !!info->attrs[NET_DM_ATTR_SW_DROPS];
        bool set_hw = !!info->attrs[NET_DM_ATTR_HW_DROPS];
        struct netlink_ext_ack *extack = info->extack;

        /* To maintain backward compatibility, we start / stop monitoring of
         * software drops if no flag is specified.
         */
        if (!set_sw && !set_hw)
                set_sw = true;

        switch (info->genlhdr->cmd) {
        case NET_DM_CMD_START:
                return net_dm_monitor_start(set_sw, set_hw, extack);
        case NET_DM_CMD_STOP:
                net_dm_monitor_stop(set_sw, set_hw, extack);
                return 0;
        }

        return -EOPNOTSUPP;
}

static int net_dm_config_fill(struct sk_buff *msg, struct genl_info *info)
{
        void *hdr;

        hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
                          &net_drop_monitor_family, 0, NET_DM_CMD_CONFIG_NEW);
        if (!hdr)
                return -EMSGSIZE;

        if (nla_put_u8(msg, NET_DM_ATTR_ALERT_MODE, net_dm_alert_mode))
                goto nla_put_failure;

        if (nla_put_u32(msg, NET_DM_ATTR_TRUNC_LEN, net_dm_trunc_len))
                goto nla_put_failure;

        if (nla_put_u32(msg, NET_DM_ATTR_QUEUE_LEN, net_dm_queue_len))
                goto nla_put_failure;

        genlmsg_end(msg, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(msg, hdr);
        return -EMSGSIZE;
}

static int net_dm_cmd_config_get(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *msg;
        int rc;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        rc = net_dm_config_fill(msg, info);
        if (rc)
                goto free_msg;

        return genlmsg_reply(msg, info);

free_msg:
        nlmsg_free(msg);
        return rc;
}

static void net_dm_stats_read(struct net_dm_stats *stats)
{
        int cpu;

        memset(stats, 0, sizeof(*stats));
        for_each_possible_cpu(cpu) {
                struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
                struct net_dm_stats *cpu_stats = &data->stats;
                unsigned int start;
                u64 dropped;

                do {
                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
                        dropped = cpu_stats->dropped;
                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

                stats->dropped += dropped;
        }
}

static int net_dm_stats_put(struct sk_buff *msg)
{
        struct net_dm_stats stats;
        struct nlattr *attr;

        net_dm_stats_read(&stats);

        attr = nla_nest_start(msg, NET_DM_ATTR_STATS);
        if (!attr)
                return -EMSGSIZE;

        if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
                              stats.dropped, NET_DM_ATTR_PAD))
                goto nla_put_failure;

        nla_nest_end(msg, attr);

        return 0;

nla_put_failure:
        nla_nest_cancel(msg, attr);
        return -EMSGSIZE;
}

static void net_dm_hw_stats_read(struct net_dm_stats *stats)
{
        int cpu;

        memset(stats, 0, sizeof(*stats));
        for_each_possible_cpu(cpu) {
                struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
                struct net_dm_stats *cpu_stats = &hw_data->stats;
                unsigned int start;
                u64 dropped;

                do {
                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
                        dropped = cpu_stats->dropped;
                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

                stats->dropped += dropped;
        }
}

static int net_dm_hw_stats_put(struct sk_buff *msg)
{
        struct net_dm_stats stats;
        struct nlattr *attr;

        net_dm_hw_stats_read(&stats);

        attr = nla_nest_start(msg, NET_DM_ATTR_HW_STATS);
        if (!attr)
                return -EMSGSIZE;

        if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
                              stats.dropped, NET_DM_ATTR_PAD))
                goto nla_put_failure;

        nla_nest_end(msg, attr);

        return 0;

nla_put_failure:
        nla_nest_cancel(msg, attr);
        return -EMSGSIZE;
}

static int net_dm_stats_fill(struct sk_buff *msg, struct genl_info *info)
{
        void *hdr;
        int rc;

        hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
                          &net_drop_monitor_family, 0, NET_DM_CMD_STATS_NEW);
        if (!hdr)
                return -EMSGSIZE;

        rc = net_dm_stats_put(msg);
        if (rc)
                goto nla_put_failure;

        rc = net_dm_hw_stats_put(msg);
        if (rc)
                goto nla_put_failure;

        genlmsg_end(msg, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(msg, hdr);
        return -EMSGSIZE;
}

static int net_dm_cmd_stats_get(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *msg;
        int rc;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        rc = net_dm_stats_fill(msg, info);
        if (rc)
                goto free_msg;

        return genlmsg_reply(msg, info);

free_msg:
        nlmsg_free(msg);
        return rc;
}

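/* Netdevice notifier keeping 'hw_stats_list' in sync: an entry is added
 * on NETDEV_REGISTER; on NETDEV_UNREGISTER the device pointer is cleared
 * and, if tracing is active, removal of the entry is deferred until
 * tracing is switched off (see net_dm_trace_off_set()).
 */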
static int dropmon_net_event(struct notifier_block *ev_block,
                             unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct dm_hw_stat_delta *new_stat = NULL;
        struct dm_hw_stat_delta *tmp;

        switch (event) {
        case NETDEV_REGISTER:
                new_stat = kzalloc(sizeof(struct dm_hw_stat_delta), GFP_KERNEL);

                if (!new_stat)
                        goto out;

                new_stat->dev = dev;
                new_stat->last_rx = jiffies;
                mutex_lock(&net_dm_mutex);
                list_add_rcu(&new_stat->list, &hw_stats_list);
                mutex_unlock(&net_dm_mutex);
                break;
        case NETDEV_UNREGISTER:
                mutex_lock(&net_dm_mutex);
                list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
                        if (new_stat->dev == dev) {
                                new_stat->dev = NULL;
                                if (trace_state == TRACE_OFF) {
                                        list_del_rcu(&new_stat->list);
                                        kfree_rcu(new_stat, rcu);
                                        break;
                                }
                        }
                }
                mutex_unlock(&net_dm_mutex);
                break;
        }
out:
        return NOTIFY_DONE;
}

static const struct nla_policy net_dm_nl_policy[NET_DM_ATTR_MAX + 1] = {
        [NET_DM_ATTR_UNSPEC] = { .strict_start_type = NET_DM_ATTR_UNSPEC + 1 },
        [NET_DM_ATTR_ALERT_MODE] = { .type = NLA_U8 },
        [NET_DM_ATTR_TRUNC_LEN] = { .type = NLA_U32 },
        [NET_DM_ATTR_QUEUE_LEN] = { .type = NLA_U32 },
        [NET_DM_ATTR_SW_DROPS]  = { .type = NLA_FLAG },
        [NET_DM_ATTR_HW_DROPS]  = { .type = NLA_FLAG },
};

static const struct genl_ops dropmon_ops[] = {
        {
                .cmd = NET_DM_CMD_CONFIG,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = net_dm_cmd_config,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = NET_DM_CMD_START,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = net_dm_cmd_trace,
        },
        {
                .cmd = NET_DM_CMD_STOP,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = net_dm_cmd_trace,
        },
        {
                .cmd = NET_DM_CMD_CONFIG_GET,
                .doit = net_dm_cmd_config_get,
        },
        {
                .cmd = NET_DM_CMD_STATS_GET,
                .doit = net_dm_cmd_stats_get,
        },
};

static int net_dm_nl_pre_doit(const struct genl_ops *ops,
                              struct sk_buff *skb, struct genl_info *info)
{
        mutex_lock(&net_dm_mutex);

        return 0;
}

static void net_dm_nl_post_doit(const struct genl_ops *ops,
                                struct sk_buff *skb, struct genl_info *info)
{
        mutex_unlock(&net_dm_mutex);
}

static struct genl_family net_drop_monitor_family __ro_after_init = {
        .hdrsize        = 0,
        .name           = "NET_DM",
        .version        = 2,
        .maxattr        = NET_DM_ATTR_MAX,
        .policy         = net_dm_nl_policy,
        .pre_doit       = net_dm_nl_pre_doit,
        .post_doit      = net_dm_nl_post_doit,
        .module         = THIS_MODULE,
        .ops            = dropmon_ops,
        .n_ops          = ARRAY_SIZE(dropmon_ops),
        .mcgrps         = dropmon_mcgrps,
        .n_mcgrps       = ARRAY_SIZE(dropmon_mcgrps),
};
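
/* User space (e.g., the dropwatch utility) talks to this family over
 * generic netlink; alert messages are multicast to the "events" group
 * (NET_DM_GRP_ALERT) defined above.
 */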

static struct notifier_block dropmon_net_notifier = {
        .notifier_call = dropmon_net_event
};

static void __net_dm_cpu_data_init(struct per_cpu_dm_data *data)
{
        spin_lock_init(&data->lock);
        skb_queue_head_init(&data->drop_queue);
        u64_stats_init(&data->stats.syncp);
}

static void __net_dm_cpu_data_fini(struct per_cpu_dm_data *data)
{
        WARN_ON(!skb_queue_empty(&data->drop_queue));
}

static void net_dm_cpu_data_init(int cpu)
{
        struct per_cpu_dm_data *data;

        data = &per_cpu(dm_cpu_data, cpu);
        __net_dm_cpu_data_init(data);
}

static void net_dm_cpu_data_fini(int cpu)
{
        struct per_cpu_dm_data *data;

        data = &per_cpu(dm_cpu_data, cpu);
        /* At this point, we should have exclusive access
         * to this struct and can free the skb inside it.
         */
        consume_skb(data->skb);
        __net_dm_cpu_data_fini(data);
}

static void net_dm_hw_cpu_data_init(int cpu)
{
        struct per_cpu_dm_data *hw_data;

        hw_data = &per_cpu(dm_hw_cpu_data, cpu);
        __net_dm_cpu_data_init(hw_data);
}

static void net_dm_hw_cpu_data_fini(int cpu)
{
        struct per_cpu_dm_data *hw_data;

        hw_data = &per_cpu(dm_hw_cpu_data, cpu);
        kfree(hw_data->hw_entries);
        __net_dm_cpu_data_fini(hw_data);
}

static int __init init_net_drop_monitor(void)
{
        int cpu, rc;

        pr_info("Initializing network drop monitor service\n");

        if (sizeof(void *) > 8) {
                pr_err("Unable to store program counters on this arch, Drop monitor failed\n");
                return -ENOSPC;
        }

        rc = genl_register_family(&net_drop_monitor_family);
        if (rc) {
                pr_err("Could not create drop monitor netlink family\n");
                return rc;
        }
        WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);

        rc = register_netdevice_notifier(&dropmon_net_notifier);
        if (rc < 0) {
                pr_crit("Failed to register netdevice notifier\n");
                goto out_unreg;
        }

        rc = 0;

        for_each_possible_cpu(cpu) {
                net_dm_cpu_data_init(cpu);
                net_dm_hw_cpu_data_init(cpu);
        }

        goto out;

out_unreg:
        genl_unregister_family(&net_drop_monitor_family);
out:
        return rc;
}

static void exit_net_drop_monitor(void)
{
        int cpu;

        BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));

        /*
         * Because of the module_get/put we do in the trace state change
         * path, we are guaranteed not to have any current users when we
         * get here.
         */

        for_each_possible_cpu(cpu) {
                net_dm_hw_cpu_data_fini(cpu);
                net_dm_cpu_data_fini(cpu);
        }

        BUG_ON(genl_unregister_family(&net_drop_monitor_family));
}

module_init(init_net_drop_monitor);
module_exit(exit_net_drop_monitor);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
MODULE_ALIAS_GENL_FAMILY("NET_DM");
MODULE_DESCRIPTION("Monitoring code for network dropped packet alerts");