// SPDX-License-Identifier: GPL-2.0-only
/*
 * Monitoring code for network dropped packet alerts
 *
 * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/netlink.h>
#include <linux/net_dropmon.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/drop_monitor.h>
#include <net/genetlink.h>
#include <net/netevent.h>

#include <trace/events/skb.h>
#include <trace/events/napi.h>

#include <asm/unaligned.h>

/*
 * Globals, our netlink socket pointer
 * and the work handle that will send up
 * netlink alerts
 */
static int trace_state = TRACE_OFF;
static bool monitor_hw;

/* net_dm_mutex
 *
 * An overall lock guarding every operation coming from userspace.
 * It also guards the global 'hw_stats_list' list.
 */
static DEFINE_MUTEX(net_dm_mutex);

struct net_dm_stats {
	u64 dropped;
	struct u64_stats_sync syncp;
};

#define NET_DM_MAX_HW_TRAP_NAME_LEN 40

struct net_dm_hw_entry {
	char trap_name[NET_DM_MAX_HW_TRAP_NAME_LEN];
	u32 count;
};

struct net_dm_hw_entries {
	u32 num_entries;
	struct net_dm_hw_entry entries[0];
};

struct per_cpu_dm_data {
	spinlock_t lock;	/* Protects 'skb', 'hw_entries' and
				 * 'send_timer'
				 */
	union {
		struct sk_buff *skb;
		struct net_dm_hw_entries *hw_entries;
	};
	struct sk_buff_head drop_queue;
	struct work_struct dm_alert_work;
	struct timer_list send_timer;
	struct net_dm_stats stats;
};

struct dm_hw_stat_delta {
	struct net_device *dev;
	unsigned long last_rx;
	struct list_head list;
	struct rcu_head rcu;
	unsigned long last_drop_val;
};

static struct genl_family net_drop_monitor_family;

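/* Two independent per-CPU channels: dm_cpu_data carries software drops
 * observed via the kfree_skb and napi_poll tracepoints, dm_hw_cpu_data
 * carries drops reported by capable hardware through net_dm_hw_report().
 */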
static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_hw_cpu_data);

static int dm_hit_limit = 64;
static int dm_delay = 1;
static unsigned long dm_hw_check_delta = 2*HZ;
static LIST_HEAD(hw_stats_list);

static enum net_dm_alert_mode net_dm_alert_mode = NET_DM_ALERT_MODE_SUMMARY;
static u32 net_dm_trunc_len;
static u32 net_dm_queue_len = 1000;

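/* The alert mode, truncation length and queue length above are runtime
 * tunables, set from user space via the NET_DM_CMD_CONFIG command (see
 * net_dm_cmd_config()). They can only be changed while monitoring is off.
 */
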
struct net_dm_alert_ops {
	void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb,
				void *location);
	void (*napi_poll_probe)(void *ignore, struct napi_struct *napi,
				int work, int budget);
	void (*work_item_func)(struct work_struct *work);
	void (*hw_work_item_func)(struct work_struct *work);
	void (*hw_probe)(struct sk_buff *skb,
			 const struct net_dm_hw_metadata *hw_metadata);
};

struct net_dm_skb_cb {
	union {
		struct net_dm_hw_metadata *hw_metadata;
		void *pc;
	};
};

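/* While we own a cloned skb, its 48-byte skb->cb[] scratch area is ours to
 * use, so the drop location (or hardware metadata pointer) is stashed there
 * until the work item turns the packet into a netlink notification.
 */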
#define NET_DM_SKB_CB(__skb) ((struct net_dm_skb_cb *)&((__skb)->cb[0]))

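/* Build a fresh summary-mode alert skb and atomically swap it into place,
 * returning the previous (now complete) alert for transmission. On
 * allocation failure, re-arm the timer so another attempt is made in
 * roughly 1/10 second.
 */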
static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
{
	size_t al;
	struct net_dm_alert_msg *msg;
	struct nlattr *nla;
	struct sk_buff *skb;
	unsigned long flags;
	void *msg_header;

	al = sizeof(struct net_dm_alert_msg);
	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
	al += sizeof(struct nlattr);

	skb = genlmsg_new(al, GFP_KERNEL);
	if (!skb)
		goto err;

	msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
				 0, NET_DM_CMD_ALERT);
	if (!msg_header) {
		nlmsg_free(skb);
		skb = NULL;
		goto err;
	}
	nla = nla_reserve(skb, NLA_UNSPEC,
			  sizeof(struct net_dm_alert_msg));
	if (!nla) {
		nlmsg_free(skb);
		skb = NULL;
		goto err;
	}
	msg = nla_data(nla);
	memset(msg, 0, al);
	goto out;

err:
	mod_timer(&data->send_timer, jiffies + HZ / 10);
out:
	spin_lock_irqsave(&data->lock, flags);
	swap(data->skb, skb);
	spin_unlock_irqrestore(&data->lock, flags);

	if (skb) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
		struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);

		genlmsg_end(skb, genlmsg_data(gnlh));
	}

	return skb;
}

static const struct genl_multicast_group dropmon_mcgrps[] = {
	{ .name = "events", },
};

static void send_dm_alert(struct work_struct *work)
{
	struct sk_buff *skb;
	struct per_cpu_dm_data *data;

	data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

	skb = reset_per_cpu_data(data);

	if (skb)
		genlmsg_multicast(&net_drop_monitor_family, skb, 0,
				  0, GFP_KERNEL);
}

/*
 * This is the timer function to delay the sending of an alert
 * in the event that more drops will arrive during the
 * hysteresis period.
 */
static void sched_send_work(struct timer_list *t)
{
	struct per_cpu_dm_data *data = from_timer(data, t, send_timer);

	schedule_work(&data->dm_alert_work);
}

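/* Summary mode: record only the drop location (program counter) in the
 * per-CPU alert message, bumping the count if this location was already
 * seen. The packet itself is not copied; user space receives (PC, count)
 * pairs.
 */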
static void trace_drop_common(struct sk_buff *skb, void *location)
{
	struct net_dm_alert_msg *msg;
	struct net_dm_drop_point *point;
	struct nlmsghdr *nlh;
	struct nlattr *nla;
	int i;
	struct sk_buff *dskb;
	struct per_cpu_dm_data *data;
	unsigned long flags;

	local_irq_save(flags);
	data = this_cpu_ptr(&dm_cpu_data);
	spin_lock(&data->lock);
	dskb = data->skb;

	if (!dskb)
		goto out;

	nlh = (struct nlmsghdr *)dskb->data;
	nla = genlmsg_data(nlmsg_data(nlh));
	msg = nla_data(nla);
	point = msg->points;
	for (i = 0; i < msg->entries; i++) {
		if (!memcmp(&location, &point->pc, sizeof(void *))) {
			point->count++;
			goto out;
		}
		point++;
	}
	if (msg->entries == dm_hit_limit)
		goto out;
	/*
	 * We need to create a new entry
	 */
	__nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
	nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
	memcpy(point->pc, &location, sizeof(void *));
	point->count = 1;
	msg->entries++;

	if (!timer_pending(&data->send_timer)) {
		data->send_timer.expires = jiffies + dm_delay * HZ;
		add_timer(&data->send_timer);
	}

out:
	spin_unlock_irqrestore(&data->lock, flags);
}

static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
{
	trace_drop_common(skb, location);
}

static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi,
				int work, int budget)
{
	struct dm_hw_stat_delta *new_stat;

	/*
	 * Don't check napi structures with no associated device
	 */
	if (!napi->dev)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
		struct net_device *dev;

		/*
		 * only add a note to our monitor buffer if:
		 * 1) this is the dev we received on
		 * 2) it's after the last_rx delta
		 * 3) our rx_dropped count has gone up
		 */
		/* Paired with WRITE_ONCE() in dropmon_net_event() */
		dev = READ_ONCE(new_stat->dev);
		if ((dev == napi->dev) &&
		    (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
		    (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
			trace_drop_common(NULL, NULL);
			new_stat->last_drop_val = napi->dev->stats.rx_dropped;
			new_stat->last_rx = jiffies;
			break;
		}
	}
	rcu_read_unlock();
}

static struct net_dm_hw_entries *
net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
{
	struct net_dm_hw_entries *hw_entries;
	unsigned long flags;

	hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit),
			     GFP_KERNEL);
	if (!hw_entries) {
		/* If the memory allocation failed, we try to perform another
		 * allocation in 1/10 second. Otherwise, the probe function
		 * will constantly bail out.
		 */
		mod_timer(&hw_data->send_timer, jiffies + HZ / 10);
	}

	spin_lock_irqsave(&hw_data->lock, flags);
	swap(hw_data->hw_entries, hw_entries);
	spin_unlock_irqrestore(&hw_data->lock, flags);

	return hw_entries;
}

static int net_dm_hw_entry_put(struct sk_buff *msg,
			       const struct net_dm_hw_entry *hw_entry)
{
	struct nlattr *attr;

	attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRY);
	if (!attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME, hw_entry->trap_name))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_HW_TRAP_COUNT, hw_entry->count))
		goto nla_put_failure;

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}

static int net_dm_hw_entries_put(struct sk_buff *msg,
				 const struct net_dm_hw_entries *hw_entries)
{
	struct nlattr *attr;
	int i;

	attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRIES);
	if (!attr)
		return -EMSGSIZE;

	for (i = 0; i < hw_entries->num_entries; i++) {
		int rc;

		rc = net_dm_hw_entry_put(msg, &hw_entries->entries[i]);
		if (rc)
			goto nla_put_failure;
	}

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}

static int
net_dm_hw_summary_report_fill(struct sk_buff *msg,
			      const struct net_dm_hw_entries *hw_entries)
{
	struct net_dm_alert_msg anc_hdr = { 0 };
	void *hdr;
	int rc;

	hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
			  NET_DM_CMD_ALERT);
	if (!hdr)
		return -EMSGSIZE;

	/* We need to put the ancillary header in order not to break user
	 * space.
	 */
	if (nla_put(msg, NLA_UNSPEC, sizeof(anc_hdr), &anc_hdr))
		goto nla_put_failure;

	rc = net_dm_hw_entries_put(msg, hw_entries);
	if (rc)
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

static void net_dm_hw_summary_work(struct work_struct *work)
{
	struct net_dm_hw_entries *hw_entries;
	struct per_cpu_dm_data *hw_data;
	struct sk_buff *msg;
	int rc;

	hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

	hw_entries = net_dm_hw_reset_per_cpu_data(hw_data);
	if (!hw_entries)
		return;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		goto out;

	rc = net_dm_hw_summary_report_fill(msg, hw_entries);
	if (rc) {
		nlmsg_free(msg);
		goto out;
	}

	genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);

out:
	kfree(hw_entries);
}

static void
net_dm_hw_summary_probe(struct sk_buff *skb,
			const struct net_dm_hw_metadata *hw_metadata)
{
	struct net_dm_hw_entries *hw_entries;
	struct net_dm_hw_entry *hw_entry;
	struct per_cpu_dm_data *hw_data;
	unsigned long flags;
	int i;

	hw_data = this_cpu_ptr(&dm_hw_cpu_data);
	spin_lock_irqsave(&hw_data->lock, flags);
	hw_entries = hw_data->hw_entries;

	if (!hw_entries)
		goto out;

	for (i = 0; i < hw_entries->num_entries; i++) {
		hw_entry = &hw_entries->entries[i];
		if (!strncmp(hw_entry->trap_name, hw_metadata->trap_name,
			     NET_DM_MAX_HW_TRAP_NAME_LEN - 1)) {
			hw_entry->count++;
			goto out;
		}
	}
	if (WARN_ON_ONCE(hw_entries->num_entries == dm_hit_limit))
		goto out;

	hw_entry = &hw_entries->entries[hw_entries->num_entries];
	strlcpy(hw_entry->trap_name, hw_metadata->trap_name,
		NET_DM_MAX_HW_TRAP_NAME_LEN - 1);
	hw_entry->count = 1;
	hw_entries->num_entries++;

	if (!timer_pending(&hw_data->send_timer)) {
		hw_data->send_timer.expires = jiffies + dm_delay * HZ;
		add_timer(&hw_data->send_timer);
	}

out:
	spin_unlock_irqrestore(&hw_data->lock, flags);
}

static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
	.kfree_skb_probe = trace_kfree_skb_hit,
	.napi_poll_probe = trace_napi_poll_hit,
	.work_item_func = send_dm_alert,
	.hw_work_item_func = net_dm_hw_summary_work,
	.hw_probe = net_dm_hw_summary_probe,
};

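/* Packet mode: clone each dropped packet and queue it per-CPU; a work item
 * later wraps the original payload (possibly truncated) plus metadata in a
 * netlink message. Clones that cannot be queued are counted as dropped in
 * the per-CPU stats.
 */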
static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
					      struct sk_buff *skb,
					      void *location)
{
	ktime_t tstamp = ktime_get_real();
	struct per_cpu_dm_data *data;
	struct sk_buff *nskb;
	unsigned long flags;

	if (!skb_mac_header_was_set(skb))
		return;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return;

	NET_DM_SKB_CB(nskb)->pc = location;
	/* Override the timestamp because we care about the time when the
	 * packet was dropped.
	 */
	nskb->tstamp = tstamp;

	data = this_cpu_ptr(&dm_cpu_data);

	spin_lock_irqsave(&data->drop_queue.lock, flags);
	if (skb_queue_len(&data->drop_queue) < net_dm_queue_len)
		__skb_queue_tail(&data->drop_queue, nskb);
	else
		goto unlock_free;
	spin_unlock_irqrestore(&data->drop_queue.lock, flags);

	schedule_work(&data->dm_alert_work);

	return;

unlock_free:
	spin_unlock_irqrestore(&data->drop_queue.lock, flags);
	u64_stats_update_begin(&data->stats.syncp);
	data->stats.dropped++;
	u64_stats_update_end(&data->stats.syncp);
	consume_skb(nskb);
}

static void net_dm_packet_trace_napi_poll_hit(void *ignore,
					      struct napi_struct *napi,
					      int work, int budget)
{
}

static size_t net_dm_in_port_size(void)
{
	       /* NET_DM_ATTR_IN_PORT nest */
	return nla_total_size(0) +
	       /* NET_DM_ATTR_PORT_NETDEV_IFINDEX */
	       nla_total_size(sizeof(u32)) +
	       /* NET_DM_ATTR_PORT_NETDEV_NAME */
	       nla_total_size(IFNAMSIZ + 1);
}

#define NET_DM_MAX_SYMBOL_LEN 40

static size_t net_dm_packet_report_size(size_t payload_len)
{
	size_t size;

	size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize);

	return NLMSG_ALIGN(size) +
	       /* NET_DM_ATTR_ORIGIN */
	       nla_total_size(sizeof(u16)) +
	       /* NET_DM_ATTR_PC */
	       nla_total_size(sizeof(u64)) +
	       /* NET_DM_ATTR_SYMBOL */
	       nla_total_size(NET_DM_MAX_SYMBOL_LEN + 1) +
	       /* NET_DM_ATTR_IN_PORT */
	       net_dm_in_port_size() +
	       /* NET_DM_ATTR_TIMESTAMP */
	       nla_total_size(sizeof(u64)) +
	       /* NET_DM_ATTR_ORIG_LEN */
	       nla_total_size(sizeof(u32)) +
	       /* NET_DM_ATTR_PROTO */
	       nla_total_size(sizeof(u16)) +
	       /* NET_DM_ATTR_PAYLOAD */
	       nla_total_size(payload_len);
}

static int net_dm_packet_report_in_port_put(struct sk_buff *msg, int ifindex,
					    const char *name)
{
	struct nlattr *attr;

	attr = nla_nest_start(msg, NET_DM_ATTR_IN_PORT);
	if (!attr)
		return -EMSGSIZE;

	if (ifindex &&
	    nla_put_u32(msg, NET_DM_ATTR_PORT_NETDEV_IFINDEX, ifindex))
		goto nla_put_failure;

	if (name && nla_put_string(msg, NET_DM_ATTR_PORT_NETDEV_NAME, name))
		goto nla_put_failure;

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}

static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb,
				     size_t payload_len)
{
	u64 pc = (u64)(uintptr_t) NET_DM_SKB_CB(skb)->pc;
	char buf[NET_DM_MAX_SYMBOL_LEN];
	struct nlattr *attr;
	void *hdr;
	int rc;

	hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
			  NET_DM_CMD_PACKET_ALERT);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_SW))
		goto nla_put_failure;

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_PC, pc, NET_DM_ATTR_PAD))
		goto nla_put_failure;

	snprintf(buf, sizeof(buf), "%pS", NET_DM_SKB_CB(skb)->pc);
	if (nla_put_string(msg, NET_DM_ATTR_SYMBOL, buf))
		goto nla_put_failure;

	rc = net_dm_packet_report_in_port_put(msg, skb->skb_iif, NULL);
	if (rc)
		goto nla_put_failure;

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
			      ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
		goto nla_put_failure;

	if (!payload_len)
		goto out;

	if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
		goto nla_put_failure;

	attr = skb_put(msg, nla_total_size(payload_len));
	attr->nla_type = NET_DM_ATTR_PAYLOAD;
	attr->nla_len = nla_attr_size(payload_len);
	if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
		goto nla_put_failure;

out:
	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

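/* A netlink attribute's length field is 16 bits wide, so a single payload
 * attribute can never carry more than 64KiB minus the attribute header and
 * trailing alignment padding.
 */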
#define NET_DM_MAX_PACKET_SIZE (0xffff - NLA_HDRLEN - NLA_ALIGNTO)

static void net_dm_packet_report(struct sk_buff *skb)
{
	struct sk_buff *msg;
	size_t payload_len;
	int rc;

	/* Make sure we start copying the packet from the MAC header */
	if (skb->data > skb_mac_header(skb))
		skb_push(skb, skb->data - skb_mac_header(skb));
	else
		skb_pull(skb, skb_mac_header(skb) - skb->data);

	/* Ensure packet fits inside a single netlink attribute */
	payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
	if (net_dm_trunc_len)
		payload_len = min_t(size_t, net_dm_trunc_len, payload_len);

	msg = nlmsg_new(net_dm_packet_report_size(payload_len), GFP_KERNEL);
	if (!msg)
		goto out;

	rc = net_dm_packet_report_fill(msg, skb, payload_len);
	if (rc) {
		nlmsg_free(msg);
		goto out;
	}

	genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);

out:
	consume_skb(skb);
}

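/* Per-CPU work item for packet mode: splice the drop queue onto a private
 * list so the queue lock is held only briefly, then report each queued
 * packet from process context.
 */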
static void net_dm_packet_work(struct work_struct *work)
{
	struct per_cpu_dm_data *data;
	struct sk_buff_head list;
	struct sk_buff *skb;
	unsigned long flags;

	data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

	__skb_queue_head_init(&list);

	spin_lock_irqsave(&data->drop_queue.lock, flags);
	skb_queue_splice_tail_init(&data->drop_queue, &list);
	spin_unlock_irqrestore(&data->drop_queue.lock, flags);

	while ((skb = __skb_dequeue(&list)))
		net_dm_packet_report(skb);
}

static size_t
net_dm_hw_packet_report_size(size_t payload_len,
			     const struct net_dm_hw_metadata *hw_metadata)
{
	size_t size;

	size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize);

	return NLMSG_ALIGN(size) +
	       /* NET_DM_ATTR_ORIGIN */
	       nla_total_size(sizeof(u16)) +
	       /* NET_DM_ATTR_HW_TRAP_GROUP_NAME */
	       nla_total_size(strlen(hw_metadata->trap_group_name) + 1) +
	       /* NET_DM_ATTR_HW_TRAP_NAME */
	       nla_total_size(strlen(hw_metadata->trap_name) + 1) +
	       /* NET_DM_ATTR_IN_PORT */
	       net_dm_in_port_size() +
	       /* NET_DM_ATTR_TIMESTAMP */
	       nla_total_size(sizeof(u64)) +
	       /* NET_DM_ATTR_ORIG_LEN */
	       nla_total_size(sizeof(u32)) +
	       /* NET_DM_ATTR_PROTO */
	       nla_total_size(sizeof(u16)) +
	       /* NET_DM_ATTR_PAYLOAD */
	       nla_total_size(payload_len);
}

static int net_dm_hw_packet_report_fill(struct sk_buff *msg,
					struct sk_buff *skb, size_t payload_len)
{
	struct net_dm_hw_metadata *hw_metadata;
	struct nlattr *attr;
	void *hdr;

	hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;

	hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
			  NET_DM_CMD_PACKET_ALERT);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_HW))
		goto nla_put_failure;

	if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_GROUP_NAME,
			   hw_metadata->trap_group_name))
		goto nla_put_failure;

	if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME,
			   hw_metadata->trap_name))
		goto nla_put_failure;

	if (hw_metadata->input_dev) {
		struct net_device *dev = hw_metadata->input_dev;
		int rc;

		rc = net_dm_packet_report_in_port_put(msg, dev->ifindex,
						      dev->name);
		if (rc)
			goto nla_put_failure;
	}

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
			      ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
		goto nla_put_failure;

	if (!payload_len)
		goto out;

	if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
		goto nla_put_failure;

	attr = skb_put(msg, nla_total_size(payload_len));
	attr->nla_type = NET_DM_ATTR_PAYLOAD;
	attr->nla_len = nla_attr_size(payload_len);
	if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
		goto nla_put_failure;

out:
	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

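/* Deep-copy the HW metadata with GFP_ATOMIC: the probe below runs in the
 * driver's drop path, where sleeping is not allowed. A reference is taken
 * on the input device so it remains valid until the report is sent.
 */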
static struct net_dm_hw_metadata *
net_dm_hw_metadata_clone(const struct net_dm_hw_metadata *hw_metadata)
{
	struct net_dm_hw_metadata *n_hw_metadata;
	const char *trap_group_name;
	const char *trap_name;

	n_hw_metadata = kmalloc(sizeof(*hw_metadata), GFP_ATOMIC);
	if (!n_hw_metadata)
		return NULL;

	trap_group_name = kmemdup(hw_metadata->trap_group_name,
				  strlen(hw_metadata->trap_group_name) + 1,
				  GFP_ATOMIC | __GFP_ZERO);
	if (!trap_group_name)
		goto free_hw_metadata;
	n_hw_metadata->trap_group_name = trap_group_name;

	trap_name = kmemdup(hw_metadata->trap_name,
			    strlen(hw_metadata->trap_name) + 1,
			    GFP_ATOMIC | __GFP_ZERO);
	if (!trap_name)
		goto free_trap_group;
	n_hw_metadata->trap_name = trap_name;

	n_hw_metadata->input_dev = hw_metadata->input_dev;
	if (n_hw_metadata->input_dev)
		dev_hold(n_hw_metadata->input_dev);

	return n_hw_metadata;

free_trap_group:
	kfree(trap_group_name);
free_hw_metadata:
	kfree(n_hw_metadata);
	return NULL;
}

static void
net_dm_hw_metadata_free(const struct net_dm_hw_metadata *hw_metadata)
{
	if (hw_metadata->input_dev)
		dev_put(hw_metadata->input_dev);
	kfree(hw_metadata->trap_name);
	kfree(hw_metadata->trap_group_name);
	kfree(hw_metadata);
}

static void net_dm_hw_packet_report(struct sk_buff *skb)
{
	struct net_dm_hw_metadata *hw_metadata;
	struct sk_buff *msg;
	size_t payload_len;
	int rc;

	/* Make sure we start copying the packet from the MAC header */
	if (skb->data > skb_mac_header(skb))
		skb_push(skb, skb->data - skb_mac_header(skb));
	else
		skb_pull(skb, skb_mac_header(skb) - skb->data);

	payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
	if (net_dm_trunc_len)
		payload_len = min_t(size_t, net_dm_trunc_len, payload_len);

	hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
	msg = nlmsg_new(net_dm_hw_packet_report_size(payload_len, hw_metadata),
			GFP_KERNEL);
	if (!msg)
		goto out;

	rc = net_dm_hw_packet_report_fill(msg, skb, payload_len);
	if (rc) {
		nlmsg_free(msg);
		goto out;
	}

	genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);

out:
	net_dm_hw_metadata_free(NET_DM_SKB_CB(skb)->hw_metadata);
	consume_skb(skb);
}

static void net_dm_hw_packet_work(struct work_struct *work)
{
	struct per_cpu_dm_data *hw_data;
	struct sk_buff_head list;
	struct sk_buff *skb;
	unsigned long flags;

	hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

	__skb_queue_head_init(&list);

	spin_lock_irqsave(&hw_data->drop_queue.lock, flags);
	skb_queue_splice_tail_init(&hw_data->drop_queue, &list);
	spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);

	while ((skb = __skb_dequeue(&list)))
		net_dm_hw_packet_report(skb);
}

static void
net_dm_hw_packet_probe(struct sk_buff *skb,
		       const struct net_dm_hw_metadata *hw_metadata)
{
	struct net_dm_hw_metadata *n_hw_metadata;
	ktime_t tstamp = ktime_get_real();
	struct per_cpu_dm_data *hw_data;
	struct sk_buff *nskb;
	unsigned long flags;

	if (!skb_mac_header_was_set(skb))
		return;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return;

	n_hw_metadata = net_dm_hw_metadata_clone(hw_metadata);
	if (!n_hw_metadata)
		goto free;

	NET_DM_SKB_CB(nskb)->hw_metadata = n_hw_metadata;
	nskb->tstamp = tstamp;

	hw_data = this_cpu_ptr(&dm_hw_cpu_data);

	spin_lock_irqsave(&hw_data->drop_queue.lock, flags);
	if (skb_queue_len(&hw_data->drop_queue) < net_dm_queue_len)
		__skb_queue_tail(&hw_data->drop_queue, nskb);
	else
		goto unlock_free;
	spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);

	schedule_work(&hw_data->dm_alert_work);

	return;

unlock_free:
	spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);
	u64_stats_update_begin(&hw_data->stats.syncp);
	hw_data->stats.dropped++;
	u64_stats_update_end(&hw_data->stats.syncp);
	net_dm_hw_metadata_free(n_hw_metadata);
free:
	consume_skb(nskb);
}

static const struct net_dm_alert_ops net_dm_alert_packet_ops = {
	.kfree_skb_probe = net_dm_packet_trace_kfree_skb_hit,
	.napi_poll_probe = net_dm_packet_trace_napi_poll_hit,
	.work_item_func = net_dm_packet_work,
	.hw_work_item_func = net_dm_hw_packet_work,
	.hw_probe = net_dm_hw_packet_probe,
};

static const struct net_dm_alert_ops *net_dm_alert_ops_arr[] = {
	[NET_DM_ALERT_MODE_SUMMARY] = &net_dm_alert_summary_ops,
	[NET_DM_ALERT_MODE_PACKET] = &net_dm_alert_packet_ops,
};

void net_dm_hw_report(struct sk_buff *skb,
		      const struct net_dm_hw_metadata *hw_metadata)
{
	rcu_read_lock();

	if (!monitor_hw)
		goto out;

	net_dm_alert_ops_arr[net_dm_alert_mode]->hw_probe(skb, hw_metadata);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(net_dm_hw_report);

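/* Starting a monitor takes a reference on this module via try_module_get()
 * so it cannot be unloaded while probes are attached; the stop paths drop
 * the reference with module_put().
 */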
static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack)
{
	const struct net_dm_alert_ops *ops;
	int cpu;

	if (monitor_hw) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already enabled");
		return -EAGAIN;
	}

	ops = net_dm_alert_ops_arr[net_dm_alert_mode];

	if (!try_module_get(THIS_MODULE)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module");
		return -ENODEV;
	}

	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
		struct net_dm_hw_entries *hw_entries;

		INIT_WORK(&hw_data->dm_alert_work, ops->hw_work_item_func);
		timer_setup(&hw_data->send_timer, sched_send_work, 0);
		hw_entries = net_dm_hw_reset_per_cpu_data(hw_data);
		kfree(hw_entries);
	}

	monitor_hw = true;

	return 0;
}

static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack)
{
	int cpu;

	if (!monitor_hw) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled");
		return;
	}

	monitor_hw = false;

	/* After this call returns we are guaranteed that no CPU is processing
	 * any hardware drops.
	 */
	synchronize_rcu();

	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
		struct sk_buff *skb;

		del_timer_sync(&hw_data->send_timer);
		cancel_work_sync(&hw_data->dm_alert_work);
		while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
			struct net_dm_hw_metadata *hw_metadata;

			hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
			net_dm_hw_metadata_free(hw_metadata);
			consume_skb(skb);
		}
	}

	module_put(THIS_MODULE);
}

static int net_dm_trace_on_set(struct netlink_ext_ack *extack)
{
	const struct net_dm_alert_ops *ops;
	int cpu, rc;

	ops = net_dm_alert_ops_arr[net_dm_alert_mode];

	if (!try_module_get(THIS_MODULE)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module");
		return -ENODEV;
	}

	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
		struct sk_buff *skb;

		INIT_WORK(&data->dm_alert_work, ops->work_item_func);
		timer_setup(&data->send_timer, sched_send_work, 0);
		/* Allocate a new per-CPU skb for the summary alert message and
		 * free the old one which might contain stale data from
		 * previous tracing.
		 */
		skb = reset_per_cpu_data(data);
		consume_skb(skb);
	}

	rc = register_trace_kfree_skb(ops->kfree_skb_probe, NULL);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to kfree_skb() tracepoint");
		goto err_module_put;
	}

	rc = register_trace_napi_poll(ops->napi_poll_probe, NULL);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to napi_poll() tracepoint");
		goto err_unregister_trace;
	}

	return 0;

err_unregister_trace:
	unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);
err_module_put:
	module_put(THIS_MODULE);
	return rc;
}

static void net_dm_trace_off_set(void)
{
	struct dm_hw_stat_delta *new_stat, *temp;
	const struct net_dm_alert_ops *ops;
	int cpu;

	ops = net_dm_alert_ops_arr[net_dm_alert_mode];

	unregister_trace_napi_poll(ops->napi_poll_probe, NULL);
	unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);

	tracepoint_synchronize_unregister();

	/* Make sure we do not send notifications to user space after request
	 * to stop tracing returns.
	 */
	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
		struct sk_buff *skb;

		del_timer_sync(&data->send_timer);
		cancel_work_sync(&data->dm_alert_work);
		while ((skb = __skb_dequeue(&data->drop_queue)))
			consume_skb(skb);
	}

	list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) {
		if (new_stat->dev == NULL) {
			list_del_rcu(&new_stat->list);
			kfree_rcu(new_stat, rcu);
		}
	}

	module_put(THIS_MODULE);
}

static int set_all_monitor_traces(int state, struct netlink_ext_ack *extack)
{
	int rc = 0;

	if (state == trace_state) {
		NL_SET_ERR_MSG_MOD(extack, "Trace state already set to requested state");
		return -EAGAIN;
	}

	switch (state) {
	case TRACE_ON:
		rc = net_dm_trace_on_set(extack);
		break;
	case TRACE_OFF:
		net_dm_trace_off_set();
		break;
	default:
		rc = 1;
		break;
	}

	if (!rc)
		trace_state = state;
	else
		rc = -EINPROGRESS;

	return rc;
}

static bool net_dm_is_monitoring(void)
{
	return trace_state == TRACE_ON || monitor_hw;
}

static int net_dm_alert_mode_get_from_info(struct genl_info *info,
					   enum net_dm_alert_mode *p_alert_mode)
{
	u8 val;

	val = nla_get_u8(info->attrs[NET_DM_ATTR_ALERT_MODE]);

	switch (val) {
	case NET_DM_ALERT_MODE_SUMMARY:
	case NET_DM_ALERT_MODE_PACKET:
		*p_alert_mode = val;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int net_dm_alert_mode_set(struct genl_info *info)
{
	struct netlink_ext_ack *extack = info->extack;
	enum net_dm_alert_mode alert_mode;
	int rc;

	if (!info->attrs[NET_DM_ATTR_ALERT_MODE])
		return 0;

	rc = net_dm_alert_mode_get_from_info(info, &alert_mode);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid alert mode");
		return -EINVAL;
	}

	net_dm_alert_mode = alert_mode;

	return 0;
}

static void net_dm_trunc_len_set(struct genl_info *info)
{
	if (!info->attrs[NET_DM_ATTR_TRUNC_LEN])
		return;

	net_dm_trunc_len = nla_get_u32(info->attrs[NET_DM_ATTR_TRUNC_LEN]);
}

static void net_dm_queue_len_set(struct genl_info *info)
{
	if (!info->attrs[NET_DM_ATTR_QUEUE_LEN])
		return;

	net_dm_queue_len = nla_get_u32(info->attrs[NET_DM_ATTR_QUEUE_LEN]);
}

static int net_dm_cmd_config(struct sk_buff *skb,
			     struct genl_info *info)
{
	struct netlink_ext_ack *extack = info->extack;
	int rc;

	if (net_dm_is_monitoring()) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot configure drop monitor during monitoring");
		return -EBUSY;
	}

	rc = net_dm_alert_mode_set(info);
	if (rc)
		return rc;

	net_dm_trunc_len_set(info);

	net_dm_queue_len_set(info);

	return 0;
}

static int net_dm_monitor_start(bool set_sw, bool set_hw,
				struct netlink_ext_ack *extack)
{
	bool sw_set = false;
	int rc;

	if (set_sw) {
		rc = set_all_monitor_traces(TRACE_ON, extack);
		if (rc)
			return rc;
		sw_set = true;
	}

	if (set_hw) {
		rc = net_dm_hw_monitor_start(extack);
		if (rc)
			goto err_monitor_hw;
	}

	return 0;

err_monitor_hw:
	if (sw_set)
		set_all_monitor_traces(TRACE_OFF, extack);
	return rc;
}

static void net_dm_monitor_stop(bool set_sw, bool set_hw,
				struct netlink_ext_ack *extack)
{
	if (set_hw)
		net_dm_hw_monitor_stop(extack);
	if (set_sw)
		set_all_monitor_traces(TRACE_OFF, extack);
}

static int net_dm_cmd_trace(struct sk_buff *skb,
			    struct genl_info *info)
{
	bool set_sw = !!info->attrs[NET_DM_ATTR_SW_DROPS];
	bool set_hw = !!info->attrs[NET_DM_ATTR_HW_DROPS];
	struct netlink_ext_ack *extack = info->extack;

	/* To maintain backward compatibility, we start / stop monitoring of
	 * software drops if no flag is specified.
	 */
	if (!set_sw && !set_hw)
		set_sw = true;

	switch (info->genlhdr->cmd) {
	case NET_DM_CMD_START:
		return net_dm_monitor_start(set_sw, set_hw, extack);
	case NET_DM_CMD_STOP:
		net_dm_monitor_stop(set_sw, set_hw, extack);
		return 0;
	}

	return -EOPNOTSUPP;
}

static int net_dm_config_fill(struct sk_buff *msg, struct genl_info *info)
{
	void *hdr;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &net_drop_monitor_family, 0, NET_DM_CMD_CONFIG_NEW);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u8(msg, NET_DM_ATTR_ALERT_MODE, net_dm_alert_mode))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_TRUNC_LEN, net_dm_trunc_len))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_QUEUE_LEN, net_dm_queue_len))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

static int net_dm_cmd_config_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	int rc;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rc = net_dm_config_fill(msg, info);
	if (rc)
		goto free_msg;

	return genlmsg_reply(msg, info);

free_msg:
	nlmsg_free(msg);
	return rc;
}

static void net_dm_stats_read(struct net_dm_stats *stats)
{
	int cpu;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
		struct net_dm_stats *cpu_stats = &data->stats;
		unsigned int start;
		u64 dropped;

		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			dropped = cpu_stats->dropped;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->dropped += dropped;
	}
}

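/* The u64_stats begin/retry loop above guarantees a torn-free 64-bit read
 * even on 32-bit machines, where the counter cannot be read atomically.
 */
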
static int net_dm_stats_put(struct sk_buff *msg)
{
	struct net_dm_stats stats;
	struct nlattr *attr;

	net_dm_stats_read(&stats);

	attr = nla_nest_start(msg, NET_DM_ATTR_STATS);
	if (!attr)
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
			      stats.dropped, NET_DM_ATTR_PAD))
		goto nla_put_failure;

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}

static void net_dm_hw_stats_read(struct net_dm_stats *stats)
{
	int cpu;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
		struct net_dm_stats *cpu_stats = &hw_data->stats;
		unsigned int start;
		u64 dropped;

		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			dropped = cpu_stats->dropped;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->dropped += dropped;
	}
}

static int net_dm_hw_stats_put(struct sk_buff *msg)
{
	struct net_dm_stats stats;
	struct nlattr *attr;

	net_dm_hw_stats_read(&stats);

	attr = nla_nest_start(msg, NET_DM_ATTR_HW_STATS);
	if (!attr)
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
			      stats.dropped, NET_DM_ATTR_PAD))
		goto nla_put_failure;

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}

static int net_dm_stats_fill(struct sk_buff *msg, struct genl_info *info)
{
	void *hdr;
	int rc;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &net_drop_monitor_family, 0, NET_DM_CMD_STATS_NEW);
	if (!hdr)
		return -EMSGSIZE;

	rc = net_dm_stats_put(msg);
	if (rc)
		goto nla_put_failure;

	rc = net_dm_hw_stats_put(msg);
	if (rc)
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

static int net_dm_cmd_stats_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	int rc;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rc = net_dm_stats_fill(msg, info);
	if (rc)
		goto free_msg;

	return genlmsg_reply(msg, info);

free_msg:
	nlmsg_free(msg);
	return rc;
}

static int dropmon_net_event(struct notifier_block *ev_block,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dm_hw_stat_delta *new_stat = NULL;
	struct dm_hw_stat_delta *tmp;

	switch (event) {
	case NETDEV_REGISTER:
		new_stat = kzalloc(sizeof(struct dm_hw_stat_delta), GFP_KERNEL);

		if (!new_stat)
			goto out;

		new_stat->dev = dev;
		new_stat->last_rx = jiffies;
		mutex_lock(&net_dm_mutex);
		list_add_rcu(&new_stat->list, &hw_stats_list);
		mutex_unlock(&net_dm_mutex);
		break;
	case NETDEV_UNREGISTER:
		mutex_lock(&net_dm_mutex);
		list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
			if (new_stat->dev == dev) {

				/* Paired with READ_ONCE() in trace_napi_poll_hit() */
				WRITE_ONCE(new_stat->dev, NULL);

				if (trace_state == TRACE_OFF) {
					list_del_rcu(&new_stat->list);
					kfree_rcu(new_stat, rcu);
					break;
				}
			}
		}
		mutex_unlock(&net_dm_mutex);
		break;
	}
out:
	return NOTIFY_DONE;
}

static const struct nla_policy net_dm_nl_policy[NET_DM_ATTR_MAX + 1] = {
	[NET_DM_ATTR_UNSPEC] = { .strict_start_type = NET_DM_ATTR_UNSPEC + 1 },
	[NET_DM_ATTR_ALERT_MODE] = { .type = NLA_U8 },
	[NET_DM_ATTR_TRUNC_LEN] = { .type = NLA_U32 },
	[NET_DM_ATTR_QUEUE_LEN] = { .type = NLA_U32 },
	[NET_DM_ATTR_SW_DROPS] = { .type = NLA_FLAG },
	[NET_DM_ATTR_HW_DROPS] = { .type = NLA_FLAG },
};

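/* With .strict_start_type set as above, every attribute from
 * NET_DM_ATTR_UNSPEC + 1 onward is subject to strict netlink validation
 * rather than the lenient legacy behaviour.
 */
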
static const struct genl_ops dropmon_ops[] = {
	{
		.cmd = NET_DM_CMD_CONFIG,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = net_dm_cmd_config,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = NET_DM_CMD_START,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = net_dm_cmd_trace,
	},
	{
		.cmd = NET_DM_CMD_STOP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = net_dm_cmd_trace,
	},
	{
		.cmd = NET_DM_CMD_CONFIG_GET,
		.doit = net_dm_cmd_config_get,
	},
	{
		.cmd = NET_DM_CMD_STATS_GET,
		.doit = net_dm_cmd_stats_get,
	},
};

static int net_dm_nl_pre_doit(const struct genl_ops *ops,
			      struct sk_buff *skb, struct genl_info *info)
{
	mutex_lock(&net_dm_mutex);

	return 0;
}

static void net_dm_nl_post_doit(const struct genl_ops *ops,
				struct sk_buff *skb, struct genl_info *info)
{
	mutex_unlock(&net_dm_mutex);
}

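/* Every command handler therefore runs with net_dm_mutex held, serializing
 * all configuration and start/stop requests coming from user space.
 */
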
static struct genl_family net_drop_monitor_family __ro_after_init = {
	.hdrsize = 0,
	.name = "NET_DM",
	.version = 2,
	.maxattr = NET_DM_ATTR_MAX,
	.policy = net_dm_nl_policy,
	.pre_doit = net_dm_nl_pre_doit,
	.post_doit = net_dm_nl_post_doit,
	.module = THIS_MODULE,
	.ops = dropmon_ops,
	.n_ops = ARRAY_SIZE(dropmon_ops),
	.mcgrps = dropmon_mcgrps,
	.n_mcgrps = ARRAY_SIZE(dropmon_mcgrps),
};

static struct notifier_block dropmon_net_notifier = {
	.notifier_call = dropmon_net_event
};

static void __net_dm_cpu_data_init(struct per_cpu_dm_data *data)
{
	spin_lock_init(&data->lock);
	skb_queue_head_init(&data->drop_queue);
	u64_stats_init(&data->stats.syncp);
}

static void __net_dm_cpu_data_fini(struct per_cpu_dm_data *data)
{
	WARN_ON(!skb_queue_empty(&data->drop_queue));
}

static void net_dm_cpu_data_init(int cpu)
{
	struct per_cpu_dm_data *data;

	data = &per_cpu(dm_cpu_data, cpu);
	__net_dm_cpu_data_init(data);
}

static void net_dm_cpu_data_fini(int cpu)
{
	struct per_cpu_dm_data *data;

	data = &per_cpu(dm_cpu_data, cpu);
	/* At this point, we should have exclusive access
	 * to this struct and can free the skb inside it.
	 */
	consume_skb(data->skb);
	__net_dm_cpu_data_fini(data);
}

static void net_dm_hw_cpu_data_init(int cpu)
{
	struct per_cpu_dm_data *hw_data;

	hw_data = &per_cpu(dm_hw_cpu_data, cpu);
	__net_dm_cpu_data_init(hw_data);
}

static void net_dm_hw_cpu_data_fini(int cpu)
{
	struct per_cpu_dm_data *hw_data;

	hw_data = &per_cpu(dm_hw_cpu_data, cpu);
	kfree(hw_data->hw_entries);
	__net_dm_cpu_data_fini(hw_data);
}

static int __init init_net_drop_monitor(void)
{
	int cpu;
	int rc;

	pr_info("Initializing network drop monitor service\n");

	if (sizeof(void *) > 8) {
		pr_err("Unable to store program counters on this arch, Drop monitor failed\n");
		return -ENOSPC;
	}

	rc = genl_register_family(&net_drop_monitor_family);
	if (rc) {
		pr_err("Could not create drop monitor netlink family\n");
		return rc;
	}
	WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);

	rc = register_netdevice_notifier(&dropmon_net_notifier);
	if (rc < 0) {
		pr_crit("Failed to register netdevice notifier\n");
		goto out_unreg;
	}

	rc = 0;

	for_each_possible_cpu(cpu) {
		net_dm_cpu_data_init(cpu);
		net_dm_hw_cpu_data_init(cpu);
	}

	goto out;

out_unreg:
	genl_unregister_family(&net_drop_monitor_family);
out:
	return rc;
}

static void exit_net_drop_monitor(void)
{
	int cpu;

	BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));

	/*
	 * Because of the module_get/put we do in the trace state change path
	 * we are guaranteed not to have any current users when we get here
	 */

	for_each_possible_cpu(cpu) {
		net_dm_hw_cpu_data_fini(cpu);
		net_dm_cpu_data_fini(cpu);
	}

	BUG_ON(genl_unregister_family(&net_drop_monitor_family));
}

module_init(init_net_drop_monitor);
module_exit(exit_net_drop_monitor);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
MODULE_ALIAS_GENL_FAMILY("NET_DM");