// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50
#define MAX_SKB_SIZE						\
	(sizeof(struct ethhdr) +				\
	 sizeof(struct iphdr) +					\
	 sizeof(struct udphdr) +				\
	 MAX_UDP_CHUNK)
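
/*
 * With the usual 14-byte Ethernet header, a 20-byte IPv4 header (no
 * options) and an 8-byte UDP header, MAX_SKB_SIZE works out to
 * 14 + 20 + 8 + 1460 = 1502 bytes of linear data per pooled skb.
 */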
static void zap_completion_queue(void);
static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
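
/*
 * When this file is built into the kernel, the parameter above can
 * typically be set on the kernel command line as
 * "netpoll.carrier_timeout=<seconds>" or adjusted at run time through
 * /sys/module/netpoll/parameters/carrier_timeout (the 0644 mode above
 * makes it writable by root).
 */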
#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)			\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
			      struct netdev_queue *txq)
{
	int status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;
		unsigned int q_index;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		/* check if skb->queue_mapping is still valid */
		q_index = skb_get_queue_mapping(skb);
		if (unlikely(q_index >= dev->real_num_tx_queues)) {
			q_index = q_index % dev->real_num_tx_queues;
			skb_set_queue_mapping(skb, q_index);
		}
		txq = netdev_get_tx_queue(dev, q_index);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}
static int netif_local_xmit_active(struct net_device *dev)
{
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
			return 1;
	}

	return 0;
}
static void poll_one_napi(struct napi_struct *napi)
{
	int work;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicitly pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}
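
/*
 * For illustration only: a driver's NAPI poll callback must tolerate the
 * zero budget used above. A minimal sketch of the expected shape, with
 * hypothetical foo_* helpers (not a real driver):
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
 *		int work_done = 0;
 *
 *		foo_clean_tx_completions(ring);	// Tx cleanup is safe at budget 0
 *		if (budget)
 *			work_done = foo_process_rx(ring, budget);
 *		if (work_done < budget)
 *			napi_complete_done(napi, work_done);
 *		return work_done;		// must be 0 when budget is 0
 *	}
 */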
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int cpu = smp_processor_id();

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
			poll_one_napi(napi);
			smp_store_release(&napi->poll_owner, -1);
		}
	}
}
void netpoll_poll_dev(struct net_device *dev)
{
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	const struct net_device_ops *ops;

	/* Don't do any rx activity if the dev_lock mutex is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (!ni || down_trylock(&ni->dev_lock))
		return;

	/* Some drivers will take the same locks in poll and xmit,
	 * we can't poll if local CPU is already in xmit.
	 */
	if (!netif_running(dev) || netif_local_xmit_active(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (ops->ndo_poll_controller)
		ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);
void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);
void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);
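
/*
 * For illustration only: the core device-state paths are expected to use
 * the two helpers above as a bracket, roughly (a simplified sketch, not
 * the verbatim dev_close() code):
 *
 *	netpoll_poll_disable(dev);	// block netpoll during the change
 *	...bring the interface down or up...
 *	netpoll_poll_enable(dev);	// let netpoll poll the device again
 */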
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;

			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				refcount_set(&skb->users, 1);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	refcount_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}

	return 0;
}
/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	lockdep_assert_irqs_disabled();

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_core_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (dev_xmit_complete(status))
					break;
			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);
	}

	if (!dev_xmit_complete(status)) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
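
/*
 * Note: netpoll_send_skb(), used by netpoll_send_udp() below, is assumed
 * here to be a thin wrapper (declared in <linux/netpoll.h>) that disables
 * local IRQs around netpoll_send_skb_on_dev(), which is what satisfies
 * the "call with IRQ disabled" rule above.
 */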
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
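
/*
 * For illustration only: a netconsole-style caller, with a previously
 * configured struct netpoll np (names here are hypothetical), sends each
 * message with IRQs disabled and payloads capped at MAX_UDP_CHUNK:
 *
 *	unsigned long flags;
 *	int frag = min(msg_len, MAX_UDP_CHUNK);
 *
 *	local_irq_save(flags);
 *	netpoll_send_udp(&np, msg, frag);
 *	local_irq_restore(flags);
 */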
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
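
/*
 * For illustration: the option string parsed above follows the
 * netconsole-style format
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * e.g. "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55" (example
 * values only). Fields other than the target IP may be left empty to
 * keep whatever defaults the caller preset in the struct netpoll.
 */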
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		refcount_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		refcount_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL, *dev = NULL;
	struct net *net = current->nsproxy->net_ns;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0])
		ndev = __dev_get_by_name(net, np->dev_name);

	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	/* bring up DSA management network devices first */
	for_each_netdev(net, dev) {
		if (!netdev_uses_dsa(dev))
			continue;

		err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
		if (err < 0) {
			np_err(np, "%s failed to open %s\n",
			       np->dev_name, dev->name);
			goto put;
		}
	}

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev, NULL);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			const struct in_ifaddr *ifa;

			in_dev = __in_dev_get_rtnl(ndev);
			if (!in_dev)
				goto put_noaddr;

			ifa = rtnl_dereference(in_dev->ifa_list);
			if (!ifa) {
put_noaddr:
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = ifa->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
					    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported on %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
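
/*
 * For illustration only: a minimal client (all values hypothetical)
 * fills in the target description and registers it; on success the
 * netpoll instance is live and netpoll_send_udp() may be used:
 *
 *	static struct netpoll np = {
 *		.name		= "my_console",
 *		.dev_name	= "eth0",
 *		.local_port	= 6665,
 *		.remote_port	= 6666,
 *		.remote_mac	= { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *
 *	np.remote_ip.ip = htonl(0x0a000002);	// 10.0.0.2, network order
 *	if (netpoll_setup(&np))
 *		pr_err("netpoll setup failed\n");
 */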
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);

	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);

	kfree(npinfo);
}
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (refcount_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else {
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
void __netpoll_free(struct netpoll *np)
{
	ASSERT_RTNL();

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu();
	__netpoll_cleanup(np);
	kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);
void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);