// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/kdb.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/kprobes.h>

#include "internal.h"

/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * by examining the current printk() context mask stored in the
 * @printk_context per-CPU variable.
 *
 * The implementation also allows flushing the strings from another
 * CPU. This matters when we want to make sure that all buffers were
 * handled, or when IRQs are blocked on the target CPU.
 */
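
/*
 * Note: the PRINTK_*_CONTEXT_* masks and offsets used below are defined
 * in kernel/printk/internal.h. Roughly speaking (the exact bit layout
 * lives there): the low bits of @printk_context count printk-safe
 * nesting, one bit flags the "NMI direct" mode, and the high bits count
 * NMI nesting.
 */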

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -	\
				sizeof(atomic_t) -			\
				sizeof(atomic_t) -			\
				sizeof(struct irq_work))
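
/*
 * The sizeof() subtractions above keep the whole struct below, metadata
 * included, within (1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) bytes.
 */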

struct printk_safe_seq_buf {
	atomic_t		len;	/* length of written data */
	atomic_t		message_lost;
	struct irq_work		work;	/* IRQ work that flushes the buffer */
	unsigned char		buffer[SAFE_LOG_BUF_LEN];
};

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

static DEFINE_RAW_SPINLOCK(safe_read_lock);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

/* Get flushed in a safer context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
	if (printk_percpu_data_ready())
		irq_work_queue(&s->work);
}
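
/*
 * Note: irq_work_queue() above is one of the few primitives that may be
 * called from NMI context. The printk_percpu_data_ready() check avoids
 * touching the per-CPU irq_work before the per-CPU areas have been set
 * up during early boot.
 */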

/*
 * Add a message to the per-CPU context-dependent buffer. NMI and
 * printk-safe have dedicated buffers, because otherwise printk-safe
 * preempted by NMI-printk would have overwritten the NMI messages.
 *
 * The messages are flushed from irq work (or from panic()), possibly
 * from another CPU, concurrently with printk_safe_log_store(). Should
 * this happen, printk_safe_log_store() will notice the buffer->len
 * mismatch and repeat the write.
 */
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
						const char *fmt, va_list args)
{
	int add;
	size_t len;
	va_list ap;

again:
	len = atomic_read(&s->len);

	/* The trailing '\0' is not counted into len. */
	if (len >= sizeof(s->buffer) - 1) {
		atomic_inc(&s->message_lost);
		queue_flush_work(s);
		return 0;
	}

	/*
	 * Make sure that all old data have been read before the buffer
	 * was reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

	va_copy(ap, args);
	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
	va_end(ap);
	if (!add)
		return 0;

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data were written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	queue_flush_work(s);
	return add;
}
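
/*
 * The lockless protocol in a nutshell: a writer snapshots s->len,
 * formats its message at that offset, and publishes the new length with
 * atomic_cmpxchg(). If a concurrent flush reset s->len to zero in the
 * meantime, the cmpxchg fails and the whole write is retried at the
 * start of the now-empty buffer.
 */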

static inline void printk_safe_flush_line(const char *text, int len)
{
	/*
	 * Avoid any console driver calls from here, because we may be
	 * in NMI or printk_safe context (when in panic). The messages
	 * must go only into the ring buffer at this stage. Consoles will
	 * get explicitly called later when a crashdump is not generated.
	 */
	printk_deferred("%.*s", len, text);
}

/* printk part of the temporary buffer line by line */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
	const char *c, *end;
	bool header;

	c = start;
	end = start + len;
	header = true;

	/* Print line by line. */
	while (c < end) {
		if (*c == '\n') {
			printk_safe_flush_line(start, c - start + 1);
			start = ++c;
			header = true;
			continue;
		}

		/* Handle continuous lines or missing new line. */
		if ((c + 1 < end) && printk_get_level(c)) {
			if (header) {
				c = printk_skip_level(c);
				continue;
			}

			printk_safe_flush_line(start, c - start);
			start = c++;
			header = true;
			continue;
		}

		header = false;
		c++;
	}

	/* Check if there was a partial line. Ignore pure header. */
	if (start < end && !header) {
		static const char newline[] = KERN_CONT "\n";

		printk_safe_flush_line(start, end - start);
		printk_safe_flush_line(newline, strlen(newline));
	}

	return len;
}
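
/*
 * Example for the function above (hypothetical buffer content): a
 * buffer holding "\0014Line A\nLine B" is flushed as the complete
 * "\0014Line A\n" record, then as "Line B" followed by an explicitly
 * appended KERN_CONT "\n", so that even a partial line terminates a
 * record in the main log buffer.
 */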

static void report_message_lost(struct printk_safe_seq_buf *s)
{
	int lost = atomic_xchg(&s->message_lost, 0);

	if (lost)
		printk_deferred("Lost %d message(s)!\n", lost);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_safe_flush(struct irq_work *work)
{
	struct printk_safe_seq_buf *s =
		container_of(work, struct printk_safe_seq_buf, work);
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&safe_read_lock, flags);

	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase. Also it should never overflow the
	 * buffer size.
	 */
	if ((i && i >= len) || len > sizeof(s->buffer)) {
		const char *msg = "printk_safe_flush: internal error\n";

		printk_safe_flush_line(msg, strlen(msg));
		len = 0;
	}

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that data has been written up to the @len */
	smp_rmb();
	i += printk_safe_flush_buffer(s->buffer + i, len - i);

	/*
	 * Check that nothing has got added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data were copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	report_message_lost(s);
	raw_spin_unlock_irqrestore(&safe_read_lock, flags);
}

/**
 * printk_safe_flush - flush all per-cpu nmi buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_safe_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
	}
}

/**
 * printk_safe_flush_on_panic - flush all per-cpu nmi buffers when the system
 *	goes down.
 *
 * Similar to printk_safe_flush() but it can be called even in NMI context when
 * the system goes down. It makes a best effort to get NMI messages into
 * the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */
void printk_safe_flush_on_panic(void)
{
	/*
	 * Make sure that we could access the main ring buffer.
	 * Do not risk a double release when more CPUs are up.
	 */
	if (raw_spin_is_locked(&logbuf_lock)) {
		if (num_online_cpus() > 1)
			return;

		debug_locks_off();
		raw_spin_lock_init(&logbuf_lock);
	}

	if (raw_spin_is_locked(&safe_read_lock)) {
		if (num_online_cpus() > 1)
			return;

		debug_locks_off();
		raw_spin_lock_init(&safe_read_lock);
	}

	printk_safe_flush();
}
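
/*
 * Re-initializing a still-locked spinlock above is deliberate: in the
 * single-CPU panic case the lock may be held by a context that will
 * never run again, so re-init is the only way to make progress, and
 * debug_locks_off() silences the lock debugging code that would
 * otherwise complain about it.
 */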

#ifdef CONFIG_PRINTK_NMI
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

void noinstr printk_nmi_enter(void)
{
	this_cpu_add(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
}

void noinstr printk_nmi_exit(void)
{
	this_cpu_sub(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
}

/*
 * Marks code that might produce many messages in NMI context
 * and where the risk of losing them is more critical than eventual
 * reordering.
 *
 * It has effect only when called in NMI context. Then printk()
 * will try to store the messages into the main logbuf directly
 * and use the per-CPU buffers only as a fallback when the lock
 * is not available.
 */
void printk_nmi_direct_enter(void)
{
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

void printk_nmi_direct_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}
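
/*
 * A typical user of the direct mode is a mass dump of debugging data
 * from NMI context, e.g. ftrace_dump(), where losing messages hurts
 * more than a possible reordering.
 */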

#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
 * Lock-less printk(), to avoid deadlocks should the printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just like
 * NMI.
 */
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
	this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
	this_cpu_dec(printk_context);
}
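
/*
 * Callers normally use the printk_safe_enter_irqsave()/
 * printk_safe_exit_irqrestore() wrappers from kernel/printk/internal.h,
 * which also disable interrupts. For example, logbuf_lock_irqsave() in
 * printk.c enters the safe context before taking the lock, roughly:
 *
 *	printk_safe_enter_irqsave(flags);
 *	raw_spin_lock(&logbuf_lock);
 */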

__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
#ifdef CONFIG_KGDB_KDB
	/* Allow passing printk() messages to kdb but avoid a recursion. */
	if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
		return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
#endif

	/*
	 * Try to use the main logbuf even in NMI. But avoid calling console
	 * drivers that might have their own locks.
	 */
	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
	    raw_spin_trylock(&logbuf_lock)) {
		int len;

		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
		raw_spin_unlock(&logbuf_lock);
		defer_console_output();
		return len;
	}

	/* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		return vprintk_nmi(fmt, args);

	/* Use extra buffer to prevent a recursion deadlock in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
		return vprintk_safe(fmt, args);

	/* No obstacles. */
	return vprintk_default(fmt, args);
}
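
/*
 * vprintk_func() is the common entry point used by printk(): it picks
 * the cheapest implementation that is safe in the current context, in
 * the order checked above (kdb, NMI direct, NMI buffered, printk-safe
 * buffered, default).
 */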

void __init printk_safe_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct printk_safe_seq_buf *s;

		s = &per_cpu(safe_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
		s = &per_cpu(nmi_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);
#endif
	}

	/* Flush pending messages that did not have scheduled IRQ works. */
	printk_safe_flush();
}