/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	Nadia Yvette Chambers, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *  to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *	Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/stat.h>

#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
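
/*
 * Illustrative sizing, not from the original source: assuming 4 KiB
 * pages and the 8-byte struct profile_hit above,
 *
 *	NR_PROFILE_HIT == 4096 / 8 == 512 entries per per-cpu page,
 *	NR_PROFILE_GRP == 512 / PROFILE_GRPSZ == 64 groups of 8 entries.
 */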

static atomic_t *prof_buffer;
static unsigned long prof_len;
static unsigned short int prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

int profile_setup(char *str)
{
	static const char schedstr[] = "schedule";
	static const char sleepstr[] = "sleep";
	static const char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		force_schedstat_enabled();
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel sleep profiling enabled (shift: %u)\n",
			prof_shift);
#else
		pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel schedule profiling enabled (shift: %u)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel KVM profiling enabled (shift: %u)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		prof_on = CPU_PROFILING;
		pr_info("kernel profiling enabled (shift: %u)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
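
/*
 * Illustrative boot parameter usage (forms follow kernel-parameters.txt;
 * the shift values shown are arbitrary examples, not defaults):
 *
 *	profile=2		cpu-time profiling, 1 << 2 = 4 byte buckets
 *	profile=schedule,5	profile schedule() call points
 *	profile=sleep,0		profile D-state sleeps (needs CONFIG_SCHEDSTATS)
 *	profile=kvm		profile KVM guest exits
 */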

int __ref profile_init(void)
{
	int buffer_bytes;

	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;

	if (!prof_len) {
		pr_warn("profiling shift: %u too large\n", prof_shift);
		prof_on = 0;
		return -EINVAL;
	}

	buffer_bytes = prof_len*sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
	int ret;

	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);

#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- nyc
 */
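
/*
 * Worked probe example, not from the original source: assuming 4 KiB
 * pages (NR_PROFILE_HIT == 512, NR_PROFILE_GRP == 64) and a slot
 * pc == 0x1234:
 *
 *	primary   = (0x1234 & 63) << 3 = 52 << 3 = 416
 *	secondary = (~(0x1234 << 1) & 63) << 3 = 23 << 3 = 184
 *
 * do_profile_hits() scans the 8-entry group at 'primary', then steps
 * by 'secondary' modulo NR_PROFILE_HIT. Because pc << 1 is even, its
 * complement is odd, so the step in group units (here 23) is odd and
 * hence coprime to the 64 groups: the probe sequence visits every
 * group before wrapping back to 'primary'.
 */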

static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];

		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];

		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

static int profile_dead_cpu(unsigned int cpu)
{
	struct page *page;
	int i;

	if (prof_cpu_mask != NULL)
		cpumask_clear_cpu(cpu, prof_cpu_mask);

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
			__free_page(page);
		}
	}
	return 0;
}

static int profile_prepare_cpu(unsigned int cpu)
{
	int i, node = cpu_to_mem(cpu);
	struct page *page;

	per_cpu(cpu_profile_flip, cpu) = 0;

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i])
			continue;

		page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
		if (!page) {
			profile_dead_cpu(cpu);
			return -ENOMEM;
		}
		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
	}
	return 0;
}

static int profile_online_cpu(unsigned int cpu)
{
	if (prof_cpu_mask != NULL)
		cpumask_set_cpu(cpu, prof_cpu_mask);

	return 0;
}

#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;

	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);

void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}

static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};

void create_prof_cpu_mask(void)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_fops);
}

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned long sample_step = 1UL << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
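
/*
 * Illustrative userspace usage, not from the original source
 * (readprofile ships with util-linux; the map file path is an example):
 *
 *	readprofile -m /boot/System.map
 *
 * Per the code above, the first sizeof(unsigned int) bytes of
 * /proc/profile hold the sample step (1 << prof_shift); the remainder
 * is the raw array of hit counters.
 */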

/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}
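
/*
 * Illustrative shell usage, not from the original source: any write
 * resets the counters; only an exactly sizeof(int)-byte write is
 * interpreted as a multiplier (on SMP architectures providing
 * setup_profiling_timer()):
 *
 *	echo > /proc/profile
 */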

static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};

int __ref create_proc_profile(void)
{
	struct proc_dir_entry *entry;
#ifdef CONFIG_SMP
	enum cpuhp_state online_state;
#endif

	int err = 0;

	if (!prof_on)
		return 0;
#ifdef CONFIG_SMP
	err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE",
				profile_prepare_cpu, profile_dead_cpu);
	if (err)
		return err;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
				profile_online_cpu, NULL);
	if (err < 0)
		goto err_state_prep;
	online_state = err;
	err = 0;
#endif
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry)
		goto err_state_onl;
	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));

	return err;
err_state_onl:
#ifdef CONFIG_SMP
	cpuhp_remove_state(online_state);
err_state_prep:
	cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
#endif
	return err;
}
subsys_initcall(create_proc_profile);
#endif /* CONFIG_PROC_FS */