/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	Nadia Yvette Chambers, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *	Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
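
/*
 * Each per-cpu hashtable occupies exactly one page: NR_PROFILE_HIT
 * entries of struct profile_hit, organized as NR_PROFILE_GRP groups
 * of PROFILE_GRPSZ (8) slots each for the open-addressed probe
 * sequence in do_profile_hits().
 */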

static atomic_t *prof_buffer;
static unsigned long prof_len;
static unsigned short int prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */
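
/*
 * Boot command line syntax:
 *	profile=[schedule,|sleep,|kvm,]<shift>
 * <shift> sets the profiling granularity: each slot of the profile
 * buffer covers (1 << shift) bytes of kernel text.
 */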
int profile_setup(char *str)
{
	static const char schedstr[] = "schedule";
	static const char sleepstr[] = "sleep";
	static const char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		force_schedstat_enabled();
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel sleep profiling enabled (shift: %u)\n",
			prof_shift);
#else
		pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel schedule profiling enabled (shift: %u)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel KVM profiling enabled (shift: %u)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		prof_on = CPU_PROFILING;
		pr_info("kernel profiling enabled (shift: %u)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);

int __ref profile_init(void)
{
	int buffer_bytes;

	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len * sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);
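
	/*
	 * Allocate the flat profile buffer, preferring the cheapest
	 * allocator that can satisfy the size: kzalloc first, then
	 * alloc_pages_exact, then vzalloc as a last resort.
	 */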
	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
	int ret;

	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);

#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * purpose of the second hashtable is to avoid the cacheline
 * contention that would otherwise occur during the flushes of
 * pending profile hits that accurate reporting requires, and which
 * would resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- nyc
 */
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
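	/*
	 * After the IPI above, every cpu is writing into the other half
	 * of its hashtable pair (do_profile_hits() runs with irqs
	 * disabled), so half j is quiescent and can be drained into
	 * prof_buffer without racing the hot path.
	 */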
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
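	/*
	 * Double hashing: the primary index selects one of the
	 * NR_PROFILE_GRP groups of PROFILE_GRPSZ slots; on collision
	 * the probe advances by a pc-derived, group-aligned stride.
	 * The stride is an odd number of groups, so with a power-of-2
	 * group count every group is visited before wrapping back to
	 * primary.
	 */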
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

static int profile_dead_cpu(unsigned int cpu)
{
	struct page *page;
	int i;

	if (prof_cpu_mask != NULL)
		cpumask_clear_cpu(cpu, prof_cpu_mask);

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
			__free_page(page);
		}
	}
	return 0;
}

static int profile_prepare_cpu(unsigned int cpu)
{
	int i, node = cpu_to_mem(cpu);
	struct page *page;

	per_cpu(cpu_profile_flip, cpu) = 0;
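
	/*
	 * Allocate both halves of the hashtable pair node-local to the
	 * cpu so that interrupt-time hit accounting avoids remote
	 * memory accesses.
	 */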
	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i])
			continue;

		page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
		if (!page) {
			profile_dead_cpu(cpu);
			return -ENOMEM;
		}
		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);

	}
	return 0;
}

static int profile_online_cpu(unsigned int cpu)
{
	if (prof_cpu_mask != NULL)
		cpumask_set_cpu(cpu, prof_cpu_mask);

	return 0;
}

#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;

	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);
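
/*
 * profile_hit() in <linux/profile.h> is the common nr_hits == 1
 * wrapper around profile_hits(); profile_tick() below uses it on
 * each unmasked timer tick that hits kernel mode.
 */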
void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}

static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};

void create_prof_cpu_mask(void)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_fops);
}
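
/*
 * Example (hypothetical 8-cpu box): limit profiling interrupts to
 * cpus 0-3 with a hex mask:
 *	echo 0f > /proc/irq/prof_cpu_mask
 */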

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned long sample_step = 1UL << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

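	/*
	 * The first sizeof(unsigned int) bytes of the file hold the
	 * sample step (1 << prof_shift); serve them one byte at a
	 * time, since *ppos may point anywhere inside that header.
	 */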
	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}

/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);
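
	/*
	 * A write of exactly sizeof(int) is treated as a profiling
	 * multiplier and handed to the architecture's
	 * setup_profiling_timer() to retune the profiling interrupt
	 * frequency; any write also resets the counters below.
	 */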
	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}

static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};

int __ref create_proc_profile(void)
{
	struct proc_dir_entry *entry;
#ifdef CONFIG_SMP
	enum cpuhp_state online_state;
#endif

	int err = 0;

	if (!prof_on)
		return 0;
#ifdef CONFIG_SMP
	err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE",
				profile_prepare_cpu, profile_dead_cpu);
	if (err)
		return err;

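	/*
	 * CPUHP_AP_ONLINE_DYN asks the hotplug core to allocate a
	 * state slot dynamically; on success cpuhp_setup_state()
	 * returns that state number, which is saved for
	 * cpuhp_remove_state() on the error path.
	 */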
	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
				profile_online_cpu, NULL);
	if (err < 0)
		goto err_state_prep;
	online_state = err;
	err = 0;
#endif
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry)
		goto err_state_onl;
	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));

	return err;
err_state_onl:
#ifdef CONFIG_SMP
	cpuhp_remove_state(online_state);
err_state_prep:
	cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
#endif
	return err;
}
subsys_initcall(create_proc_profile);
#endif /* CONFIG_PROC_FS */