/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/kthread.h>
#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

        TP_PROTO(struct task_struct *t),

        TP_ARGS(t),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
                __entry->pid    = t->pid;
        ),

        TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

        TP_PROTO(int ret),

        TP_ARGS(ret),

        TP_STRUCT__entry(
                __field(        int,    ret     )
        ),

        TP_fast_assign(
                __entry->ret    = ret;
        ),

        TP_printk("ret=%d", __entry->ret)
);

/**
 * sched_kthread_work_queue_work - called when a work gets queued
 * @worker:     pointer to the kthread_worker
 * @work:       pointer to struct kthread_work
 *
 * This event occurs when a work is queued immediately or once a
 * delayed work is actually queued (i.e. once the delay has been
 * reached).
 */
TRACE_EVENT(sched_kthread_work_queue_work,

        TP_PROTO(struct kthread_worker *worker,
                 struct kthread_work *work),

        TP_ARGS(worker, work),

        TP_STRUCT__entry(
                __field( void *,        work    )
                __field( void *,        function)
                __field( void *,        worker  )
        ),

        TP_fast_assign(
                __entry->work           = work;
                __entry->function       = work->func;
                __entry->worker         = worker;
        ),

        TP_printk("work struct=%p function=%ps worker=%p",
                  __entry->work, __entry->function, __entry->worker)
);
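
/*
 * A sketch of call paths that fire the event above (a hedged example:
 * it assumes a worker created with kthread_create_worker(), and the
 * work-item names are illustrative):
 *
 *      kthread_queue_work(worker, &work);              // traced immediately
 *      kthread_queue_delayed_work(worker, &dwork, HZ); // traced once the
 *                                                      // delay has elapsed
 */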

/**
 * sched_kthread_work_execute_start - called immediately before the work callback
 * @work:       pointer to struct kthread_work
 *
 * Allows tracking of kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_start,

        TP_PROTO(struct kthread_work *work),

        TP_ARGS(work),

        TP_STRUCT__entry(
                __field( void *,        work    )
                __field( void *,        function)
        ),

        TP_fast_assign(
                __entry->work           = work;
                __entry->function       = work->func;
        ),

        TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/**
 * sched_kthread_work_execute_end - called immediately after the work callback
 * @work:       pointer to struct kthread_work
 * @function:   pointer to worker function
 *
 * Allows tracking of kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_end,

        TP_PROTO(struct kthread_work *work, kthread_work_func_t function),

        TP_ARGS(work, function),

        TP_STRUCT__entry(
                __field( void *,        work    )
                __field( void *,        function)
        ),

        TP_fast_assign(
                __entry->work           = work;
                __entry->function       = function;
        ),

        TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

        TP_PROTO(struct task_struct *p),

        TP_ARGS(__perf_task(p)),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
                __field(        int,    target_cpu              )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio; /* XXX SCHED_DEADLINE */
                __entry->target_cpu     = task_cpu(p);
        ),

        TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt,
                                              unsigned int prev_state,
                                              struct task_struct *p)
{
        unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
        BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

        /*
         * Preemption ignores task state, therefore preempted tasks are always
         * RUNNING (we will not have dequeued if state != RUNNING).
         */
        if (preempt)
                return TASK_REPORT_MAX;

        /*
         * __task_state_index() uses fls() and returns a value in the
         * 0-8 range.  Decrement it by 1 (except for the TASK_RUNNING
         * state, i.e. 0) before using it in a left-shift operation to
         * get the correct task->state mapping.
         */
        state = __task_state_index(prev_state, p->exit_state);

        return state ? (1 << (state - 1)) : state;
}
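
/*
 * Worked example of the mapping above (assuming the usual task state
 * bit values): for prev_state == TASK_INTERRUPTIBLE (0x1),
 * __task_state_index() returns fls(0x1) == 1, so the function returns
 * 1 << (1 - 1) == 0x1, which the sched_switch TP_printk() below decodes
 * as "S".  TASK_RUNNING (0) passes through unchanged and decodes as "R".
 */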
#endif /* CREATE_TRACE_POINTS */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

        TP_PROTO(bool preempt,
                 struct task_struct *prev,
                 struct task_struct *next,
                 unsigned int prev_state),

        TP_ARGS(preempt, prev, next, prev_state),

        TP_STRUCT__entry(
                __array(        char,   prev_comm,      TASK_COMM_LEN   )
                __field(        pid_t,  prev_pid                        )
                __field(        int,    prev_prio                       )
                __field(        long,   prev_state                      )
                __array(        char,   next_comm,      TASK_COMM_LEN   )
                __field(        pid_t,  next_pid                        )
                __field(        int,    next_prio                       )
        ),

        TP_fast_assign(
                memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
                __entry->prev_pid       = prev->pid;
                __entry->prev_prio      = prev->prio;
                __entry->prev_state     = __trace_sched_switch_state(preempt, prev_state, prev);
                memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
                __entry->next_pid       = next->pid;
                __entry->next_prio      = next->prio;
                /* XXX SCHED_DEADLINE */
        ),

        TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
                __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

                (__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
                  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
                                { TASK_INTERRUPTIBLE, "S" },
                                { TASK_UNINTERRUPTIBLE, "D" },
                                { __TASK_STOPPED, "T" },
                                { __TASK_TRACED, "t" },
                                { EXIT_DEAD, "X" },
                                { EXIT_ZOMBIE, "Z" },
                                { TASK_PARKED, "P" },
                                { TASK_DEAD, "I" }) :
                  "R",

                __entry->prev_state & TASK_REPORT_MAX ? "+" : "",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
);
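
/*
 * Example of a rendered sched_switch line (values are illustrative only):
 *
 *   prev_comm=bash prev_pid=1234 prev_prio=120 prev_state=S ==> next_comm=swapper/0 next_pid=0 next_prio=120
 *
 * A preempted task reports TASK_REPORT_MAX from
 * __trace_sched_switch_state() and renders as "R+".
 */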

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

        TP_PROTO(struct task_struct *p, int dest_cpu),

        TP_ARGS(p, dest_cpu),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
                __field(        int,    orig_cpu                )
                __field(        int,    dest_cpu                )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio; /* XXX SCHED_DEADLINE */
                __entry->orig_cpu       = task_cpu(p);
                __entry->dest_cpu       = dest_cpu;
        ),

        TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

        TP_PROTO(struct task_struct *p),

        TP_ARGS(p),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio; /* XXX SCHED_DEADLINE */
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
        TP_PROTO(struct task_struct *p),
        TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

        TP_PROTO(struct pid *pid),

        TP_ARGS(pid),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
                __entry->pid            = pid_nr(pid);
                __entry->prio           = current->prio; /* XXX SCHED_DEADLINE */
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for kernel_clone:
 */
TRACE_EVENT(sched_process_fork,

        TP_PROTO(struct task_struct *parent, struct task_struct *child),

        TP_ARGS(parent, child),

        TP_STRUCT__entry(
                __array(        char,   parent_comm,    TASK_COMM_LEN   )
                __field(        pid_t,  parent_pid                      )
                __array(        char,   child_comm,     TASK_COMM_LEN   )
                __field(        pid_t,  child_pid                       )
        ),

        TP_fast_assign(
                memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
                __entry->parent_pid     = parent->pid;
                memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
                __entry->child_pid      = child->pid;
        ),

        TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
                __entry->parent_comm, __entry->parent_pid,
                __entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

        TP_PROTO(struct task_struct *p, pid_t old_pid,
                 struct linux_binprm *bprm),

        TP_ARGS(p, old_pid, bprm),

        TP_STRUCT__entry(
                __string(       filename,       bprm->filename  )
                __field(        pid_t,          pid             )
                __field(        pid_t,          old_pid         )
        ),

        TP_fast_assign(
                __assign_str(filename, bprm->filename);
                __entry->pid            = p->pid;
                __entry->old_pid        = old_pid;
        ),

        TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
                  __entry->pid, __entry->old_pid)
);


#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif
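
/*
 * With CONFIG_SCHEDSTATS=n the _NOP variants expand the sched_stat_*
 * events below into empty static inline stubs, so on such kernels the
 * events cost nothing and do not appear in tracefs.
 */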

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

        TP_PROTO(struct task_struct *tsk, u64 delay),

        TP_ARGS(__perf_task(tsk), __perf_count(delay)),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( u64,   delay                   )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid    = tsk->pid;
                __entry->delay  = delay;
        ),

        TP_printk("comm=%s pid=%d delay=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

        TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

        TP_ARGS(tsk, __perf_count(runtime), vruntime),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( u64,   runtime                 )
                __field( u64,   vruntime                )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid            = tsk->pid;
                __entry->runtime        = runtime;
                __entry->vruntime       = vruntime;
        ),

        TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->runtime,
                        (unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
             TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
             TP_ARGS(tsk, runtime, vruntime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

        TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

        TP_ARGS(tsk, pi_task),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( int,   oldprio                 )
                __field( int,   newprio                 )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid            = tsk->pid;
                __entry->oldprio        = tsk->prio;
                __entry->newprio        = pi_task ?
                                min(tsk->normal_prio, pi_task->prio) :
                                tsk->normal_prio;
                /* XXX SCHED_DEADLINE bits missing */
        ),

        TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
                        __entry->comm, __entry->pid,
                        __entry->oldprio, __entry->newprio)
);

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
        TP_PROTO(struct task_struct *tsk),
        TP_ARGS(tsk),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid = tsk->pid;
        ),

        TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
TRACE_EVENT(sched_move_numa,

        TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

        TP_ARGS(tsk, src_cpu, dst_cpu),

        TP_STRUCT__entry(
                __field( pid_t, pid                     )
                __field( pid_t, tgid                    )
                __field( pid_t, ngid                    )
                __field( int,   src_cpu                 )
                __field( int,   src_nid                 )
                __field( int,   dst_cpu                 )
                __field( int,   dst_nid                 )
        ),

        TP_fast_assign(
                __entry->pid            = task_pid_nr(tsk);
                __entry->tgid           = task_tgid_nr(tsk);
                __entry->ngid           = task_numa_group_id(tsk);
                __entry->src_cpu        = src_cpu;
                __entry->src_nid        = cpu_to_node(src_cpu);
                __entry->dst_cpu        = dst_cpu;
                __entry->dst_nid        = cpu_to_node(dst_cpu);
        ),

        TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
                        __entry->pid, __entry->tgid, __entry->ngid,
                        __entry->src_cpu, __entry->src_nid,
                        __entry->dst_cpu, __entry->dst_nid)
);

DECLARE_EVENT_CLASS(sched_numa_pair_template,

        TP_PROTO(struct task_struct *src_tsk, int src_cpu,
                 struct task_struct *dst_tsk, int dst_cpu),

        TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

        TP_STRUCT__entry(
                __field( pid_t, src_pid                 )
                __field( pid_t, src_tgid                )
                __field( pid_t, src_ngid                )
                __field( int,   src_cpu                 )
                __field( int,   src_nid                 )
                __field( pid_t, dst_pid                 )
                __field( pid_t, dst_tgid                )
                __field( pid_t, dst_ngid                )
                __field( int,   dst_cpu                 )
                __field( int,   dst_nid                 )
        ),

        TP_fast_assign(
                __entry->src_pid        = task_pid_nr(src_tsk);
                __entry->src_tgid       = task_tgid_nr(src_tsk);
                __entry->src_ngid       = task_numa_group_id(src_tsk);
                __entry->src_cpu        = src_cpu;
                __entry->src_nid        = cpu_to_node(src_cpu);
                __entry->dst_pid        = dst_tsk ? task_pid_nr(dst_tsk) : 0;
                __entry->dst_tgid       = dst_tsk ? task_tgid_nr(dst_tsk) : 0;
                __entry->dst_ngid       = dst_tsk ? task_numa_group_id(dst_tsk) : 0;
                __entry->dst_cpu        = dst_cpu;
                __entry->dst_nid        = dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
        ),

        TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
                        __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
                        __entry->src_cpu, __entry->src_nid,
                        __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
                        __entry->dst_cpu, __entry->dst_nid)
);

DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

        TP_PROTO(struct task_struct *src_tsk, int src_cpu,
                 struct task_struct *dst_tsk, int dst_cpu),

        TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

        TP_PROTO(struct task_struct *src_tsk, int src_cpu,
                 struct task_struct *dst_tsk, int dst_cpu),

        TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

#ifdef CONFIG_NUMA_BALANCING
#define NUMAB_SKIP_REASON                                       \
        EM( NUMAB_SKIP_UNSUITABLE,              "unsuitable" )  \
        EM( NUMAB_SKIP_SHARED_RO,               "shared_ro" )   \
        EM( NUMAB_SKIP_INACCESSIBLE,            "inaccessible" )        \
        EM( NUMAB_SKIP_SCAN_DELAY,              "scan_delay" )  \
        EM( NUMAB_SKIP_PID_INACTIVE,            "pid_inactive" )        \
        EM( NUMAB_SKIP_IGNORE_PID,              "ignore_pid_inactive" )         \
        EMe(NUMAB_SKIP_SEQ_COMPLETED,           "seq_completed" )

/* Redefine for export. */
#undef EM
#undef EMe
#define EM(a, b)        TRACE_DEFINE_ENUM(a);
#define EMe(a, b)       TRACE_DEFINE_ENUM(a);

NUMAB_SKIP_REASON

/* Redefine for symbolic printing. */
#undef EM
#undef EMe
#define EM(a, b)        { a, b },
#define EMe(a, b)       { a, b }
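
/*
 * Sketch of the two expansions above: under the TRACE_DEFINE_ENUM()
 * definitions, NUMAB_SKIP_REASON expands to one
 * TRACE_DEFINE_ENUM(NUMAB_SKIP_*); statement per reason, exporting the
 * enum values to user space; under the { a, b } definitions, it expands
 * to the { value, "name" } table consumed by __print_symbolic() below.
 */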

TRACE_EVENT(sched_skip_vma_numa,

        TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma,
                 enum numa_vmaskip_reason reason),

        TP_ARGS(mm, vma, reason),

        TP_STRUCT__entry(
                __field(unsigned long, numa_scan_offset)
                __field(unsigned long, vm_start)
                __field(unsigned long, vm_end)
                __field(enum numa_vmaskip_reason, reason)
        ),

        TP_fast_assign(
                __entry->numa_scan_offset       = mm->numa_scan_offset;
                __entry->vm_start               = vma->vm_start;
                __entry->vm_end                 = vma->vm_end;
                __entry->reason                 = reason;
        ),

        TP_printk("numa_scan_offset=%lX vm_start=%lX vm_end=%lX reason=%s",
                  __entry->numa_scan_offset,
                  __entry->vm_start,
                  __entry->vm_end,
                  __print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
);
#endif /* CONFIG_NUMA_BALANCING */

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

        TP_PROTO(int cpu),

        TP_ARGS(cpu),

        TP_STRUCT__entry(
                __field(        int,    cpu     )
        ),

        TP_fast_assign(
                __entry->cpu    = cpu;
        ),

        TP_printk("cpu=%d", __entry->cpu)
);

/*
 * The following tracepoints are not exported in tracefs and provide
 * hooking mechanisms only for testing and debugging purposes.
 *
 * They are postfixed with _tp to make them easily identifiable in the
 * code.
 */
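/*
 * A minimal probe sketch (hedged example: module-side code with an
 * illustrative probe name; the probe signature must match TP_PROTO
 * plus a leading void *data argument):
 *
 *      static void probe_overutilized(void *data, struct root_domain *rd,
 *                                     bool overutilized)
 *      {
 *              ...
 *      }
 *
 *      register_trace_sched_overutilized_tp(probe_overutilized, NULL);
 *      ...
 *      unregister_trace_sched_overutilized_tp(probe_overutilized, NULL);
 */
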
DECLARE_TRACE(pelt_cfs_tp,
        TP_PROTO(struct cfs_rq *cfs_rq),
        TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(pelt_thermal_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
        TP_PROTO(struct sched_entity *se),
        TP_ARGS(se));

DECLARE_TRACE(sched_cpu_capacity_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(sched_overutilized_tp,
        TP_PROTO(struct root_domain *rd, bool overutilized),
        TP_ARGS(rd, overutilized));

DECLARE_TRACE(sched_util_est_cfs_tp,
        TP_PROTO(struct cfs_rq *cfs_rq),
        TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp,
        TP_PROTO(struct sched_entity *se),
        TP_ARGS(se));

DECLARE_TRACE(sched_update_nr_running_tp,
        TP_PROTO(struct rq *rq, int change),
        TP_ARGS(rq, change));

DECLARE_TRACE(sched_compute_energy_tp,
        TP_PROTO(struct task_struct *p, int dst_cpu, unsigned long energy,
                 unsigned long max_util, unsigned long busy_time),
        TP_ARGS(p, dst_cpu, energy, max_util, busy_time));

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>