/* include/trace/events/sched.h */
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/kthread.h>
#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

        TP_PROTO(struct task_struct *t),

        TP_ARGS(t),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
                __entry->pid    = t->pid;
        ),

        TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

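/*
 * Example (illustrative sketch, not part of this header): TRACE_EVENT()
 * generates register/unregister helpers for each event, so kernel code
 * could attach a probe to sched_kthread_stop roughly like this:
 *
 *      static void probe_kthread_stop(void *data, struct task_struct *t)
 *      {
 *              pr_info("stopping kthread %s (pid %d)\n", t->comm, t->pid);
 *      }
 *
 *      register_trace_sched_kthread_stop(probe_kthread_stop, NULL);
 */
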
/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

        TP_PROTO(int ret),

        TP_ARGS(ret),

        TP_STRUCT__entry(
                __field(        int,    ret     )
        ),

        TP_fast_assign(
                __entry->ret    = ret;
        ),

        TP_printk("ret=%d", __entry->ret)
);

/**
 * sched_kthread_work_queue_work - called when a work gets queued
 * @worker:     pointer to the kthread_worker
 * @work:       pointer to struct kthread_work
 *
 * This event occurs when a work is queued immediately, or once a
 * delayed work is actually queued (i.e. once the delay has been
 * reached).
 */
TRACE_EVENT(sched_kthread_work_queue_work,

        TP_PROTO(struct kthread_worker *worker,
                 struct kthread_work *work),

        TP_ARGS(worker, work),

        TP_STRUCT__entry(
                __field( void *,        work    )
                __field( void *,        function)
                __field( void *,        worker  )
        ),

        TP_fast_assign(
                __entry->work           = work;
                __entry->function       = work->func;
                __entry->worker         = worker;
        ),

        TP_printk("work struct=%p function=%ps worker=%p",
                  __entry->work, __entry->function, __entry->worker)
);

/**
 * sched_kthread_work_execute_start - called immediately before the work callback
 * @work:       pointer to struct kthread_work
 *
 * Allows tracking of kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_start,

        TP_PROTO(struct kthread_work *work),

        TP_ARGS(work),

        TP_STRUCT__entry(
                __field( void *,        work    )
                __field( void *,        function)
        ),

        TP_fast_assign(
                __entry->work           = work;
                __entry->function       = work->func;
        ),

        TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/**
 * sched_kthread_work_execute_end - called immediately after the work callback
 * @work:       pointer to struct kthread_work
 * @function:   pointer to worker function
 *
 * Allows tracking of kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_end,

        TP_PROTO(struct kthread_work *work, kthread_work_func_t function),

        TP_ARGS(work, function),

        TP_STRUCT__entry(
                __field( void *,        work    )
                __field( void *,        function)
        ),

        TP_fast_assign(
                __entry->work           = work;
                __entry->function       = function;
        ),

        TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

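/*
 * Example (hedged sketch; the probe names and single-slot bookkeeping are
 * hypothetical): pairing the execute_start/execute_end events to measure
 * how long a kthread work callback runs. A real tool would key the
 * timestamp by work item rather than per CPU:
 *
 *      static DEFINE_PER_CPU(u64, kwork_start_ns);
 *
 *      static void probe_start(void *data, struct kthread_work *work)
 *      {
 *              this_cpu_write(kwork_start_ns, ktime_get_ns());
 *      }
 *
 *      static void probe_end(void *data, struct kthread_work *work,
 *                            kthread_work_func_t fn)
 *      {
 *              pr_debug("%ps ran for %llu ns\n", fn,
 *                       ktime_get_ns() - this_cpu_read(kwork_start_ns));
 *      }
 *
 *      register_trace_sched_kthread_work_execute_start(probe_start, NULL);
 *      register_trace_sched_kthread_work_execute_end(probe_end, NULL);
 */
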
/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

        TP_PROTO(struct task_struct *p),

        TP_ARGS(__perf_task(p)),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
                __field(        int,    target_cpu              )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio; /* XXX SCHED_DEADLINE */
                __entry->target_cpu     = task_cpu(p);
        ),

        TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

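/*
 * Example (illustrative only): sched_waking fires in the waker's context
 * and sched_wakeup fires once the task reaches TASK_RUNNING, so probes on
 * both can approximate wakeup latency. A real tool would key timestamps
 * by pid instead of the single slot used in this sketch:
 *
 *      static u64 waking_ns;
 *
 *      static void probe_waking(void *data, struct task_struct *p)
 *      {
 *              waking_ns = ktime_get_ns();
 *      }
 *
 *      static void probe_wakeup(void *data, struct task_struct *p)
 *      {
 *              pr_debug("%s woke after %llu ns\n", p->comm,
 *                       ktime_get_ns() - waking_ns);
 *      }
 *
 *      register_trace_sched_waking(probe_waking, NULL);
 *      register_trace_sched_wakeup(probe_wakeup, NULL);
 */
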
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
        unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
        BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

        /*
         * Preemption ignores task state, therefore preempted tasks are always
         * RUNNING (we will not have dequeued if state != RUNNING).
         */
        if (preempt)
                return TASK_REPORT_MAX;

        /*
         * task_state_index() uses fls() and returns a value in the 0-8 range.
         * Decrement it by 1 (except for the TASK_RUNNING state, i.e. 0) and
         * left-shift to recover the original task->state bit. For example,
         * TASK_UNINTERRUPTIBLE (0x0002) indexes to 2, and 1 << (2 - 1) maps
         * back to 0x0002.
         */
        state = task_state_index(p);

        return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

        TP_PROTO(bool preempt,
                 struct task_struct *prev,
                 struct task_struct *next),

        TP_ARGS(preempt, prev, next),

        TP_STRUCT__entry(
                __array(        char,   prev_comm,      TASK_COMM_LEN   )
                __field(        pid_t,  prev_pid                        )
                __field(        int,    prev_prio                       )
                __field(        long,   prev_state                      )
                __array(        char,   next_comm,      TASK_COMM_LEN   )
                __field(        pid_t,  next_pid                        )
                __field(        int,    next_prio                       )
        ),

        TP_fast_assign(
                memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
                __entry->prev_pid       = prev->pid;
                __entry->prev_prio      = prev->prio;
                __entry->prev_state     = __trace_sched_switch_state(preempt, prev);
                memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
                __entry->next_pid       = next->pid;
                __entry->next_prio      = next->prio;
                /* XXX SCHED_DEADLINE */
        ),

        TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
                __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

                (__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
                  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
                                { TASK_INTERRUPTIBLE, "S" },
                                { TASK_UNINTERRUPTIBLE, "D" },
                                { __TASK_STOPPED, "T" },
                                { __TASK_TRACED, "t" },
                                { EXIT_DEAD, "X" },
                                { EXIT_ZOMBIE, "Z" },
                                { TASK_PARKED, "P" },
                                { TASK_DEAD, "I" }) :
                  "R",

                __entry->prev_state & TASK_REPORT_MAX ? "+" : "",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
);

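/*
 * Example (sketch, not part of this header): a probe matching this
 * event's prototype. Note prev_state is already decoded by the TP_printk
 * above into the familiar letters (R, S, D, T, t, X, Z, P, I), with
 * TASK_REPORT_MAX rendered as a "+" suffix for preempted tasks:
 *
 *      static void probe_switch(void *data, bool preempt,
 *                               struct task_struct *prev,
 *                               struct task_struct *next)
 *      {
 *              if (preempt)
 *                      pr_debug("%s preempted by %s\n",
 *                               prev->comm, next->comm);
 *      }
 *
 *      register_trace_sched_switch(probe_switch, NULL);
 */
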
/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

        TP_PROTO(struct task_struct *p, int dest_cpu),

        TP_ARGS(p, dest_cpu),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
                __field(        int,    orig_cpu                )
                __field(        int,    dest_cpu                )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio; /* XXX SCHED_DEADLINE */
                __entry->orig_cpu       = task_cpu(p);
                __entry->dest_cpu       = dest_cpu;
        ),

        TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->orig_cpu, __entry->dest_cpu)
);

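/*
 * Example: because this event is exported in tracefs, migrations can be
 * observed from userspace with no kernel code (mount points may vary):
 *
 *      # perf record -e sched:sched_migrate_task -a sleep 1
 *
 * or via the trace buffer directly:
 *
 *      # echo 1 > /sys/kernel/tracing/events/sched/sched_migrate_task/enable
 *      # cat /sys/kernel/tracing/trace_pipe
 */
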
DECLARE_EVENT_CLASS(sched_process_template,

        TP_PROTO(struct task_struct *p),

        TP_ARGS(p),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio; /* XXX SCHED_DEADLINE */
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

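/*
 * Example (hypothetical event name, shown only to illustrate the
 * mechanism): DECLARE_EVENT_CLASS() above emits the record/format logic
 * once, and each DEFINE_EVENT() stamps out a tracepoint that reuses it,
 * so another comm/pid/prio event costs only three lines:
 *
 *      DEFINE_EVENT(sched_process_template, sched_process_foo,
 *                   TP_PROTO(struct task_struct *p),
 *                   TP_ARGS(p));
 */
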
/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

        TP_PROTO(struct pid *pid),

        TP_ARGS(pid),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
                __entry->pid            = pid_nr(pid);
                __entry->prio           = current->prio; /* XXX SCHED_DEADLINE */
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for kernel_clone:
 */
TRACE_EVENT(sched_process_fork,

        TP_PROTO(struct task_struct *parent, struct task_struct *child),

        TP_ARGS(parent, child),

        TP_STRUCT__entry(
                __array(        char,   parent_comm,    TASK_COMM_LEN   )
                __field(        pid_t,  parent_pid                      )
                __array(        char,   child_comm,     TASK_COMM_LEN   )
                __field(        pid_t,  child_pid                       )
        ),

        TP_fast_assign(
                memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
                __entry->parent_pid     = parent->pid;
                memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
                __entry->child_pid      = child->pid;
        ),

        TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
                __entry->parent_comm, __entry->parent_pid,
                __entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

        TP_PROTO(struct task_struct *p, pid_t old_pid,
                 struct linux_binprm *bprm),

        TP_ARGS(p, old_pid, bprm),

        TP_STRUCT__entry(
                __string(       filename,       bprm->filename  )
                __field(        pid_t,          pid             )
                __field(        pid_t,          old_pid         )
        ),

        TP_fast_assign(
                __assign_str(filename, bprm->filename);
                __entry->pid            = p->pid;
                __entry->old_pid        = old_pid;
        ),

        TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
                  __entry->pid, __entry->old_pid)
);

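/*
 * Example: the __string()/__assign_str() pair above stores the filename
 * as a variable-length field in the ring buffer; it shows up as a
 * "__data_loc char[]" entry in the event's format file (path may vary):
 *
 *      # cat /sys/kernel/tracing/events/sched/sched_process_exec/format
 */
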
#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif

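/*
 * Editorial note (based on the generic NOP macros in <linux/tracepoint.h>):
 * with CONFIG_SCHEDSTATS=n the *_SCHEDSTAT variants expand to the _NOP
 * forms, which still emit empty static inline trace_*() stubs, so call
 * sites such as trace_sched_stat_wait() compile away without any #ifdefs.
 */
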
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

        TP_PROTO(struct task_struct *tsk, u64 delay),

        TP_ARGS(__perf_task(tsk), __perf_count(delay)),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( u64,   delay                   )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid    = tsk->pid;
                __entry->delay  = delay;
        ),

        TP_printk("comm=%s pid=%d delay=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

        TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

        TP_ARGS(tsk, __perf_count(runtime), vruntime),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( u64,   runtime                 )
                __field( u64,   vruntime                )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid            = tsk->pid;
                __entry->runtime        = runtime;
                __entry->vruntime       = vruntime;
        ),

        TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->runtime,
                        (unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
             TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
             TP_ARGS(tsk, runtime, vruntime));

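/*
 * Example (sketch; the 10 ms threshold is arbitrary): a probe on
 * sched_stat_runtime sees every on-CPU runtime update and could flag
 * unusually long stretches:
 *
 *      static void probe_runtime(void *data, struct task_struct *tsk,
 *                                u64 runtime, u64 vruntime)
 *      {
 *              if (runtime > 10 * NSEC_PER_MSEC)
 *                      pr_debug("%s ran %llu ns in one update\n",
 *                               tsk->comm, runtime);
 *      }
 *
 *      register_trace_sched_stat_runtime(probe_runtime, NULL);
 */
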
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

        TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

        TP_ARGS(tsk, pi_task),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( int,   oldprio                 )
                __field( int,   newprio                 )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid            = tsk->pid;
                __entry->oldprio        = tsk->prio;
                __entry->newprio        = pi_task ?
                                min(tsk->normal_prio, pi_task->prio) :
                                tsk->normal_prio;
                /* XXX SCHED_DEADLINE bits missing */
        ),

        TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
                        __entry->comm, __entry->pid,
                        __entry->oldprio, __entry->newprio)
);

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
        TP_PROTO(struct task_struct *tsk),
        TP_ARGS(tsk),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid = tsk->pid;
        ),

        TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
TRACE_EVENT(sched_move_numa,

        TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

        TP_ARGS(tsk, src_cpu, dst_cpu),

        TP_STRUCT__entry(
                __field( pid_t, pid                     )
                __field( pid_t, tgid                    )
                __field( pid_t, ngid                    )
                __field( int,   src_cpu                 )
                __field( int,   src_nid                 )
                __field( int,   dst_cpu                 )
                __field( int,   dst_nid                 )
        ),

        TP_fast_assign(
                __entry->pid            = task_pid_nr(tsk);
                __entry->tgid           = task_tgid_nr(tsk);
                __entry->ngid           = task_numa_group_id(tsk);
                __entry->src_cpu        = src_cpu;
                __entry->src_nid        = cpu_to_node(src_cpu);
                __entry->dst_cpu        = dst_cpu;
                __entry->dst_nid        = cpu_to_node(dst_cpu);
        ),

        TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
                        __entry->pid, __entry->tgid, __entry->ngid,
                        __entry->src_cpu, __entry->src_nid,
                        __entry->dst_cpu, __entry->dst_nid)
);

DECLARE_EVENT_CLASS(sched_numa_pair_template,

        TP_PROTO(struct task_struct *src_tsk, int src_cpu,
                 struct task_struct *dst_tsk, int dst_cpu),

        TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

        TP_STRUCT__entry(
                __field( pid_t, src_pid                 )
                __field( pid_t, src_tgid                )
                __field( pid_t, src_ngid                )
                __field( int,   src_cpu                 )
                __field( int,   src_nid                 )
                __field( pid_t, dst_pid                 )
                __field( pid_t, dst_tgid                )
                __field( pid_t, dst_ngid                )
                __field( int,   dst_cpu                 )
                __field( int,   dst_nid                 )
        ),

        TP_fast_assign(
                __entry->src_pid        = task_pid_nr(src_tsk);
                __entry->src_tgid       = task_tgid_nr(src_tsk);
                __entry->src_ngid       = task_numa_group_id(src_tsk);
                __entry->src_cpu        = src_cpu;
                __entry->src_nid        = cpu_to_node(src_cpu);
                __entry->dst_pid        = dst_tsk ? task_pid_nr(dst_tsk) : 0;
                __entry->dst_tgid       = dst_tsk ? task_tgid_nr(dst_tsk) : 0;
                __entry->dst_ngid       = dst_tsk ? task_numa_group_id(dst_tsk) : 0;
                __entry->dst_cpu        = dst_cpu;
                __entry->dst_nid        = dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
        ),

        TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
                        __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
                        __entry->src_cpu, __entry->src_nid,
                        __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
                        __entry->dst_cpu, __entry->dst_nid)
);

DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

        TP_PROTO(struct task_struct *src_tsk, int src_cpu,
                 struct task_struct *dst_tsk, int dst_cpu),

        TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

        TP_PROTO(struct task_struct *src_tsk, int src_cpu,
                 struct task_struct *dst_tsk, int dst_cpu),

        TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

        TP_PROTO(int cpu),

        TP_ARGS(cpu),

        TP_STRUCT__entry(
                __field(        int,    cpu     )
        ),

        TP_fast_assign(
                __entry->cpu    = cpu;
        ),

        TP_printk("cpu=%d", __entry->cpu)
);

/*
 * The following tracepoints are not exported in tracefs and provide hooking
 * mechanisms only for testing and debugging purposes. They are postfixed
 * with _tp to make them easily identifiable in the code.
 */
DECLARE_TRACE(pelt_cfs_tp,
        TP_PROTO(struct cfs_rq *cfs_rq),
        TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(pelt_thermal_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
        TP_PROTO(struct sched_entity *se),
        TP_ARGS(se));

DECLARE_TRACE(sched_cpu_capacity_tp,
        TP_PROTO(struct rq *rq),
        TP_ARGS(rq));

DECLARE_TRACE(sched_overutilized_tp,
        TP_PROTO(struct root_domain *rd, bool overutilized),
        TP_ARGS(rd, overutilized));

DECLARE_TRACE(sched_util_est_cfs_tp,
        TP_PROTO(struct cfs_rq *cfs_rq),
        TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp,
        TP_PROTO(struct sched_entity *se),
        TP_ARGS(se));

DECLARE_TRACE(sched_update_nr_running_tp,
        TP_PROTO(struct rq *rq, int change),
        TP_ARGS(rq, change));

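/*
 * Example (illustrative module sketch): since these are bare
 * DECLARE_TRACE() hooks with no tracefs entry, the only consumer is
 * kernel code registering a probe. Note the probe needs the scheduler's
 * private struct definitions (kernel/sched/sched.h) to dereference its
 * arguments:
 *
 *      static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
 *      {
 *              pr_debug("cfs_rq util_avg=%lu\n", cfs_rq->avg.util_avg);
 *      }
 *
 *      register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
 */
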
#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>