Merge branch 'signal-for-v5.17' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-2.6-microblaze.git] / include/linux/sched.h
index 78c351e..a6a2db5 100644
@@ -523,7 +523,11 @@ struct sched_statistics {
        u64                             nr_wakeups_affine_attempts;
        u64                             nr_wakeups_passive;
        u64                             nr_wakeups_idle;
+
+#ifdef CONFIG_SCHED_CORE
+       u64                             core_forceidle_sum;
 #endif
+#endif /* CONFIG_SCHEDSTATS */
 } ____cacheline_aligned;
 
 struct sched_entity {
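
Note: core_forceidle_sum only exists when both CONFIG_SCHEDSTATS and
CONFIG_SCHED_CORE are enabled. As a rough, illustrative sketch (not the
scheduler's actual accounting code), a field like this would typically be
accumulated through the existing schedstat helpers; the helper name below
and the use of the per-task stats member are assumptions for illustration:

    /* Hypothetical helper: charge a slice of forced-idle time to a task. */
    static void charge_core_forceidle(struct task_struct *p, u64 delta)
    {
            if (!schedstat_enabled())
                    return;
            /* __schedstat_add() compiles away when CONFIG_SCHEDSTATS is off. */
            __schedstat_add(p->stats.core_forceidle_sum, delta);
    }
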
@@ -987,8 +991,8 @@ struct task_struct {
        /* CLONE_CHILD_CLEARTID: */
        int __user                      *clear_child_tid;
 
-       /* PF_IO_WORKER */
-       void                            *pf_io_worker;
+       /* PF_KTHREAD | PF_IO_WORKER */
+       void                            *worker_private;
 
        u64                             utime;
        u64                             stime;
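
The renamed pointer now serves both regular kthreads (PF_KTHREAD) and
io_uring workers (PF_IO_WORKER), so its meaning depends on which flag the
task carries. A minimal sketch of the resulting access pattern; the
accessor name here is illustrative, not an existing kernel helper:

    /* Illustrative accessor: only trust worker_private for kthreads. */
    static inline struct kthread *task_kthread_data(struct task_struct *p)
    {
            if (p->flags & PF_KTHREAD)
                    return p->worker_private;
            return NULL;    /* PF_IO_WORKER tasks stash their own state here */
    }
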
@@ -1339,6 +1343,9 @@ struct task_struct {
 #ifdef CONFIG_TRACE_IRQFLAGS
        struct irqtrace_events          kcsan_save_irqtrace;
 #endif
+#ifdef CONFIG_KCSAN_WEAK_MEMORY
+       int                             kcsan_stack_depth;
+#endif
 #endif
 
 #if IS_ENABLED(CONFIG_KUNIT)
@@ -2171,6 +2178,15 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 #endif
 
 #ifdef CONFIG_SMP
+static inline bool owner_on_cpu(struct task_struct *owner)
+{
+       /*
+        * Due to the lock holder preemption issue, we skip spinning if
+        * the owner task is not running on a CPU or its CPU is preempted.
+        */
+       return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
+}
+
 /* Returns effective CPU energy utilization, as seen by the scheduler */
 unsigned long sched_cpu_util(int cpu, unsigned long max);
 #endif /* CONFIG_SMP */
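
owner_on_cpu() consolidates a check that lock spinners previously
open-coded: there is no point busy-waiting for an owner that is not
actually running (e.g. its vCPU was preempted by the host). A simplified
sketch of how a spin loop might use it; the function name is illustrative
and lock-specific details (owner lifetime, lock state) are elided:

    /* Illustrative spin loop: wait only while the lock owner is running. */
    static void spin_while_owner_running(struct task_struct *owner)
    {
            while (owner_on_cpu(owner) && !need_resched())
                    cpu_relax();    /* low-impact busy wait */
    }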