Merge tag 'driver-core-5.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core
[linux-2.6-microblaze.git] / include / linux / sched.h
index 28a98fc..ec8d07d 100644 (file)
@@ -113,11 +113,13 @@ struct task_group;
                                         __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
                                         TASK_PARKED)
 
-#define task_is_traced(task)           ((task->state & __TASK_TRACED) != 0)
+#define task_is_running(task)          (READ_ONCE((task)->__state) == TASK_RUNNING)
 
-#define task_is_stopped(task)          ((task->state & __TASK_STOPPED) != 0)
+#define task_is_traced(task)           ((READ_ONCE(task->__state) & __TASK_TRACED) != 0)
 
-#define task_is_stopped_or_traced(task)        ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+#define task_is_stopped(task)          ((READ_ONCE(task->__state) & __TASK_STOPPED) != 0)
+
+#define task_is_stopped_or_traced(task)        ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 
@@ -132,14 +134,14 @@ struct task_group;
        do {                                                    \
                WARN_ON_ONCE(is_special_task_state(state_value));\
                current->task_state_change = _THIS_IP_;         \
-               current->state = (state_value);                 \
+               WRITE_ONCE(current->__state, (state_value));    \
        } while (0)
 
 #define set_current_state(state_value)                         \
        do {                                                    \
                WARN_ON_ONCE(is_special_task_state(state_value));\
                current->task_state_change = _THIS_IP_;         \
-               smp_store_mb(current->state, (state_value));    \
+               smp_store_mb(current->__state, (state_value));  \
        } while (0)
 
 #define set_special_state(state_value)                                 \
@@ -148,7 +150,7 @@ struct task_group;
                WARN_ON_ONCE(!is_special_task_state(state_value));      \
                raw_spin_lock_irqsave(&current->pi_lock, flags);        \
                current->task_state_change = _THIS_IP_;                 \
-               current->state = (state_value);                         \
+               WRITE_ONCE(current->__state, (state_value));            \
                raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
        } while (0)
 #else
@@ -190,10 +192,10 @@ struct task_group;
  * Also see the comments of try_to_wake_up().
  */
 #define __set_current_state(state_value)                               \
-       current->state = (state_value)
+       WRITE_ONCE(current->__state, (state_value))
 
 #define set_current_state(state_value)                                 \
-       smp_store_mb(current->state, (state_value))
+       smp_store_mb(current->__state, (state_value))
 
 /*
  * set_special_state() should be used for those states when the blocking task
@@ -205,12 +207,14 @@ struct task_group;
        do {                                                            \
                unsigned long flags; /* may shadow */                   \
                raw_spin_lock_irqsave(&current->pi_lock, flags);        \
-               current->state = (state_value);                         \
+               WRITE_ONCE(current->__state, (state_value));            \
                raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
        } while (0)
 
 #endif
 
+#define get_current_state()    READ_ONCE(current->__state)
+
 /* Task command name length: */
 #define TASK_COMM_LEN                  16
 
@@ -662,8 +666,7 @@ struct task_struct {
         */
        struct thread_info              thread_info;
 #endif
-       /* -1 unrunnable, 0 runnable, >0 stopped: */
-       volatile long                   state;
+       unsigned int                    __state;
 
        /*
         * This begins the randomizable portion of task_struct. Only
@@ -708,10 +711,17 @@ struct task_struct {
        const struct sched_class        *sched_class;
        struct sched_entity             se;
        struct sched_rt_entity          rt;
+       struct sched_dl_entity          dl;
+
+#ifdef CONFIG_SCHED_CORE
+       struct rb_node                  core_node;
+       unsigned long                   core_cookie;
+       unsigned int                    core_occupation;
+#endif
+
 #ifdef CONFIG_CGROUP_SCHED
        struct task_group               *sched_task_group;
 #endif
-       struct sched_dl_entity          dl;
 
 #ifdef CONFIG_UCLAMP_TASK
        /*
@@ -997,7 +1007,6 @@ struct task_struct {
        /* Signal handlers: */
        struct signal_struct            *signal;
        struct sighand_struct __rcu             *sighand;
-       struct sigqueue                 *sigqueue_cache;
        sigset_t                        blocked;
        sigset_t                        real_blocked;
        /* Restored if set_restore_sigmask() was used: */
@@ -1521,7 +1530,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
 
 static inline unsigned int task_state_index(struct task_struct *tsk)
 {
-       unsigned int tsk_state = READ_ONCE(tsk->state);
+       unsigned int tsk_state = READ_ONCE(tsk->__state);
        unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
 
        BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
@@ -1829,10 +1838,10 @@ static __always_inline void scheduler_ipi(void)
         */
        preempt_fold_need_resched();
 }
-extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
+extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
 #else
 static inline void scheduler_ipi(void) { }
-static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
 {
        return 1;
 }
@@ -2019,6 +2028,8 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
+extern bool sched_task_on_rq(struct task_struct *p);
+
 /*
  * In order to reduce various lock holder preemption latencies provide an
  * interface to see if a vCPU is currently running or not.
@@ -2180,4 +2191,14 @@ int sched_trace_rq_nr_running(struct rq *rq);
 
 const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
 
+#ifdef CONFIG_SCHED_CORE
+extern void sched_core_free(struct task_struct *tsk);
+extern void sched_core_fork(struct task_struct *p);
+extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
+                               unsigned long uaddr);
+#else
+static inline void sched_core_free(struct task_struct *tsk) { }
+static inline void sched_core_fork(struct task_struct *p) { }
+#endif
+
 #endif