Merge tag 'v5.6-rc3' into sched/core, to pick up fixes and dependent patches
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 94c3b84..f38ff5a 100644
@@ -3516,7 +3516,6 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * attach_entity_load_avg - attach this entity to its cfs_rq load avg
  * @cfs_rq: cfs_rq to attach to
  * @se: sched_entity to attach
- * @flags: migration hints
  *
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
@@ -5787,10 +5786,12 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
                bool idle = true;
 
                for_each_cpu(cpu, cpu_smt_mask(core)) {
-                       __cpumask_clear_cpu(cpu, cpus);
-                       if (!available_idle_cpu(cpu))
+                       if (!available_idle_cpu(cpu)) {
                                idle = false;
+                               break;
+                       }
                }
+               cpumask_andnot(cpus, cpus, cpu_smt_mask(core));
 
                if (idle)
                        return core;
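To illustrate the pruning this hunk switches to: a core's scan stops at the first busy sibling, and the whole SMT mask of that core is then removed from the candidate set in one andnot step, mirroring the cpumask_andnot() call above. Below is a small stand-alone C model of that flow; the mask_t type, cpu_is_idle() and the topology data are made-up stand-ins for the kernel's cpumask API, not code from this patch.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long mask_t;			/* toy stand-in for cpumask_t */

static bool cpu_is_idle(int cpu)
{
	return cpu == 2 || cpu == 3;		/* pretend core 1 ({2,3}) is fully idle */
}

static int pick_idle_core(mask_t *cpus, const mask_t *smt, int ncores)
{
	for (int core = 0; core < ncores; core++) {
		bool idle = true;

		if (!(*cpus & smt[core]))
			continue;		/* core already pruned */

		for (int cpu = 0; cpu < 64; cpu++) {
			if (!(smt[core] & (1UL << cpu)))
				continue;
			if (!cpu_is_idle(cpu)) {
				idle = false;
				break;		/* one busy sibling settles it */
			}
		}
		*cpus &= ~smt[core];		/* cpumask_andnot(): drop the whole core */

		if (idle)
			return core;
	}
	return -1;
}

int main(void)
{
	mask_t cpus = 0xf;			/* CPUs 0-3 allowed */
	mask_t smt[] = { 0x3, 0xc };		/* core 0 = {0,1}, core 1 = {2,3} */

	printf("idle core: %d\n", pick_idle_core(&cpus, smt, 2));
	return 0;
}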
@@ -5894,6 +5895,40 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
        return cpu;
 }
 
+/*
+ * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
+ * the task fits. If no CPU is big enough, but there are idle ones, try to
+ * maximize capacity.
+ */
+static int
+select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
+{
+       unsigned long best_cap = 0;
+       int cpu, best_cpu = -1;
+       struct cpumask *cpus;
+
+       sync_entity_load_avg(&p->se);
+
+       cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
+       cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
+
+       for_each_cpu_wrap(cpu, cpus, target) {
+               unsigned long cpu_cap = capacity_of(cpu);
+
+               if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
+                       continue;
+               if (task_fits_capacity(p, cpu_cap))
+                       return cpu;
+
+               if (cpu_cap > best_cap) {
+                       best_cap = cpu_cap;
+                       best_cpu = cpu;
+               }
+       }
+
+       return best_cpu;
+}
+
 /*
  * Try and locate an idle core/thread in the LLC cache domain.
  */
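The fit test used by select_idle_capacity() above, task_fits_capacity(), ultimately comes down to the series' capacity margin check; assuming the usual fits_capacity() definition in this kernel version (cap * 1280 < max * 1024), the task's utilization has to leave roughly 20% headroom on the candidate CPU. A stand-alone model with made-up utilization and capacity numbers:

#include <stdbool.h>
#include <stdio.h>

/* Model of fits_capacity(): @util must leave ~20% headroom below @capacity. */
static bool fits_capacity(unsigned long util, unsigned long capacity)
{
	return util * 1280 < capacity * 1024;
}

int main(void)
{
	unsigned long util = 350;	/* made-up task utilization */

	printf("fits big (1024):   %d\n", fits_capacity(util, 1024));	/* 1 */
	printf("fits little (430): %d\n", fits_capacity(util, 430));	/* 0 */
	return 0;
}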
@@ -5902,6 +5937,28 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
        struct sched_domain *sd;
        int i, recent_used_cpu;
 
+       /*
+        * For asymmetric CPU capacity systems, our domain of interest is
+        * sd_asym_cpucapacity rather than sd_llc.
+        */
+       if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+               sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
+               /*
+                * On an asymmetric CPU capacity system where an exclusive
+                * cpuset defines a symmetric island (i.e. one unique
+                * capacity_orig value through the cpuset), the key will be set
+                * but the CPUs within that cpuset will not have a domain with
+                * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
+                * capacity path.
+                */
+               if (!sd)
+                       goto symmetric;
+
+               i = select_idle_capacity(p, sd, target);
+               return ((unsigned)i < nr_cpumask_bits) ? i : target;
+       }
+
+symmetric:
        if (available_idle_cpu(target) || sched_idle_cpu(target))
                return target;
 
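The return statement in the new asymmetric path relies on a common idiom: casting a failed scan result (-1) to unsigned makes it compare above nr_cpumask_bits, so a single comparison rejects both "nothing found" and out-of-range values and falls back to @target. A tiny stand-alone illustration, where NR_CPUMASK_BITS is a made-up stand-in for the kernel's nr_cpumask_bits:

#include <stdio.h>

#define NR_CPUMASK_BITS 256	/* stand-in for the kernel's nr_cpumask_bits */

static int pick(int i, int target)
{
	/* -1 becomes a huge unsigned value, fails the bound check, keeps target. */
	return ((unsigned int)i < NR_CPUMASK_BITS) ? i : target;
}

int main(void)
{
	printf("%d\n", pick(3, 7));	/* scan found CPU 3 -> 3 */
	printf("%d\n", pick(-1, 7));	/* scan found nothing -> fall back to 7 */
	return 0;
}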
@@ -6101,33 +6158,6 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
        return min_t(unsigned long, util, capacity_orig_of(cpu));
 }
 
-/*
- * Disable WAKE_AFFINE in the case where task @p doesn't fit in the
- * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
- *
- * In that case WAKE_AFFINE doesn't make sense and we'll let
- * BALANCE_WAKE sort things out.
- */
-static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
-{
-       long min_cap, max_cap;
-
-       if (!static_branch_unlikely(&sched_asym_cpucapacity))
-               return 0;
-
-       min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
-       max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
-
-       /* Minimum capacity is close to max, no need to abort wake_affine */
-       if (max_cap - min_cap < max_cap >> 3)
-               return 0;
-
-       /* Bring task utilization in sync with prev_cpu */
-       sync_entity_load_avg(&p->se);
-
-       return !task_fits_capacity(p, min_cap);
-}
-
 /*
  * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
  * to @dst_cpu.
@@ -6392,8 +6422,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
                        new_cpu = prev_cpu;
                }
 
-               want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
-                             cpumask_test_cpu(cpu, p->cpus_ptr);
+               want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
        }
 
        rcu_read_lock();