Merge branch 'for-4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
[linux-2.6-microblaze.git] / kernel/cgroup/cpuset.c
index 85e0416..479743d 100644
@@ -113,6 +113,9 @@ struct cpuset {
         * CPUs allocated to child sub-partitions (default hierarchy only)
         * - CPUs granted by the parent = effective_cpus U subparts_cpus
         * - effective_cpus and subparts_cpus are mutually exclusive.
+        *
+        * effective_cpus contains only onlined CPUs, but subparts_cpus
+        * may have offlined ones.
         */
        cpumask_var_t subparts_cpus;
 
@@ -147,16 +150,33 @@ struct cpuset {
 
        /* partition root state */
        int partition_root_state;
+
+       /*
+        * Default hierarchy only:
+        * use_parent_ecpus - set if using parent's effective_cpus
+        * child_ecpus_count - # of children with use_parent_ecpus set
+        */
+       int use_parent_ecpus;
+       int child_ecpus_count;
 };
 
 /*
  * Partition root states:
  *
  *   0 - not a partition root
+ *
  *   1 - partition root
+ *
+ *  -1 - invalid partition root
+ *       None of the cpus in cpus_allowed can be put into the parent's
+ *       subparts_cpus. In this case, the cpuset is not a real partition
+ *       root anymore.  However, the CPU_EXCLUSIVE bit will still be set
+ *       and the cpuset can be restored back to a partition root if the
+ *       parent cpuset can give more CPUs back to this child cpuset.
  */
 #define PRS_DISABLED           0
 #define PRS_ENABLED            1
+#define PRS_ERROR              -1
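For reference, a minimal user-space sketch (helper names hypothetical, not part of this patch) of how the three values are meant to be tested. PRS_ERROR is negative, so a plain truth test of partition_root_state still catches an invalid root, which is why is_partition_root() below now compares with '> 0':

#include <assert.h>
#include <stdbool.h>

enum { EX_PRS_DISABLED = 0, EX_PRS_ENABLED = 1, EX_PRS_ERROR = -1 };

/* "some kind of partition root", i.e. valid or invalid */
static bool ex_has_partition_state(int prs) { return prs != 0; }
/* valid partition root only, mirroring is_partition_root() */
static bool ex_is_valid_root(int prs)       { return prs > 0; }

int main(void)
{
	assert(!ex_has_partition_state(EX_PRS_DISABLED));
	assert(ex_has_partition_state(EX_PRS_ERROR) && !ex_is_valid_root(EX_PRS_ERROR));
	assert(ex_is_valid_root(EX_PRS_ENABLED));
	return 0;
}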
 
 /*
  * Temporary cpumasks for working with partitions that are passed among
@@ -251,7 +271,7 @@ static inline int is_spread_slab(const struct cpuset *cs)
 
 static inline int is_partition_root(const struct cpuset *cs)
 {
-       return cs->partition_root_state;
+       return cs->partition_root_state > 0;
 }
 
 static struct cpuset top_cpuset = {
@@ -749,13 +769,14 @@ static int generate_sched_domains(cpumask_var_t **domains,
        int ndoms = 0;          /* number of sched domains in result */
        int nslot;              /* next empty doms[] struct cpumask slot */
        struct cgroup_subsys_state *pos_css;
+       bool root_load_balance = is_sched_load_balance(&top_cpuset);
 
        doms = NULL;
        dattr = NULL;
        csa = NULL;
 
        /* Special case for the 99% of systems with one, full, sched domain */
-       if (is_sched_load_balance(&top_cpuset)) {
+       if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
                ndoms = 1;
                doms = alloc_sched_domains(ndoms);
                if (!doms)
@@ -778,6 +799,8 @@ static int generate_sched_domains(cpumask_var_t **domains,
        csn = 0;
 
        rcu_read_lock();
+       if (root_load_balance)
+               csa[csn++] = &top_cpuset;
        cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
                if (cp == &top_cpuset)
                        continue;
@@ -788,6 +811,9 @@ static int generate_sched_domains(cpumask_var_t **domains,
                 * parent's cpus, so just skip them, and then we call
                 * update_domain_attr_tree() to calc relax_domain_level of
                 * the corresponding sched domain.
+                *
+                * If root is load-balancing, we can skip @cp if it
+                * is a subset of the root's effective_cpus.
                 */
                if (!cpumask_empty(cp->cpus_allowed) &&
                    !(is_sched_load_balance(cp) &&
@@ -795,11 +821,16 @@ static int generate_sched_domains(cpumask_var_t **domains,
                                         housekeeping_cpumask(HK_FLAG_DOMAIN))))
                        continue;
 
+               if (root_load_balance &&
+                   cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
+                       continue;
+
                if (is_sched_load_balance(cp))
                        csa[csn++] = cp;
 
-               /* skip @cp's subtree */
-               pos_css = css_rightmost_descendant(pos_css);
+               /* skip @cp's subtree if not a partition root */
+               if (!is_partition_root(cp))
+                       pos_css = css_rightmost_descendant(pos_css);
        }
        rcu_read_unlock();
 
@@ -927,7 +958,12 @@ static void rebuild_sched_domains_locked(void)
         * passing doms with offlined cpu to partition_sched_domains().
         * Anyways, hotplug work item will rebuild sched domains.
         */
-       if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
+       if (!top_cpuset.nr_subparts_cpus &&
+           !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
+               goto out;
+
+       if (top_cpuset.nr_subparts_cpus &&
+          !cpumask_subset(top_cpuset.effective_cpus, cpu_active_mask))
                goto out;
 
        /* Generate domain masks and attrs */
@@ -977,7 +1013,9 @@ static void update_tasks_cpumask(struct cpuset *cs)
  * @parent: the parent cpuset
  *
  * If the parent has subpartition CPUs, include them in the list of
- * allowable CPUs in computing the new effective_cpus mask.
+ * allowable CPUs in computing the new effective_cpus mask. Since offlined
+ * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
+ * to mask those out.
  */
 static void compute_effective_cpumask(struct cpumask *new_cpus,
                                      struct cpuset *cs, struct cpuset *parent)
@@ -986,6 +1024,7 @@ static void compute_effective_cpumask(struct cpumask *new_cpus,
                cpumask_or(new_cpus, parent->effective_cpus,
                           parent->subparts_cpus);
                cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
+               cpumask_and(new_cpus, new_cpus, cpu_active_mask);
        } else {
                cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
        }
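A worked example of the computation above, as a self-contained user-space sketch with plain bit masks standing in for cpumask_t (CPU numbers chosen for illustration only). It shows why the extra cpu_active_mask step matters once subparts_cpus is allowed to keep offline CPUs:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t parent_effective = 0x0f;	/* parent->effective_cpus: CPUs 0-3 */
	uint64_t parent_subparts  = 0x30;	/* parent->subparts_cpus:  CPUs 4-5 */
	uint64_t cpus_allowed     = 0x3c;	/* cs->cpus_allowed:       CPUs 2-5 */
	uint64_t cpu_active       = 0x1f;	/* cpu_active_mask: CPU 5 went offline */

	/* (effective | subparts) & allowed & active, as in the code above */
	uint64_t new_cpus = (parent_effective | parent_subparts)
			    & cpus_allowed & cpu_active;

	/* Without the cpu_active step, offline CPU 5 would leak into the result. */
	assert(new_cpus == 0x1c);		/* CPUs 2-4 */
	return 0;
}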
@@ -1021,9 +1060,12 @@ enum subparts_cmd {
  *
  * For partcmd_update, if the optional newmask is specified, the cpu
  * list is to be changed from cpus_allowed to newmask. Otherwise,
- * cpus_allowed is assumed to remain the same.  The function will return
- * 1 if changes to parent's subparts_cpus and effective_cpus happen or 0
- * otherwise. In case of error, an error code will be returned.
+ * cpus_allowed is assumed to remain the same. The cpuset should either
+ * be a partition root or an invalid partition root. The partition root
+ * state may change if newmask is NULL and none of the requested CPUs can
+ * be granted by the parent. The function will return 1 if changes to
+ * parent's subparts_cpus and effective_cpus happen or 0 otherwise.
+ * Error code should only be returned when newmask is non-NULL.
  *
  * The partcmd_enable and partcmd_disable commands are used by
  * update_prstate(). The partcmd_update command is used by
@@ -1046,6 +1088,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
        struct cpuset *parent = parent_cs(cpuset);
        int adding;     /* Moving cpus from effective_cpus to subparts_cpus */
        int deleting;   /* Moving cpus from subparts_cpus to effective_cpus */
+       bool part_error = false;        /* Partition error? */
 
        lockdep_assert_held(&cpuset_mutex);
 
@@ -1104,9 +1147,20 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
                /*
                 * Return error if the new effective_cpus could become empty.
                 */
-               if (adding && !deleting &&
-                   cpumask_equal(parent->effective_cpus, tmp->addmask))
-                       return -EINVAL;
+               if (adding &&
+                   cpumask_equal(parent->effective_cpus, tmp->addmask)) {
+                       if (!deleting)
+                               return -EINVAL;
+                       /*
+                        * As some of the CPUs in subparts_cpus might have
+                        * been offlined, we need to compute the real delmask
+                        * to confirm that.
+                        */
+                       if (!cpumask_and(tmp->addmask, tmp->delmask,
+                                        cpu_active_mask))
+                               return -EINVAL;
+                       cpumask_copy(tmp->addmask, parent->effective_cpus);
+               }
        } else {
                /*
                 * partcmd_update w/o newmask:
@@ -1114,13 +1168,48 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
                 * addmask = cpus_allowed & parent->effective_cpus
                 *
                 * Note that parent's subparts_cpus may have been
-                * pre-shrunk in case the CPUs granted to the parent
-                * by the grandparent changes. So no deletion is needed.
+                * pre-shrunk in case there is a change in the cpu list.
+                * So no deletion is needed.
                 */
                adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed,
                                     parent->effective_cpus);
-               if (cpumask_equal(tmp->addmask, parent->effective_cpus))
-                       return -EINVAL;
+               part_error = cpumask_equal(tmp->addmask,
+                                          parent->effective_cpus);
+       }
+
+       if (cmd == partcmd_update) {
+               int prev_prs = cpuset->partition_root_state;
+
+               /*
+                * Check for possible transition between PRS_ENABLED
+                * and PRS_ERROR.
+                */
+               switch (cpuset->partition_root_state) {
+               case PRS_ENABLED:
+                       if (part_error)
+                               cpuset->partition_root_state = PRS_ERROR;
+                       break;
+               case PRS_ERROR:
+                       if (!part_error)
+                               cpuset->partition_root_state = PRS_ENABLED;
+                       break;
+               }
+               /*
+                * Set part_error if previously in invalid state.
+                */
+               part_error = (prev_prs == PRS_ERROR);
+       }
+
+       if (!part_error && (cpuset->partition_root_state == PRS_ERROR))
+               return 0;       /* Nothing needs to be done */
+
+       if (cpuset->partition_root_state == PRS_ERROR) {
+               /*
+                * Remove all its cpus from parent's subparts_cpus.
+                */
+               adding = false;
+               deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed,
+                                      parent->subparts_cpus);
        }
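The PRS_ENABLED/PRS_ERROR handling in the partcmd_update path above boils down to a small state transition; a stand-alone sketch (names hypothetical, not part of the patch) of the four cases:

#include <assert.h>

enum { EX_PRS_ENABLED = 1, EX_PRS_ERROR = -1 };

/* New partition_root_state given the current state and part_error */
static int ex_prs_transition(int state, int part_error)
{
	if (state == EX_PRS_ENABLED && part_error)
		return EX_PRS_ERROR;	/* no CPU could be granted by the parent */
	if (state == EX_PRS_ERROR && !part_error)
		return EX_PRS_ENABLED;	/* CPUs became available again */
	return state;
}

int main(void)
{
	assert(ex_prs_transition(EX_PRS_ENABLED, 1) == EX_PRS_ERROR);
	assert(ex_prs_transition(EX_PRS_ENABLED, 0) == EX_PRS_ENABLED);
	assert(ex_prs_transition(EX_PRS_ERROR,   0) == EX_PRS_ENABLED);
	assert(ex_prs_transition(EX_PRS_ERROR,   1) == EX_PRS_ERROR);
	return 0;
}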
 
        if (!adding && !deleting)
@@ -1141,6 +1230,10 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
        if (deleting) {
                cpumask_andnot(parent->subparts_cpus,
                               parent->subparts_cpus, tmp->delmask);
+               /*
+                * Some of the CPUs in subparts_cpus might have been offlined.
+                */
+               cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
                cpumask_or(parent->effective_cpus,
                           parent->effective_cpus, tmp->delmask);
        }
@@ -1172,28 +1265,30 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
        rcu_read_lock();
        cpuset_for_each_descendant_pre(cp, pos_css, cs) {
                struct cpuset *parent = parent_cs(cp);
-               bool cs_empty;
 
                compute_effective_cpumask(tmp->new_cpus, cp, parent);
-               cs_empty = cpumask_empty(tmp->new_cpus);
-
-               /*
-                * A partition root cannot have empty effective_cpus
-                */
-               WARN_ON_ONCE(cs_empty && is_partition_root(cp));
 
                /*
                 * If it becomes empty, inherit the effective mask of the
                 * parent, which is guaranteed to have some CPUs.
                 */
-               if (is_in_v2_mode() && cs_empty)
+               if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
                        cpumask_copy(tmp->new_cpus, parent->effective_cpus);
+                       if (!cp->use_parent_ecpus) {
+                               cp->use_parent_ecpus = true;
+                               parent->child_ecpus_count++;
+                       }
+               } else if (cp->use_parent_ecpus) {
+                       cp->use_parent_ecpus = false;
+                       WARN_ON_ONCE(!parent->child_ecpus_count);
+                       parent->child_ecpus_count--;
+               }
 
                /*
                 * Skip the whole subtree if the cpumask remains the same
                 * and has no partition root state.
                 */
-               if (!is_partition_root(cp) &&
+               if (!cp->partition_root_state &&
                    cpumask_equal(tmp->new_cpus, cp->effective_cpus)) {
                        pos_css = css_rightmost_descendant(pos_css);
                        continue;
@@ -1205,11 +1300,44 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
                 * update_tasks_cpumask() again for tasks in the parent
                 * cpuset if the parent's subparts_cpus changes.
                 */
-               if ((cp != cs) && cp->partition_root_state &&
-                   update_parent_subparts_cpumask(cp, partcmd_update,
-                                                  NULL, tmp)) {
-                       if (parent != &top_cpuset)
-                               update_tasks_cpumask(parent);
+               if ((cp != cs) && cp->partition_root_state) {
+                       switch (parent->partition_root_state) {
+                       case PRS_DISABLED:
+                               /*
+                                * If parent is neither a partition root nor an
+                                * invalid partition root, clear its partition
+                                * state and the CS_CPU_EXCLUSIVE flag.
+                                */
+                               WARN_ON_ONCE(cp->partition_root_state
+                                            != PRS_ERROR);
+                               cp->partition_root_state = 0;
+
+                               /*
+                                * clear_bit() is an atomic operation and
+                                * readers aren't interested in the state
+                                * of CS_CPU_EXCLUSIVE anyway. So we can
+                                * just update the flag without holding
+                                * the callback_lock.
+                                */
+                               clear_bit(CS_CPU_EXCLUSIVE, &cp->flags);
+                               break;
+
+                       case PRS_ENABLED:
+                               if (update_parent_subparts_cpumask(cp, partcmd_update, NULL, tmp))
+                                       update_tasks_cpumask(parent);
+                               break;
+
+                       case PRS_ERROR:
+                               /*
+                                * An invalid parent makes the child invalid too.
+                                */
+                               cp->partition_root_state = PRS_ERROR;
+                               if (cp->nr_subparts_cpus) {
+                                       cp->nr_subparts_cpus = 0;
+                                       cpumask_clear(cp->subparts_cpus);
+                               }
+                               break;
+                       }
                }
 
                if (!css_tryget_online(&cp->css))
@@ -1219,13 +1347,33 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
                spin_lock_irq(&callback_lock);
 
                cpumask_copy(cp->effective_cpus, tmp->new_cpus);
-               if (cp->nr_subparts_cpus) {
+               if (cp->nr_subparts_cpus &&
+                  (cp->partition_root_state != PRS_ENABLED)) {
+                       cp->nr_subparts_cpus = 0;
+                       cpumask_clear(cp->subparts_cpus);
+               } else if (cp->nr_subparts_cpus) {
                        /*
                         * Make sure that effective_cpus & subparts_cpus
                         * are mutually exclusive.
+                        *
+                        * In the unlikely event that effective_cpus
+                        * becomes empty, we clear cp->nr_subparts_cpus and
+                        * let its child partition roots compete for
+                        * CPUs again.
                         */
                        cpumask_andnot(cp->effective_cpus, cp->effective_cpus,
                                       cp->subparts_cpus);
+                       if (cpumask_empty(cp->effective_cpus)) {
+                               cpumask_copy(cp->effective_cpus, tmp->new_cpus);
+                               cpumask_clear(cp->subparts_cpus);
+                               cp->nr_subparts_cpus = 0;
+                       } else if (!cpumask_subset(cp->subparts_cpus,
+                                                  tmp->new_cpus)) {
+                               cpumask_andnot(cp->subparts_cpus,
+                                       cp->subparts_cpus, tmp->new_cpus);
+                               cp->nr_subparts_cpus
+                                       = cpumask_weight(cp->subparts_cpus);
+                       }
                }
                spin_unlock_irq(&callback_lock);
 
@@ -1235,11 +1383,15 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
                update_tasks_cpumask(cp);
 
                /*
-                * If the effective cpumask of any non-empty cpuset is changed,
-                * we need to rebuild sched domains.
+                * On legacy hierarchy, if the effective cpumask of any non-
+                * empty cpuset is changed, we need to rebuild sched domains.
+                * On default hierarchy, the cpuset needs to be a partition
+                * root as well.
                 */
                if (!cpumask_empty(cp->cpus_allowed) &&
-                   is_sched_load_balance(cp))
+                   is_sched_load_balance(cp) &&
+                  (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
+                   is_partition_root(cp)))
                        need_rebuild_sched_domains = true;
 
                rcu_read_lock();
@@ -1251,6 +1403,35 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
                rebuild_sched_domains_locked();
 }
 
+/**
+ * update_sibling_cpumasks - Update siblings' cpumasks
+ * @parent:  Parent cpuset
+ * @cs:      Current cpuset
+ * @tmp:     Temp variables
+ */
+static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
+                                   struct tmpmasks *tmp)
+{
+       struct cpuset *sibling;
+       struct cgroup_subsys_state *pos_css;
+
+       /*
+        * Check all its siblings and call update_cpumasks_hier()
+        * if their use_parent_ecpus flag is set in order for them
+        * to use the right effective_cpus value.
+        */
+       rcu_read_lock();
+       cpuset_for_each_child(sibling, pos_css, parent) {
+               if (sibling == cs)
+                       continue;
+               if (!sibling->use_parent_ecpus)
+                       continue;
+
+               update_cpumasks_hier(sibling, tmp);
+       }
+       rcu_read_unlock();
+}
+
 /**
  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
  * @cs: the cpuset to consider
@@ -1326,6 +1507,17 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
        spin_unlock_irq(&callback_lock);
 
        update_cpumasks_hier(cs, &tmp);
+
+       if (cs->partition_root_state) {
+               struct cpuset *parent = parent_cs(cs);
+
+               /*
+                * For partition root, update the cpumasks of sibling
+                * cpusets if they use parent's effective_cpus.
+                */
+               if (parent->child_ecpus_count)
+                       update_sibling_cpumasks(parent, cs, &tmp);
+       }
        return 0;
 }
 
@@ -1702,7 +1894,7 @@ static int update_prstate(struct cpuset *cs, int val)
                return 0;
 
        /*
-        * Cannot force a partial or erroneous partition root to a full
+        * Cannot force a partial or invalid partition root to a full
         * partition root.
         */
        if (val && cs->partition_root_state)
@@ -1733,6 +1925,17 @@ static int update_prstate(struct cpuset *cs, int val)
                }
                cs->partition_root_state = PRS_ENABLED;
        } else {
+               /*
+                * Turning off partition root will clear the
+                * CS_CPU_EXCLUSIVE bit.
+                */
+               if (cs->partition_root_state == PRS_ERROR) {
+                       cs->partition_root_state = 0;
+                       update_flag(CS_CPU_EXCLUSIVE, cs, 0);
+                       err = 0;
+                       goto out;
+               }
+
                err = update_parent_subparts_cpumask(cs, partcmd_disable,
                                                     NULL, &tmp);
                if (err)
@@ -1751,6 +1954,9 @@ static int update_prstate(struct cpuset *cs, int val)
        if (parent != &top_cpuset)
                update_tasks_cpumask(parent);
 
+       if (parent->child_ecpus_count)
+               update_sibling_cpumasks(parent, cs, &tmp);
+
        rebuild_sched_domains_locked();
 out:
        free_cpumasks(NULL, &tmp);
@@ -1903,10 +2109,8 @@ out_unlock:
 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
 {
        struct cgroup_subsys_state *css;
-       struct cpuset *cs;
 
        cgroup_taskset_first(tset, &css);
-       cs = css_cs(css);
 
        mutex_lock(&cpuset_mutex);
        css_cs(css)->attach_in_progress--;
@@ -1998,6 +2202,7 @@ typedef enum {
        FILE_MEMLIST,
        FILE_EFFECTIVE_CPULIST,
        FILE_EFFECTIVE_MEMLIST,
+       FILE_SUBPARTS_CPULIST,
        FILE_CPU_EXCLUSIVE,
        FILE_MEM_EXCLUSIVE,
        FILE_MEM_HARDWALL,
@@ -2072,9 +2277,6 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
        case FILE_SCHED_RELAX_DOMAIN_LEVEL:
                retval = update_relax_domain_level(cs, val);
                break;
-       case FILE_PARTITION_ROOT:
-               retval = update_prstate(cs, val);
-               break;
        default:
                retval = -EINVAL;
                break;
@@ -2179,6 +2381,9 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
        case FILE_EFFECTIVE_MEMLIST:
                seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
                break;
+       case FILE_SUBPARTS_CPULIST:
+               seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
+               break;
        default:
                ret = -EINVAL;
        }
@@ -2225,8 +2430,6 @@ static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
        switch (type) {
        case FILE_SCHED_RELAX_DOMAIN_LEVEL:
                return cs->relax_domain_level;
-       case FILE_PARTITION_ROOT:
-               return cs->partition_root_state;
        default:
                BUG();
        }
@@ -2235,6 +2438,55 @@ static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
        return 0;
 }
 
+static int sched_partition_show(struct seq_file *seq, void *v)
+{
+       struct cpuset *cs = css_cs(seq_css(seq));
+
+       switch (cs->partition_root_state) {
+       case PRS_ENABLED:
+               seq_puts(seq, "root\n");
+               break;
+       case PRS_DISABLED:
+               seq_puts(seq, "member\n");
+               break;
+       case PRS_ERROR:
+               seq_puts(seq, "root invalid\n");
+               break;
+       }
+       return 0;
+}
+
+static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
+                                    size_t nbytes, loff_t off)
+{
+       struct cpuset *cs = css_cs(of_css(of));
+       int val;
+       int retval = -ENODEV;
+
+       buf = strstrip(buf);
+
+       /*
+        * Convert "root" to ENABLED, and convert "member" to DISABLED.
+        */
+       if (!strcmp(buf, "root"))
+               val = PRS_ENABLED;
+       else if (!strcmp(buf, "member"))
+               val = PRS_DISABLED;
+       else
+               return -EINVAL;
+
+       css_get(&cs->css);
+       mutex_lock(&cpuset_mutex);
+       if (!is_cpuset_online(cs))
+               goto out_unlock;
+
+       retval = update_prstate(cs, val);
+out_unlock:
+       mutex_unlock(&cpuset_mutex);
+       css_put(&cs->css);
+       return retval ?: nbytes;
+}
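The new file surfaces as cpuset.cpus.partition under cgroup v2. A user-space sketch of driving and reading it (the cgroup path is an example, error handling trimmed); only "root" and "member" may be written, while "root invalid" is report-only:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/mygrp/cpuset.cpus.partition";
	char buf[32] = { 0 };
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Request partition root; the kernel may later report "root invalid". */
	if (write(fd, "root", strlen("root")) < 0)
		perror("write");

	lseek(fd, 0, SEEK_SET);
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("state: %s", buf);	/* "root", "member" or "root invalid" */

	close(fd);
	return 0;
}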
+
 /*
  * for the common functions, 'private' gives the type of file
  */
@@ -2368,24 +2620,29 @@ static struct cftype dfl_files[] = {
                .name = "cpus.effective",
                .seq_show = cpuset_common_seq_show,
                .private = FILE_EFFECTIVE_CPULIST,
-               .flags = CFTYPE_NOT_ON_ROOT,
        },
 
        {
                .name = "mems.effective",
                .seq_show = cpuset_common_seq_show,
                .private = FILE_EFFECTIVE_MEMLIST,
-               .flags = CFTYPE_NOT_ON_ROOT,
        },
 
        {
-               .name = "sched.partition",
-               .read_s64 = cpuset_read_s64,
-               .write_s64 = cpuset_write_s64,
+               .name = "cpus.partition",
+               .seq_show = sched_partition_show,
+               .write = sched_partition_write,
                .private = FILE_PARTITION_ROOT,
                .flags = CFTYPE_NOT_ON_ROOT,
        },
 
+       {
+               .name = "cpus.subpartitions",
+               .seq_show = cpuset_common_seq_show,
+               .private = FILE_SUBPARTS_CPULIST,
+               .flags = CFTYPE_DEBUG,
+       },
+
        { }     /* terminate */
 };
 
@@ -2445,6 +2702,8 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
        if (is_in_v2_mode()) {
                cpumask_copy(cs->effective_cpus, parent->effective_cpus);
                cs->effective_mems = parent->effective_mems;
+               cs->use_parent_ecpus = true;
+               parent->child_ecpus_count++;
        }
        spin_unlock_irq(&callback_lock);
 
@@ -2508,6 +2767,13 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
            is_sched_load_balance(cs))
                update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 
+       if (cs->use_parent_ecpus) {
+               struct cpuset *parent = parent_cs(cs);
+
+               cs->use_parent_ecpus = false;
+               parent->child_ecpus_count--;
+       }
+
        cpuset_dec();
        clear_bit(CS_ONLINE, &cs->flags);
 
@@ -2689,20 +2955,29 @@ hotplug_update_tasks(struct cpuset *cs,
                update_tasks_nodemask(cs);
 }
 
+static bool force_rebuild;
+
+void cpuset_force_rebuild(void)
+{
+       force_rebuild = true;
+}
+
 /**
  * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
  * @cs: cpuset in interest
+ * @tmp: the tmpmasks structure pointer
  *
  * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
  * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
  * all its tasks are moved to the nearest ancestor with both resources.
  */
-static void cpuset_hotplug_update_tasks(struct cpuset *cs)
+static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
 {
        static cpumask_t new_cpus;
        static nodemask_t new_mems;
        bool cpus_updated;
        bool mems_updated;
+       struct cpuset *parent;
 retry:
        wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
 
@@ -2717,9 +2992,60 @@ retry:
                goto retry;
        }
 
-       cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
-       nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);
+       parent = parent_cs(cs);
+       compute_effective_cpumask(&new_cpus, cs, parent);
+       nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
+
+       if (cs->nr_subparts_cpus)
+               /*
+                * Make sure that CPUs allocated to child partitions
+                * do not show up in effective_cpus.
+                */
+               cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus);
+
+       if (!tmp || !cs->partition_root_state)
+               goto update_tasks;
+
+       /*
+        * In the unlikely event that a partition root has empty
+        * effective_cpus or its parent becomes erroneous, we have to
+        * transition it to the erroneous state.
+        */
+       if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
+          (parent->partition_root_state == PRS_ERROR))) {
+               if (cs->nr_subparts_cpus) {
+                       cs->nr_subparts_cpus = 0;
+                       cpumask_clear(cs->subparts_cpus);
+                       compute_effective_cpumask(&new_cpus, cs, parent);
+               }
 
+               /*
+                * If the effective_cpus is empty because the child
+                * partitions take away all the CPUs, we can keep
+                * the current partition and let the child partitions
+                * fight for available CPUs.
+                */
+               if ((parent->partition_root_state == PRS_ERROR) ||
+                    cpumask_empty(&new_cpus)) {
+                       update_parent_subparts_cpumask(cs, partcmd_disable,
+                                                      NULL, tmp);
+                       cs->partition_root_state = PRS_ERROR;
+               }
+               cpuset_force_rebuild();
+       }
+
+       /*
+        * On the other hand, an erroneous partition root may be transitioned
+        * back to a regular one, or a partition root with no CPU allocated
+        * from the parent may change to erroneous.
+        */
+       if (is_partition_root(parent) &&
+          ((cs->partition_root_state == PRS_ERROR) ||
+           !cpumask_intersects(&new_cpus, parent->subparts_cpus)) &&
+            update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp))
+               cpuset_force_rebuild();
+
+update_tasks:
        cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
        mems_updated = !nodes_equal(new_mems, cs->effective_mems);
 
@@ -2733,13 +3059,6 @@ retry:
        mutex_unlock(&cpuset_mutex);
 }
 
-static bool force_rebuild;
-
-void cpuset_force_rebuild(void)
-{
-       force_rebuild = true;
-}
-
 /**
  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
  *
@@ -2762,6 +3081,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
        static nodemask_t new_mems;
        bool cpus_updated, mems_updated;
        bool on_dfl = is_in_v2_mode();
+       struct tmpmasks tmp, *ptmp = NULL;
+
+       if (on_dfl && !alloc_cpumasks(NULL, &tmp))
+               ptmp = &tmp;
 
        mutex_lock(&cpuset_mutex);
 
@@ -2769,6 +3092,11 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
        cpumask_copy(&new_cpus, cpu_active_mask);
        new_mems = node_states[N_MEMORY];
 
+       /*
+        * If subparts_cpus is populated, it is likely that the check below
+        * will produce a false positive on cpus_updated when the cpu list
+        * isn't changed. It is extra work, but it is better to be safe.
+        */
        cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
        mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
 
@@ -2777,6 +3105,22 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
                spin_lock_irq(&callback_lock);
                if (!on_dfl)
                        cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
+               /*
+                * Make sure that CPUs allocated to child partitions
+                * do not show up in effective_cpus. If no CPU is left,
+                * we clear the subparts_cpus & let the child partitions
+                * fight for the CPUs again.
+                */
+               if (top_cpuset.nr_subparts_cpus) {
+                       if (cpumask_subset(&new_cpus,
+                                          top_cpuset.subparts_cpus)) {
+                               top_cpuset.nr_subparts_cpus = 0;
+                               cpumask_clear(top_cpuset.subparts_cpus);
+                       } else {
+                               cpumask_andnot(&new_cpus, &new_cpus,
+                                              top_cpuset.subparts_cpus);
+                       }
+               }
                cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
                spin_unlock_irq(&callback_lock);
                /* we don't mess with cpumasks of tasks in top_cpuset */
@@ -2805,7 +3149,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
                                continue;
                        rcu_read_unlock();
 
-                       cpuset_hotplug_update_tasks(cs);
+                       cpuset_hotplug_update_tasks(cs, ptmp);
 
                        rcu_read_lock();
                        css_put(&cs->css);
@@ -2818,6 +3162,8 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
                force_rebuild = false;
                rebuild_sched_domains();
        }
+
+       free_cpumasks(NULL, ptmp);
 }
 
 void cpuset_update_active_cpus(void)
@@ -3128,9 +3474,9 @@ void cpuset_print_current_mems_allowed(void)
        rcu_read_lock();
 
        cgrp = task_cs(current)->css.cgroup;
-       pr_info("%s cpuset=", current->comm);
+       pr_cont(",cpuset=");
        pr_cont_cgroup_name(cgrp);
-       pr_cont(" mems_allowed=%*pbl\n",
+       pr_cont(",mems_allowed=%*pbl",
                nodemask_pr_args(&current->mems_allowed));
 
        rcu_read_unlock();