blkcg: convert blkg_lookup_create to find closest blkg
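
Make __blkg_lookup_create() return the closest ancestor blkg, ultimately
q->root_blkg, instead of an ERR_PTR() when blkg_create() fails while
walking down from the root, so callers always receive a usable blkg.  A
new blkg_lookup_create() wrapper performs the lockless lookup fast path
and takes q->queue_lock only for the create slow path.  The diff also
folds policy-data offlining into blkg_destroy() (removing the
blkg_pd_offline() helper and the pd->offline flag) and splits blkcg
destruction into three stages tied to a cgwb refcount, so blkgs stay
alive until writeback associated with the blkcg has finished.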
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 694595b..e9e3a95 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -259,7 +259,7 @@ err_free_blkg:
 }
 
 /**
- * blkg_lookup_create - lookup blkg, try to create one if not there
+ * __blkg_lookup_create - lookup blkg, try to create one if not there
  * @blkcg: blkcg of interest
  * @q: request_queue of interest
  *
@@ -268,12 +268,11 @@ err_free_blkg:
 * that all non-root blkgs have access to the parent blkg.  This function
  * should be called under RCU read lock and @q->queue_lock.
  *
- * Returns pointer to the looked up or created blkg on success, ERR_PTR()
- * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
- * dead and bypassing, returns ERR_PTR(-EBUSY).
+ * Returns the blkg or the closest blkg if blkg_create() fails as it walks
+ * down from root.
  */
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
-                                   struct request_queue *q)
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+                                     struct request_queue *q)
 {
        struct blkcg_gq *blkg;
 
@@ -285,7 +284,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
-               return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
+               return q->root_blkg;
 
        blkg = __blkg_lookup(blkcg, q, true);
        if (blkg)
@@ -293,45 +292,63 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 
        /*
         * Create blkgs walking down from blkcg_root to @blkcg, so that all
-        * non-root blkgs have access to their parents.
+        * non-root blkgs have access to their parents.  Returns the closest
+        * blkg to the intended blkg should blkg_create() fail.
         */
        while (true) {
                struct blkcg *pos = blkcg;
                struct blkcg *parent = blkcg_parent(blkcg);
-
-               while (parent && !__blkg_lookup(parent, q, false)) {
+               struct blkcg_gq *ret_blkg = q->root_blkg;
+
+               while (parent) {
+                       blkg = __blkg_lookup(parent, q, false);
+                       if (blkg) {
+                               /* remember closest blkg */
+                               ret_blkg = blkg;
+                               break;
+                       }
                        pos = parent;
                        parent = blkcg_parent(parent);
                }
 
                blkg = blkg_create(pos, q, NULL);
-               if (pos == blkcg || IS_ERR(blkg))
+               if (IS_ERR(blkg))
+                       return ret_blkg;
+               if (pos == blkcg)
                        return blkg;
        }
 }
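
A hypothetical caller sketch (not part of this patch) showing the new
contract: error handling disappears, because the result is at worst
q->root_blkg, never NULL or an ERR_PTR().  The caller is assumed to hold
the RCU read lock and q->queue_lock, per the comment above; the
blkg_rwstat_add() usage mirrors blkcg_bio_issue_check().

/* Illustration only: charge a bio against whatever blkg is reachable. */
static void example_charge_bio(struct blkcg *blkcg, struct request_queue *q,
			       struct bio *bio)
{
	struct blkcg_gq *blkg;

	lockdep_assert_held(q->queue_lock);

	blkg = __blkg_lookup_create(blkcg, q);
	/* safe to dereference unconditionally -- worst case root_blkg */
	blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf, bio->bi_iter.bi_size);
}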
 
-static void blkg_pd_offline(struct blkcg_gq *blkg)
+/**
+ * blkg_lookup_create - find or create a blkg
+ * @blkcg: target block cgroup
+ * @q: target request_queue
+ *
+ * This looks up or creates the blkg representing the unique pair
+ * of the blkcg and the request_queue.
+ */
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+                                   struct request_queue *q)
 {
-       int i;
+       struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
+       unsigned long flags;
 
-       lockdep_assert_held(blkg->q->queue_lock);
-       lockdep_assert_held(&blkg->blkcg->lock);
+       if (unlikely(!blkg)) {
+               spin_lock_irqsave(q->queue_lock, flags);
 
-       for (i = 0; i < BLKCG_MAX_POLS; i++) {
-               struct blkcg_policy *pol = blkcg_policy[i];
+               blkg = __blkg_lookup_create(blkcg, q);
 
-               if (blkg->pd[i] && !blkg->pd[i]->offline &&
-                   pol->pd_offline_fn) {
-                       pol->pd_offline_fn(blkg->pd[i]);
-                       blkg->pd[i]->offline = true;
-               }
+               spin_unlock_irqrestore(q->queue_lock, flags);
        }
+
+       return blkg;
 }
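
A hypothetical submission-path sketch (not part of this patch): the fast
path of the new wrapper is a pure RCU lookup, and q->queue_lock is taken
inside blkg_lookup_create() only when the blkg must be created.
bio_blkcg() here stands in for however the caller resolves its blkcg.

static void example_account_bio(struct request_queue *q, struct bio *bio)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	/* never fails: falls back toward q->root_blkg on creation errors */
	blkg = blkg_lookup_create(bio_blkcg(bio), q);
	/* ... account the bio against blkg ... */
	rcu_read_unlock();
}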
 
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
        struct blkcg *blkcg = blkg->blkcg;
        struct blkcg_gq *parent = blkg->parent;
+       int i;
 
        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);
@@ -340,6 +357,13 @@ static void blkg_destroy(struct blkcg_gq *blkg)
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 
+       for (i = 0; i < BLKCG_MAX_POLS; i++) {
+               struct blkcg_policy *pol = blkcg_policy[i];
+
+               if (blkg->pd[i] && pol->pd_offline_fn)
+                       pol->pd_offline_fn(blkg->pd[i]);
+       }
+
        if (parent) {
                blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
                blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
@@ -382,7 +406,6 @@ static void blkg_destroy_all(struct request_queue *q)
                struct blkcg *blkcg = blkg->blkcg;
 
                spin_lock(&blkcg->lock);
-               blkg_pd_offline(blkg);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }
@@ -1053,59 +1076,64 @@ static struct cftype blkcg_legacy_files[] = {
        { }     /* terminate */
 };
 
+/*
+ * blkcg destruction is a three-stage process.
+ *
+ * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
+ *    which offlines writeback.  Here we tie the next stage of blkg destruction
+ *    to the completion of writeback associated with the blkcg.  This lets us
+ *    avoid punting potentially large amounts of outstanding writeback to root
+ *    while maintaining any ongoing policies.  The next stage is triggered when
+ *    the blkcg's cgwb_refcnt goes to zero.
+ *
+ * 2. When cgwb_refcnt goes to zero, blkcg_destroy_blkgs() is called
+ *    and handles the destruction of blkgs.  Here the css reference held by
+ *    the blkg is put back eventually allowing blkcg_css_free() to be called.
+ *    This work may occur in cgwb_release_workfn() on the cgwb_release
+ *    workqueue.  Any submitted ios that fail to get the blkg ref will be
+ *    punted to the root_blkg.
+ *
+ * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
+ *    This finally frees the blkcg.
+ */
+
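
The refcount driving stages 1 and 2 is the cgwb_refcnt initialized in
blkcg_css_alloc() below.  A sketch of the helpers this scheme assumes
(they would live in blk-cgroup.h under CONFIG_CGROUP_WRITEBACK; shown
here for context only):

static inline void blkcg_cgwb_get(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->cgwb_refcnt);	/* one ref per attached cgwb */
}

static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	/* the last put -- base ref from blkcg_css_offline() or a dying
	 * cgwb -- triggers stage 2 */
	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
		blkcg_destroy_blkgs(blkcg);
}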
 /**
  * blkcg_css_offline - cgroup css_offline callback
  * @css: css of interest
  *
- * This function is called when @css is about to go away and responsible
- * for offlining all blkgs pd and killing all wbs associated with @css.
- * blkgs pd offline should be done while holding both q and blkcg locks.
- * As blkcg lock is nested inside q lock, this function performs reverse
- * double lock dancing.
- *
- * This is the blkcg counterpart of ioc_release_fn().
+ * This function is called when @css is about to go away.  Here the cgwbs are
+ * offlined first, and only once writeback associated with the blkcg has
+ * finished do we start step 2 (see above).
  */
 static void blkcg_css_offline(struct cgroup_subsys_state *css)
 {
        struct blkcg *blkcg = css_to_blkcg(css);
-       struct blkcg_gq *blkg;
-
-       spin_lock_irq(&blkcg->lock);
-
-       hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
-               struct request_queue *q = blkg->q;
-
-               if (spin_trylock(q->queue_lock)) {
-                       blkg_pd_offline(blkg);
-                       spin_unlock(q->queue_lock);
-               } else {
-                       spin_unlock_irq(&blkcg->lock);
-                       cpu_relax();
-                       spin_lock_irq(&blkcg->lock);
-               }
-       }
-
-       spin_unlock_irq(&blkcg->lock);
 
+       /* this prevents anyone from attaching or migrating to this blkcg */
        wb_blkcg_offline(blkcg);
+
+       /* put the base cgwb reference allowing step 2 to be triggered */
+       blkcg_cgwb_put(blkcg);
 }
 
 /**
- * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg
+ * blkcg_destroy_blkgs - responsible for shooting down blkgs
  * @blkcg: blkcg of interest
  *
- * This function is called when blkcg css is about to free and responsible for
- * destroying all blkgs associated with @blkcg.
- * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
+ * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
  * is nested inside q lock, this function performs reverse double lock dancing.
+ * Destroying the blkgs releases the reference held on the blkcg's css,
+ * allowing blkcg_css_free() to eventually be called.
+ *
+ * This is the blkcg counterpart of ioc_release_fn().
  */
-static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
+void blkcg_destroy_blkgs(struct blkcg *blkcg)
 {
        spin_lock_irq(&blkcg->lock);
+
        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
-                                                   struct blkcg_gq,
-                                                   blkcg_node);
+                                               struct blkcg_gq, blkcg_node);
                struct request_queue *q = blkg->q;
 
                if (spin_trylock(q->queue_lock)) {
@@ -1117,6 +1145,7 @@ static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
                        spin_lock_irq(&blkcg->lock);
                }
        }
+
        spin_unlock_irq(&blkcg->lock);
 }
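
Stage 2 typically fires from the cgwb release path once writeback has
drained.  An abbreviated, assumed sketch of the mm/backing-dev.c side
(the exact function body is not part of this diff):

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);

	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);

	/* last cgwb reference: kicks off blkg destruction (stage 2) */
	blkcg_cgwb_put(blkcg);

	/* ... remaining wb teardown ... */
}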
 
@@ -1125,8 +1154,6 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
        struct blkcg *blkcg = css_to_blkcg(css);
        int i;
 
-       blkcg_destroy_all_blkgs(blkcg);
-
        mutex_lock(&blkcg_pol_mutex);
 
        list_del(&blkcg->all_blkcgs_node);
@@ -1189,6 +1216,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
        INIT_HLIST_HEAD(&blkcg->blkg_list);
 #ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&blkcg->cgwb_list);
+       refcount_set(&blkcg->cgwb_refcnt, 1);
 #endif
        list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
 
@@ -1480,11 +1508,8 @@ void blkcg_deactivate_policy(struct request_queue *q,
 
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                if (blkg->pd[pol->plid]) {
-                       if (!blkg->pd[pol->plid]->offline &&
-                           pol->pd_offline_fn) {
+                       if (pol->pd_offline_fn)
                                pol->pd_offline_fn(blkg->pd[pol->plid]);
-                               blkg->pd[pol->plid]->offline = true;
-                       }
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
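
With the pd->offline guard flag gone, each policy's pd_offline_fn now
runs at most once per policy data: either from blkg_destroy() or, for
blkgs still attached when a policy is deactivated, from
blkcg_deactivate_policy(), which frees the pd immediately afterwards.
A hypothetical callback illustrating the contract (example_group and
example_group_drain() are made-up names):

static void example_pd_offline(struct blkg_policy_data *pd)
{
	struct example_group *eg = container_of(pd, struct example_group, pd);

	/* drain or hand off per-blkg policy state; called exactly once */
	example_group_drain(eg);
}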