blkcg: always associate a bio with a blkg
author		Dennis Zhou (Facebook) <dennisszhou@gmail.com>
		Tue, 11 Sep 2018 18:41:29 +0000 (14:41 -0400)
committer	Jens Axboe <axboe@kernel.dk>
		Sat, 22 Sep 2018 02:29:06 +0000 (20:29 -0600)
Previously, blkgs were only associated with bios as needed by blk-iolatency
and blk-throttle. bio->bi_css was always being associated, while the blkg
looked up in blkcg_bio_issue_check() was then thrown away.

This patch begins the cleanup of bio->bi_css and bio->bi_blkg by always
associating a blkg in blkcg_bio_issue_check(). It tries to create the
blkg and, if that is not possible, falls back to the root_blkg of the
request_queue. A bio is therefore always associated with a blkg. The
now-duplicate association logic is removed from blk-throttle and
blk-iolatency.

Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/bio.c
block/blk-iolatency.c
block/blk-throttle.c
include/linux/bio.h
include/linux/blk-cgroup.h

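For orientation, the issue path after this change reduces to the following
shape: a condensed sketch of blkcg_bio_issue_check() built only from the
calls visible in the hunks below (the per-blkg stats accounting in the
truncated final hunk is elided):

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	bool throtl;

	rcu_read_lock();

	/*
	 * After this call, bio->bi_blkg is never NULL: it is either the
	 * blkg for the bio's css or, on failure, q->root_blkg.
	 */
	bio_associate_create_blkg(q, bio);

	throtl = blk_throtl_bio(q, bio->bi_blkg, bio);
	/* ... per-blkg stats accounting elided ... */

	rcu_read_unlock();
	return !throtl;
}
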
diff --git a/block/bio.c b/block/bio.c
index bfd41e8..748d713 100644
@@ -2028,6 +2028,41 @@ int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
        return 0;
 }
 
+/**
+ * bio_associate_create_blkg - associate a bio with a blkg from q
+ * @q: request_queue where bio is going
+ * @bio: target bio
+ *
+ * Associate @bio with the blkg found from the bio's css and the request_queue.
+ * If one is not found, blkg_lookup_create() creates the blkg.
+ */
+int bio_associate_create_blkg(struct request_queue *q, struct bio *bio)
+{
+       struct blkcg *blkcg;
+       struct blkcg_gq *blkg;
+       int ret = 0;
+
+       /* someone has already associated this bio with a blkg */
+       if (bio->bi_blkg)
+               return ret;
+
+       rcu_read_lock();
+
+       bio_associate_blkcg(bio, NULL);
+       blkcg = bio_blkcg(bio);
+
+       if (!blkcg->css.parent) {
+               ret = bio_associate_blkg(bio, q->root_blkg);
+       } else {
+               blkg = blkg_lookup_create(blkcg, q);
+
+               ret = bio_associate_blkg(bio, blkg);
+       }
+
+       rcu_read_unlock();
+       return ret;
+}
+
 /**
  * bio_disassociate_task - undo bio_associate_current()
  * @bio: target bio
@@ -2057,6 +2092,9 @@ void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
 {
        if (src->bi_css)
                WARN_ON(bio_associate_blkcg(dst, src->bi_css));
+
+       if (src->bi_blkg)
+               bio_associate_blkg(dst, src->bi_blkg);
 }
 EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
 #endif /* CONFIG_BLK_CGROUP */
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index ffde3ab..7337fbc 100644
@@ -392,34 +392,14 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
                                     spinlock_t *lock)
 {
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
-       struct blkcg *blkcg;
-       struct blkcg_gq *blkg;
-       struct request_queue *q = rqos->q;
+       struct blkcg_gq *blkg = bio->bi_blkg;
        bool issue_as_root = bio_issue_as_root_blkg(bio);
 
        if (!blk_iolatency_enabled(blkiolat))
                return;
 
-       rcu_read_lock();
-       bio_associate_blkcg(bio, NULL);
-       blkcg = bio_blkcg(bio);
-       blkg = blkg_lookup(blkcg, q);
-       if (unlikely(!blkg)) {
-               if (!lock)
-                       spin_lock_irq(q->queue_lock);
-               blkg = __blkg_lookup_create(blkcg, q);
-               if (IS_ERR(blkg))
-                       blkg = NULL;
-               if (!lock)
-                       spin_unlock_irq(q->queue_lock);
-       }
-       if (!blkg)
-               goto out;
-
        bio_issue_init(&bio->bi_issue, bio_sectors(bio));
-       bio_associate_blkg(bio, blkg);
-out:
-       rcu_read_unlock();
+
        while (blkg && blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg);
                if (!iolat) {
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index db1a3a2..e62ae50 100644
@@ -2118,9 +2118,6 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
 {
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-       /* fallback to root_blkg if we fail to get a blkg ref */
-       if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
-               bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
        bio_issue_init(&bio->bi_issue, bio_sectors(bio));
 #endif
 }
@@ -2129,7 +2126,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                    struct bio *bio)
 {
        struct throtl_qnode *qn = NULL;
-       struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
+       struct throtl_grp *tg = blkg_to_tg(blkg);
        struct throtl_service_queue *sq;
        bool rw = bio_data_dir(bio);
        bool throttled = false;
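
With the association guaranteed by the caller, the "blkg ?: q->root_blkg"
fallback above is dead code, and the hunk reduces it to a direct conversion.
A sketch of the resulting entry contract (the WARN_ON_ONCE is illustrative
only and not part of this patch):

bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
		    struct bio *bio)
{
	struct throtl_grp *tg;

	/*
	 * Illustrative only: blkcg_bio_issue_check() has already
	 * associated a blkg (real or q->root_blkg), so blkg is never
	 * NULL here.
	 */
	WARN_ON_ONCE(!blkg);
	tg = blkg_to_tg(blkg);

	/* ... throttling decision unchanged by this patch ... */
	return false; /* placeholder; real code returns whether throttled */
}
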
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 14b4fa2..829cd0b 100644
@@ -542,11 +542,14 @@ static inline int bio_associate_blkcg_from_page(struct bio *bio,
 #ifdef CONFIG_BLK_CGROUP
 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
 int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
+int bio_associate_create_blkg(struct request_queue *q, struct bio *bio);
 void bio_disassociate_task(struct bio *bio);
 void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
 #else  /* CONFIG_BLK_CGROUP */
 static inline int bio_associate_blkcg(struct bio *bio,
                        struct cgroup_subsys_state *blkcg_css) { return 0; }
+static inline int bio_associate_create_blkg(struct request_queue *q,
+                                           struct bio *bio) { return 0; }
 static inline void bio_disassociate_task(struct bio *bio) { }
 static inline void bio_clone_blkcg_association(struct bio *dst,
                        struct bio *src) { }
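
Because the !CONFIG_BLK_CGROUP stub simply returns 0, call sites can
associate unconditionally, with no #ifdef. A minimal hypothetical caller
(submit_one() is an assumed name, not part of this patch):

static void submit_one(struct request_queue *q, struct bio *bio)
{
	/* no-op returning 0 when CONFIG_BLK_CGROUP is disabled */
	bio_associate_create_blkg(q, bio);
	generic_make_request(bio);
}
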
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 1fbff1b..6e33ad1 100644
@@ -900,29 +900,17 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg
 static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio)
 {
-       struct blkcg *blkcg;
        struct blkcg_gq *blkg;
        bool throtl = false;
 
        rcu_read_lock();
 
-       /* associate blkcg if bio hasn't attached one */
-       bio_associate_blkcg(bio, NULL);
-       blkcg = bio_blkcg(bio);
-
-       blkg = blkg_lookup(blkcg, q);
-       if (unlikely(!blkg)) {
-               spin_lock_irq(q->queue_lock);
-               blkg = __blkg_lookup_create(blkcg, q);
-               if (IS_ERR(blkg))
-                       blkg = NULL;
-               spin_unlock_irq(q->queue_lock);
-       }
+       bio_associate_create_blkg(q, bio);
+       blkg = bio->bi_blkg;
 
        throtl = blk_throtl_bio(q, blkg, bio);
 
        if (!throtl) {
-               blkg = blkg ?: q->root_blkg;
                /*
                 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
                 * is a split bio and we would have already accounted for the