block: Use the queue_flag_*() functions instead of open-coding these
author: Bart Van Assche <bart.vanassche@wdc.com>
Thu, 8 Mar 2018 01:10:03 +0000 (17:10 -0800)
committer: Jens Axboe <axboe@kernel.dk>
Thu, 8 Mar 2018 21:13:48 +0000 (14:13 -0700)
Except for changing the atomic queue flag manipulations that are
protected by the queue lock into non-atomic manipulations, this
patch does not change any functionality.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c
block/blk-mq.c
block/blk-settings.c
block/blk-stat.c

index 6febc69..241b730 100644 (file)
@@ -994,7 +994,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
         * registered by blk_register_queue().
         */
        q->bypass_depth = 1;
-       __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+       queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
 
        init_waitqueue_head(&q->mq_freeze_wq);
 
index 7533684..e70cc7d 100644 (file)
@@ -2678,7 +2678,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
        if (!(set->flags & BLK_MQ_F_SG_MERGE))
-               q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
+               queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
 
        q->sg_reserved_size = INT_MAX;
 
index 48ebe6b..7f719da 100644 (file)
@@ -861,9 +861,9 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 {
        spin_lock_irq(q->queue_lock);
        if (queueable)
-               clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+               queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
        else
-               set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+               queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
        spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
index 28003bf..b664aa6 100644 (file)
@@ -152,7 +152,7 @@ void blk_stat_add_callback(struct request_queue *q,
 
        spin_lock(&q->stats->lock);
        list_add_tail_rcu(&cb->list, &q->stats->callbacks);
-       set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
+       queue_flag_set(QUEUE_FLAG_STATS, q);
        spin_unlock(&q->stats->lock);
 }
 EXPORT_SYMBOL_GPL(blk_stat_add_callback);
@@ -163,7 +163,7 @@ void blk_stat_remove_callback(struct request_queue *q,
        spin_lock(&q->stats->lock);
        list_del_rcu(&cb->list);
        if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
-               clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
+               queue_flag_clear(QUEUE_FLAG_STATS, q);
        spin_unlock(&q->stats->lock);
 
        del_timer_sync(&cb->timer);
@@ -191,7 +191,7 @@ void blk_stat_enable_accounting(struct request_queue *q)
 {
        spin_lock(&q->stats->lock);
        q->stats->enable_accounting = true;
-       set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
+       queue_flag_set(QUEUE_FLAG_STATS, q);
        spin_unlock(&q->stats->lock);
 }