Merge tag 'for-linus-20180616' of git://git.kernel.dk/linux-block
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 16 Jun 2018 20:37:55 +0000 (05:37 +0900)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 16 Jun 2018 20:37:55 +0000 (05:37 +0900)
Pull block fixes from Jens Axboe:
 "A collection of fixes that should go into -rc1. This contains:

   - bsg_open vs bsg_unregister race fix (Anatoliy)

   - NVMe pull request from Christoph, with fixes for regressions in
     this window, FC connect/reconnect path code unification, and a
     trace point addition.

   - blk-mq timeout handling fix (Christoph)

   - remove a few unused functions (Christoph)

   - blk-mq tag_set reinit fix (Roman)"

* tag 'for-linus-20180616' of git://git.kernel.dk/linux-block:
  bsg: fix race of bsg_open and bsg_unregister
  block: remove blk_queue_invalidate_tags
  nvme-fabrics: fix and refine state checks in __nvmf_check_ready
  nvme-fabrics: handle the admin-only case properly in nvmf_check_ready
  nvme-fabrics: refactor queue ready check
  blk-mq: remove blk_mq_tagset_iter
  nvme: remove nvme_reinit_tagset
  nvme-fc: fix nulling of queue data on reconnect
  nvme-fc: remove reinit_request routine
  blk-mq: don't time out requests again that are in the timeout handler
  nvme-fc: change controller's first connect to use reconnect path
  nvme: don't rely on the changed namespace list log
  nvmet: free smart-log buffer after use
  nvme-rdma: fix error flow during mapping request data
  nvme: add bio remapping tracepoint
  nvme: fix NULL pointer dereference in nvme_init_subsystem
  blk-mq: reinit q->tag_set_list entry only after grace period

block/blk-mq.c
block/blk-tag.c

diff --combined block/blk-mq.c
@@@ -671,6 -671,7 +671,7 @@@ static void __blk_mq_requeue_request(st
  
        if (blk_mq_request_started(rq)) {
                WRITE_ONCE(rq->state, MQ_RQ_IDLE);
+               rq->rq_flags &= ~RQF_TIMED_OUT;
                if (q->dma_drain_size && blk_rq_bytes(rq))
                        rq->nr_phys_segments--;
        }
@@@ -770,6 -771,7 +771,7 @@@ EXPORT_SYMBOL(blk_mq_tag_to_rq)
  
  static void blk_mq_rq_timed_out(struct request *req, bool reserved)
  {
+       req->rq_flags |= RQF_TIMED_OUT;
        if (req->q->mq_ops->timeout) {
                enum blk_eh_timer_return ret;

                ret = req->q->mq_ops->timeout(req, reserved);
                if (ret == BLK_EH_DONE)
                        return;
                WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
        }
  
+       req->rq_flags &= ~RQF_TIMED_OUT;
        blk_add_timer(req);
  }
  
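For context, a minimal sketch of a driver ->timeout callback under the v4.18
blk_mq_ops API, showing how it interacts with the RQF_TIMED_OUT flag set
above (the demo_* names are hypothetical, not part of this series):

	#include <linux/blk-mq.h>

	/* Hypothetical helper: would query the hardware; stubbed for illustration. */
	static bool demo_cmd_still_in_flight(struct request *rq)
	{
		return true;
	}

	/* Sketch of a driver ->timeout callback; demo_* names are made up. */
	static enum blk_eh_timer_return demo_timeout(struct request *rq, bool reserved)
	{
		/*
		 * RQF_TIMED_OUT stays set while this handler runs, so
		 * blk_mq_req_expired() cannot fire the same request again.
		 */
		if (demo_cmd_still_in_flight(rq))
			return BLK_EH_RESET_TIMER;	/* core clears the flag and re-arms */

		blk_mq_complete_request(rq);		/* driver completes the request */
		return BLK_EH_DONE;
	}
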
@@@ -788,6 -791,8 +791,8 @@@ static bool blk_mq_req_expired(struct r
  
        if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
                return false;
+       if (rq->rq_flags & RQF_TIMED_OUT)
+               return false;
  
        deadline = blk_rq_deadline(rq);
        if (time_after_eq(jiffies, deadline))
@@@ -1903,7 -1908,7 +1908,7 @@@ struct blk_mq_tags *blk_mq_alloc_rq_map
        if (!tags)
                return NULL;
  
 -      tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
 +      tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
                                 node);
        if (!tags->rqs) {
                blk_mq_free_tags(tags);
                return NULL;
        }
  
 -      tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
 -                               GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
 -                               node);
 +      tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
 +                                      GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
 +                                      node);
        if (!tags->static_rqs) {
                kfree(tags->rqs);
                blk_mq_free_tags(tags);
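
The kcalloc_node() conversions above are more than stylistic: unlike an
open-coded nr_tags * sizeof(...) passed to kzalloc_node(), kcalloc_node()
checks the multiplication for overflow and fails the allocation instead of
returning a too-small buffer. A minimal sketch (the demo_* helper is
hypothetical):

	#include <linux/blkdev.h>
	#include <linux/slab.h>

	/* Hypothetical helper, for illustration only. */
	static struct request **demo_alloc_rq_array(unsigned int nr_tags, int node)
	{
		/*
		 * Equivalent to kzalloc_node(nr_tags * sizeof(struct request *), ...),
		 * except that a multiplication overflow makes the call return NULL
		 * instead of silently allocating a truncated array.
		 */
		return kcalloc_node(nr_tags, sizeof(struct request *),
				    GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
	}
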
@@@ -2349,7 -2354,6 +2354,6 @@@ static void blk_mq_del_queue_tag_set(st
  
        mutex_lock(&set->tag_list_lock);
        list_del_rcu(&q->tag_set_list);
-       INIT_LIST_HEAD(&q->tag_set_list);
        if (list_is_singular(&set->tag_list)) {
                /* just transitioned to unshared */
                set->flags &= ~BLK_MQ_F_TAG_SHARED;
                blk_mq_update_tag_set_depth(set, false);
        }
        mutex_unlock(&set->tag_list_lock);
        synchronize_rcu();
+       INIT_LIST_HEAD(&q->tag_set_list);
  }
  
  static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
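
The reordering above matters because readers traverse set->tag_list under
rcu_read_lock(): list_del_rcu() leaves the removed entry's forward pointer
intact so concurrent walkers can finish, and only after synchronize_rcu()
is it safe to reinitialize the node. The general pattern, sketched with
hypothetical demo_* names:

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/rcupdate.h>

	/* Hypothetical removal path, for illustration only. */
	static void demo_del_entry(struct list_head *entry, struct mutex *lock)
	{
		mutex_lock(lock);
		list_del_rcu(entry);	/* unlink; RCU readers may still be walking it */
		mutex_unlock(lock);

		synchronize_rcu();	/* wait out every reader that could see the entry */

		/*
		 * Only now may the node be reinitialized (or freed): doing the
		 * INIT_LIST_HEAD() before the grace period would redirect the
		 * ->next pointer a concurrent list_for_each_entry_rcu() walker
		 * is about to follow.
		 */
		INIT_LIST_HEAD(entry);
	}
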
@@@ -2522,7 -2526,7 +2526,7 @@@ struct request_queue *blk_mq_init_alloc
        /* init q->mq_kobj and sw queues' kobjects */
        blk_mq_sysfs_init(q);
  
 -      q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
 +      q->queue_hw_ctx = kcalloc_node(nr_cpu_ids, sizeof(*(q->queue_hw_ctx)),
                                                GFP_KERNEL, set->numa_node);
        if (!q->queue_hw_ctx)
                goto err_percpu;
@@@ -2741,14 -2745,14 +2745,14 @@@ int blk_mq_alloc_tag_set(struct blk_mq_
        if (set->nr_hw_queues > nr_cpu_ids)
                set->nr_hw_queues = nr_cpu_ids;
  
 -      set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
 +      set->tags = kcalloc_node(nr_cpu_ids, sizeof(struct blk_mq_tags *),
                                 GFP_KERNEL, set->numa_node);
        if (!set->tags)
                return -ENOMEM;
  
        ret = -ENOMEM;
 -      set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
 -                      GFP_KERNEL, set->numa_node);
 +      set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
 +                                 GFP_KERNEL, set->numa_node);
        if (!set->mq_map)
                goto out_free_tags;
  
diff --combined block/blk-tag.c
@@@ -99,12 -99,12 +99,12 @@@ init_tag_map(struct request_queue *q, s
                       __func__, depth);
        }
  
 -      tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
 +      tag_index = kcalloc(depth, sizeof(struct request *), GFP_ATOMIC);
        if (!tag_index)
                goto fail;
  
        nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
 -      tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
 +      tag_map = kcalloc(nr_ulongs, sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                goto fail;
  
@@@ -188,7 -188,6 +188,6 @@@ int blk_queue_init_tags(struct request_
         */
        q->queue_tags = tags;
        queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
-       INIT_LIST_HEAD(&q->tag_busy_list);
        return 0;
  }
  EXPORT_SYMBOL(blk_queue_init_tags);
@@@ -374,27 -373,6 +373,6 @@@ int blk_queue_start_tag(struct request_
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
        blk_start_request(rq);
-       list_add(&rq->queuelist, &q->tag_busy_list);
        return 0;
  }
  EXPORT_SYMBOL(blk_queue_start_tag);
- /**
-  * blk_queue_invalidate_tags - invalidate all pending tags
-  * @q:  the request queue for the device
-  *
-  *  Description:
-  *   Hardware conditions may dictate a need to stop all pending requests.
-  *   In this case, we will safely clear the block side of the tag queue and
-  *   readd all requests to the request queue in the right order.
-  **/
- void blk_queue_invalidate_tags(struct request_queue *q)
- {
-       struct list_head *tmp, *n;
-       lockdep_assert_held(q->queue_lock);
-       list_for_each_safe(tmp, n, &q->tag_busy_list)
-               blk_requeue_request(q, list_entry_rq(tmp));
- }
- EXPORT_SYMBOL(blk_queue_invalidate_tags);