block: add QUEUE_FLAG_NOWAIT
[linux-2.6-microblaze.git] block/blk-core.c
index 11661f5..4884f1e 100644
@@ -116,8 +116,8 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        rq->__sector = (sector_t) -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
-       rq->tag = -1;
-       rq->internal_tag = -1;
+       rq->tag = BLK_MQ_NO_TAG;
+       rq->internal_tag = BLK_MQ_NO_TAG;
        rq->start_time_ns = ktime_get_ns();
        rq->part = NULL;
        refcount_set(&rq->ref, 1);
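The first hunk swaps the bare -1 sentinels for the named constant BLK_MQ_NO_TAG. A minimal sketch of how the constant is defined and tested, assuming the enum from block/blk-mq-tag.h (a private blk-mq header); rq_has_tag() is a hypothetical helper added purely for illustration:

    #include <linux/blkdev.h>

    /* As in block/blk-mq-tag.h: an all-ones "no tag" value. */
    enum {
            BLK_MQ_NO_TAG = -1U,
    };

    /* Hypothetical helper: the named constant makes the "no driver
     * tag assigned" state explicit instead of a magic -1. */
    static bool rq_has_tag(struct request *rq)
    {
            return rq->tag != BLK_MQ_NO_TAG;
    }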
@@ -538,11 +538,10 @@ struct request_queue *blk_alloc_queue(int node_id)
        if (!q->stats)
                goto fail_stats;
 
-       q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
-       q->backing_dev_info->io_pages = VM_READAHEAD_PAGES;
-       q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
        q->node = node_id;
 
+       atomic_set(&q->nr_active_requests_shared_sbitmap, 0);
+
        timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
                    laptop_mode_timer_fn, 0);
        timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
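Besides dropping the backing_dev_info defaults from this function, the blk_alloc_queue() hunk seeds a queue-wide active-request counter. That belongs to the shared-sbitmap work: when all hw queues allocate from one tag bitmap, per-hctx nr_active counters alone can no longer bound a queue's fair share of tags. Roughly the counting pattern the new field serves, paraphrased from a helper in the same series (blk_mq_is_sbitmap_shared() and the helper's exact shape are assumptions here):

    #include <linux/blk-mq.h>

    /* Sketch of the increment side; the decrement mirrors it. */
    static inline void inc_active_requests(struct blk_mq_hw_ctx *hctx)
    {
            /* With a shared sbitmap, fairness accounting must be
             * queue-wide, hence the atomic initialized above. */
            if (blk_mq_is_sbitmap_shared(hctx->flags))
                    atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
            else
                    atomic_inc(&hctx->nr_active);
    }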
@@ -815,9 +814,9 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
 
        /*
         * For a REQ_NOWAIT based request, return -EOPNOTSUPP
-        * if queue is not a request based queue.
+        * if the queue does not support NOWAIT.
         */
-       if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
+       if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
                goto not_supported;
 
        if (should_fail_bio(bio))
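This is the hunk the subject line names: whether a REQ_NOWAIT bio can be honored now depends on an explicit queue capability rather than on queue_is_mq(), with blk_queue_nowait(q) a test_bit() wrapper around QUEUE_FLAG_NOWAIT. That lets bio-based drivers whose submission path never blocks opt in. A sketch of the opt-in at queue setup, with mydrv_* as a hypothetical driver:

    #include <linux/blkdev.h>

    static void mydrv_setup_queue(struct request_queue *q)
    {
            /* Declare that the driver honors REQ_NOWAIT rather than
             * sleeping, so such bios stop being failed with
             * -EOPNOTSUPP in submit_bio_checks(). */
            blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
    }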
@@ -1145,14 +1144,28 @@ EXPORT_SYMBOL(submit_bio);
  *    limits when retrying requests on other queues. Those requests need
  *    to be checked against the new queue limits again during dispatch.
  */
-static int blk_cloned_rq_check_limits(struct request_queue *q,
+static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
                                      struct request *rq)
 {
-       if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
+       unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
+
+       if (blk_rq_sectors(rq) > max_sectors) {
+               /*
+                * A SCSI device does not have a good way to report whether
+                * Write Same/Zero is actually supported. If a device rejects
+                * a non-read/write command (discard, write same, etc.) the
+                * low-level device driver will set the relevant queue limit to
+                * 0 to prevent blk-lib from issuing more of the offending
+                * operations. Commands queued prior to the queue limit being
+                * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
+                * errors being propagated to upper layers.
+                */
+               if (max_sectors == 0)
+                       return BLK_STS_NOTSUPP;
+
                printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
-                       __func__, blk_rq_sectors(rq),
-                       blk_queue_get_max_sectors(q, req_op(rq)));
-               return -EIO;
+                       __func__, blk_rq_sectors(rq), max_sectors);
+               return BLK_STS_IOERR;
        }
 
        /*
@@ -1165,10 +1178,10 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
        if (rq->nr_phys_segments > queue_max_segments(q)) {
                printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
                        __func__, rq->nr_phys_segments, queue_max_segments(q));
-               return -EIO;
+               return BLK_STS_IOERR;
        }
 
-       return 0;
+       return BLK_STS_OK;
 }
 
 /**
@@ -1178,8 +1191,11 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  */
 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
-       if (blk_cloned_rq_check_limits(q, rq))
-               return BLK_STS_IOERR;
+       blk_status_t ret;
+
+       ret = blk_cloned_rq_check_limits(q, rq);
+       if (ret != BLK_STS_OK)
+               return ret;
 
        if (rq->rq_disk &&
            should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
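Returning blk_status_t from the limit check lets blk_insert_cloned_request() forward BLK_STS_NOTSUPP instead of flattening every failure to BLK_STS_IOERR. What that buys a stacked caller, sketched with a hypothetical mydrv_dispatch_clone(); blk_status_to_errno() is the stock status-to-errno mapping:

    #include <linux/blkdev.h>

    static int mydrv_dispatch_clone(struct request_queue *q,
                                    struct request *clone)
    {
            blk_status_t ret = blk_insert_cloned_request(q, clone);

            /* A target that zeroed its Write Same/discard limits now
             * surfaces as -EOPNOTSUPP instead of -EIO, so upper layers
             * can disable the feature rather than log I/O errors. */
            return blk_status_to_errno(ret);
    }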
@@ -1305,10 +1321,9 @@ void blk_account_io_start(struct request *rq)
        part_stat_unlock();
 }
 
-unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
-               unsigned int op)
+static unsigned long __part_start_io_acct(struct hd_struct *part,
+                                         unsigned int sectors, unsigned int op)
 {
-       struct hd_struct *part = &disk->part0;
        const int sgrp = op_stat_group(op);
        unsigned long now = READ_ONCE(jiffies);
 
@@ -1321,12 +1336,26 @@ unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
 
        return now;
 }
+
+unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
+                                struct bio *bio)
+{
+       *part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);
+
+       return __part_start_io_acct(*part, bio_sectors(bio), bio_op(bio));
+}
+EXPORT_SYMBOL_GPL(part_start_io_acct);
+
+unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
+                                unsigned int op)
+{
+       return __part_start_io_acct(&disk->part0, sectors, op);
+}
 EXPORT_SYMBOL(disk_start_io_acct);
 
-void disk_end_io_acct(struct gendisk *disk, unsigned int op,
-               unsigned long start_time)
+static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
+                              unsigned long start_time)
 {
-       struct hd_struct *part = &disk->part0;
        const int sgrp = op_stat_group(op);
        unsigned long now = READ_ONCE(jiffies);
        unsigned long duration = now - start_time;
@@ -1337,6 +1366,20 @@ void disk_end_io_acct(struct gendisk *disk, unsigned int op,
        part_stat_local_dec(part, in_flight[op_is_write(op)]);
        part_stat_unlock();
 }
+
+void part_end_io_acct(struct hd_struct *part, struct bio *bio,
+                     unsigned long start_time)
+{
+       __part_end_io_acct(part, bio_op(bio), start_time);
+       hd_struct_put(part);
+}
+EXPORT_SYMBOL_GPL(part_end_io_acct);
+
+void disk_end_io_acct(struct gendisk *disk, unsigned int op,
+                     unsigned long start_time)
+{
+       __part_end_io_acct(&disk->part0, op, start_time);
+}
 EXPORT_SYMBOL(disk_end_io_acct);
 
 /*
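The accounting hunks split disk_{start,end}_io_acct() into an internal __part_*_io_acct() core and add a partition-aware pair on top: part_start_io_acct() resolves the bio's partition via disk_map_sector_rcu() (taking a reference), and part_end_io_acct() drops it through hd_struct_put() once the stats are updated. The intended call pattern for a bio-based driver, sketched with hypothetical mydrv_* names:

    #include <linux/blkdev.h>
    #include <linux/genhd.h>

    static void mydrv_submit_bio(struct gendisk *disk, struct bio *bio)
    {
            struct hd_struct *part;
            unsigned long start;

            /* Starts per-partition in-flight accounting and pins the
             * partition the bio lands on. */
            start = part_start_io_acct(disk, &part, bio);

            /* ... issue and complete the I/O ... */

            /* Finishes accounting and releases the reference taken by
             * part_start_io_acct(). */
            part_end_io_acct(part, bio, start);
    }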