dm table: stack 'chunk_sectors' limit to account for target-specific splitting
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5edc307..704345e 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -18,6 +18,7 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
+#include <linux/lcm.h>
 #include <linux/blk-mq.h>
 #include <linux/mount.h>
 #include <linux/dax.h>
@@ -860,10 +861,14 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
 int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
                        sector_t start, sector_t len, void *data)
 {
-       int blocksize = *(int *) data;
+       int blocksize = *(int *) data, id;
+       bool rc;
 
-       return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
-                                      start, len);
+       id = dax_read_lock();
+       rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
+       dax_read_unlock(id);
+
+       return rc;
 }
 
 /* Check devices support synchronous DAX */
@@ -903,7 +908,7 @@ static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
        struct request_queue *q = bdev_get_queue(bdev);
 
        /* request-based cannot stack on partitions! */
-       if (bdev != bdev->bd_contains)
+       if (bdev_is_partition(bdev))
                return false;
 
        return queue_is_mq(q);
@@ -1502,6 +1507,10 @@ int dm_calculate_queue_limits(struct dm_table *table,
                        zone_sectors = ti_limits.chunk_sectors;
                }
 
+               /* Stack chunk_sectors if target-specific splitting is required */
+               if (ti->max_io_len)
+                       ti_limits.chunk_sectors = lcm_not_zero(ti->max_io_len,
+                                                              ti_limits.chunk_sectors);
                /* Set I/O hints portion of queue limits */
                if (ti->type->io_hints)
                        ti->type->io_hints(ti, &ti_limits);
@@ -1748,6 +1757,33 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
        return true;
 }
 
+static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
+                                    sector_t start, sector_t len, void *data)
+{
+       struct request_queue *q = bdev_get_queue(dev->bdev);
+
+       return q && !blk_queue_nowait(q);
+}
+
+static bool dm_table_supports_nowait(struct dm_table *t)
+{
+       struct dm_target *ti;
+       unsigned i = 0;
+
+       while (i < dm_table_get_num_targets(t)) {
+               ti = dm_table_get_target(t, i++);
+
+               if (!dm_target_supports_nowait(ti->type))
+                       return false;
+
+               if (!ti->type->iterate_devices ||
+                   ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
+                       return false;
+       }
+
+       return true;
+}
+
 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
                                      sector_t start, sector_t len, void *data)
 {
@@ -1815,7 +1851,7 @@ static int device_requires_stable_pages(struct dm_target *ti,
 {
        struct request_queue *q = bdev_get_queue(dev->bdev);
 
-       return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+       return q && blk_queue_stable_writes(q);
 }
 
 /*
@@ -1850,6 +1886,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
         */
        q->limits = *limits;
 
+       if (dm_table_supports_nowait(t))
+               blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
+       else
+               blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
+
        if (!dm_table_supports_discards(t)) {
                blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
                /* Must also clear discard limits... */
@@ -1900,9 +1941,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
         * because they do their own checksumming.
         */
        if (dm_table_requires_stable_pages(t))
-               q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+               blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
        else
-               q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+               blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
 
        /*
         * Determine whether or not this queue's I/O timings contribute
@@ -1925,8 +1966,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
        }
 #endif
 
-       /* Allow reads to exceed readahead limits */
-       q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
+       blk_queue_update_readahead(q);
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
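
For reference, below is a minimal userspace sketch (not part of the patch) of how the stacked 'chunk_sectors' value behaves. It mirrors the semantics of the kernel's lcm_not_zero() helper used in the dm_calculate_queue_limits() hunk above; the sample values and the main() driver are invented purely for illustration.

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

/* Same behaviour as the kernel helper: a zero argument yields the other value. */
static unsigned long lcm_not_zero(unsigned long a, unsigned long b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return (a / gcd(a, b)) * b;
}

int main(void)
{
	unsigned long max_io_len = 8;		/* hypothetical target split boundary */
	unsigned long chunk_sectors = 128;	/* hypothetical limit from the underlying device */

	/* Target needs its own splitting and the device has a limit: combine them. */
	printf("stacked:     %lu\n", lcm_not_zero(max_io_len, chunk_sectors));	/* 128 */
	/* No target-specific splitting: the device limit is kept as-is. */
	printf("device only: %lu\n", lcm_not_zero(0, chunk_sectors));		/* 128 */
	/* No device limit: the target's boundary becomes the stacked value. */
	printf("target only: %lu\n", lcm_not_zero(max_io_len, 0));		/* 8 */
	return 0;
}

With both values non-zero the stacked limit is their least common multiple; a zero on either side simply falls back to the other value, so the if (ti->max_io_len) guard in the hunk above only skips a call that would leave chunk_sectors unchanged.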