dm mpath: take m->lock spinlock when testing QUEUE_IF_NO_PATH
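
Every hunk below follows one pattern: MPATHF_QUEUE_IF_NO_PATH (and the other m->flags bits tested alongside it) is now read only while holding the m->lock spinlock, so the flag cannot change between the test and the action taken on its result. The lockless atomic_read(&m->nr_valid_paths) checks stay in front of the lock, which keeps the common all-paths-available case off the spinlock entirely.
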
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 78cff42..bf51758 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -335,6 +335,8 @@ static int pg_init_all_paths(struct multipath *m)
 
 static void __switch_pg(struct multipath *m, struct priority_group *pg)
 {
+       lockdep_assert_held(&m->lock);
+
        m->current_pg = pg;
 
        /* Must we initialise the PG first, and queue I/O till it's ready? */
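
__switch_pg() updates m->current_pg and, per the comment above, the queueing/pg-init flags, so the new assertion documents its locking contract: with lockdep enabled, reaching it without m->lock held produces a warning. A minimal sketch of the caller pattern being asserted (the wrapper is illustrative, not part of this patch):

	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	__switch_pg(m, pg);		/* lockdep_assert_held() passes */
	spin_unlock_irqrestore(&m->lock, flags);
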
@@ -382,7 +384,9 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
        unsigned bypassed = 1;
 
        if (!atomic_read(&m->nr_valid_paths)) {
+               spin_lock_irqsave(&m->lock, flags);
                clear_bit(MPATHF_QUEUE_IO, &m->flags);
+               spin_unlock_irqrestore(&m->lock, flags);
                goto failed;
        }
 
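
clear_bit() is atomic on its own, so the new lock is not about the bit flip itself: it orders this clear of MPATHF_QUEUE_IO against every other m->flags test made under m->lock, so no locked reader can observe the flag changing mid-decision. Note that the hunk adds no declaration for `flags`, so choose_pgpath() must already have one in scope.
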
@@ -422,8 +426,11 @@ check_current_pg:
                                continue;
                        pgpath = choose_path_in_pg(m, pg, nr_bytes);
                        if (!IS_ERR_OR_NULL(pgpath)) {
-                               if (!bypassed)
+                               if (!bypassed) {
+                                       spin_lock_irqsave(&m->lock, flags);
                                        set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
+                                       spin_unlock_irqrestore(&m->lock, flags);
+                               }
                                return pgpath;
                        }
                }
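
Same treatment for the retry hint: set_bit(MPATHF_PG_INIT_DELAY_RETRY) was already atomic, but performing it under m->lock makes the update ordered with respect to the pg-init code that inspects the flag while holding the same lock.
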
@@ -1614,12 +1621,16 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
                if (pgpath)
                        fail_path(pgpath);
 
-               if (atomic_read(&m->nr_valid_paths) == 0 &&
-                   !must_push_back_rq(m)) {
-                       if (error == BLK_STS_IOERR)
-                               dm_report_EIO(m);
-                       /* complete with the original error */
-                       r = DM_ENDIO_DONE;
+               if (!atomic_read(&m->nr_valid_paths)) {
+                       unsigned long flags;
+                       spin_lock_irqsave(&m->lock, flags);
+                       if (!must_push_back_rq(m)) {
+                               if (error == BLK_STS_IOERR)
+                                       dm_report_EIO(m);
+                               /* complete with the original error */
+                               r = DM_ENDIO_DONE;
+                       }
+                       spin_unlock_irqrestore(&m->lock, flags);
                }
        }
 
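
The request-based completion path is restructured rather than merely wrapped: the lockless no-valid-paths check becomes the outer condition, and m->lock is taken only on that slow path. must_push_back_rq(), which tests MPATHF_QUEUE_IF_NO_PATH among its conditions, now runs inside the critical section, closing the window in which queue_if_no_path could be switched off between the flag test and completing the request with the original error.
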
@@ -1649,22 +1660,26 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
        if (pgpath)
                fail_path(pgpath);
 
-       if (atomic_read(&m->nr_valid_paths) == 0 &&
-           !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-               if (__must_push_back(m)) {
-                       r = DM_ENDIO_REQUEUE;
-               } else {
-                       dm_report_EIO(m);
-                       *error = BLK_STS_IOERR;
+       if (!atomic_read(&m->nr_valid_paths)) {
+               spin_lock_irqsave(&m->lock, flags);
+               if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+                       if (__must_push_back(m)) {
+                               r = DM_ENDIO_REQUEUE;
+                       } else {
+                               dm_report_EIO(m);
+                               *error = BLK_STS_IOERR;
+                       }
+                       spin_unlock_irqrestore(&m->lock, flags);
+                       goto done;
                }
-               goto done;
+               spin_unlock_irqrestore(&m->lock, flags);
        }
 
        spin_lock_irqsave(&m->lock, flags);
        bio_list_add(&m->queued_bios, clone);
-       spin_unlock_irqrestore(&m->lock, flags);
        if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
                queue_work(kmultipathd, &m->process_queued_bios);
+       spin_unlock_irqrestore(&m->lock, flags);
 
        r = DM_ENDIO_INCOMPLETE;
 done:
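
Two changes in the bio completion path. The QUEUE_IF_NO_PATH and __must_push_back() decision moves under m->lock, with the goto restructured so each exit drops the lock exactly once. Separately, queue_work() is pulled inside the queued_bios critical section: the MPATHF_QUEUE_IO test is now made atomically with the bio_list_add(), so the decision whether to kick kmultipathd is based on a flag value that is consistent with the queue contents.
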
@@ -1938,6 +1953,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 {
        struct multipath *m = ti->private;
        struct pgpath *current_pgpath;
+       unsigned long flags;
        int r;
 
        current_pgpath = READ_ONCE(m->current_pgpath);
@@ -1954,10 +1970,11 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
                }
        } else {
                /* No path is available */
+               r = -EIO;
+               spin_lock_irqsave(&m->lock, flags);
                if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
                        r = -ENOTCONN;
-               else
-                       r = -EIO;
+               spin_unlock_irqrestore(&m->lock, flags);
        }
 
        if (r == -ENOTCONN) {
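
multipath_prepare_ioctl() presets r = -EIO and only overrides it to -ENOTCONN under the lock, which shrinks the critical section to the single test_bit() and makes the old else branch unnecessary. The `flags` local declared in the preceding hunk serves this pair and the one that follows.
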
@@ -1965,8 +1982,10 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
                        /* Path status changed, redo selection */
                        (void) choose_pgpath(m, 0);
                }
+               spin_lock_irqsave(&m->lock, flags);
                if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
-                       pg_init_all_paths(m);
+                       (void) __pg_init_all_paths(m);
+               spin_unlock_irqrestore(&m->lock, flags);
                dm_table_run_md_queue_async(m->ti->table);
                process_queued_io_list(m);
        }
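
With m->lock now held at the call site, the locking wrapper pg_init_all_paths() is swapped for its lock-already-held variant __pg_init_all_paths(), so the MPATHF_PG_INIT_REQUIRED test and the pg-init kick form a single critical section. Judging by the first hunk's header, pg_init_all_paths() sits just above __switch_pg() in this file; its conventional shape would be something like this sketch (not copied from the patch):

	static int pg_init_all_paths(struct multipath *m)
	{
		int ret;
		unsigned long flags;

		spin_lock_irqsave(&m->lock, flags);
		ret = __pg_init_all_paths(m);	/* expects m->lock held */
		spin_unlock_irqrestore(&m->lock, flags);

		return ret;
	}

Calling the wrapper here instead would self-deadlock, since m->lock is not a recursive lock.
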
@@ -2026,8 +2045,15 @@ static int multipath_busy(struct dm_target *ti)
                return true;
 
        /* no paths available, for blk-mq: rely on IO mapping to delay requeue */
-       if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
-               return (m->queue_mode != DM_TYPE_REQUEST_BASED);
+       if (!atomic_read(&m->nr_valid_paths)) {
+               unsigned long flags;
+               spin_lock_irqsave(&m->lock, flags);
+               if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+                       spin_unlock_irqrestore(&m->lock, flags);
+                       return (m->queue_mode != DM_TYPE_REQUEST_BASED);
+               }
+               spin_unlock_irqrestore(&m->lock, flags);
+       }
 
        /* Guess which priority_group will be used at next mapping time */
        pg = READ_ONCE(m->current_pg);
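
multipath_busy() gets the same slow-path-only shape: the atomic nr_valid_paths read stays lock-free, and only the no-path case takes the spinlock to test MPATHF_QUEUE_IF_NO_PATH. The result is unchanged; bio-based multipath reports busy, while request-based (blk-mq) multipath still relies on the IO mapping path to delay the requeue, as the comment above notes.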