__dlm_lockres_reserve_ast(res);
        spin_unlock(&res->spinlock);
 
-       /* now flush all the pending asts.. hang out for a bit */
+       /* now flush all the pending asts */
        dlm_kick_thread(dlm, res);
+       /* before waiting on DIRTY, block processes which may
+        * try to dirty the lockres before MIGRATING is set */
+       spin_lock(&res->spinlock);
+       BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
+       res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
+       spin_unlock(&res->spinlock);
+       /* now wait on any pending asts and the DIRTY state */
        wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
        dlm_lockres_release_ast(dlm, res);
 
                mlog(0, "trying again...\n");
                goto again;
        }
+       /* now that we are sure the MIGRATING state is there, drop
+        * the unneeded state which blocked threads trying to DIRTY */
+       spin_lock(&res->spinlock);
+       BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
+       BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
+       res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
+       spin_unlock(&res->spinlock);
 
        /* did the target go down or die? */
        spin_lock(&dlm->spinlock);
 
 int __dlm_lockres_unused(struct dlm_lock_resource *res)
 {
        if (!__dlm_lockres_has_locks(res) &&
-           list_empty(&res->dirty)) {
+           (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
                /* try not to scan the bitmap unless the first two
                 * conditions are already true */
                int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
        assert_spin_locked(&res->spinlock);
 
        /* don't shuffle secondary queues */
-       if ((res->owner == dlm->node_num) &&
-           !(res->state & DLM_LOCK_RES_DIRTY)) {
-               /* ref for dirty_list */
-               dlm_lockres_get(res);
-               list_add_tail(&res->dirty, &dlm->dirty_list);
-               res->state |= DLM_LOCK_RES_DIRTY;
+       if ((res->owner == dlm->node_num)) {
+               if (res->state & (DLM_LOCK_RES_MIGRATING |
+                                 DLM_LOCK_RES_BLOCK_DIRTY))
+                   return;
+
+               if (list_empty(&res->dirty)) {
+                       /* ref for dirty_list */
+                       dlm_lockres_get(res);
+                       list_add_tail(&res->dirty, &dlm->dirty_list);
+                       res->state |= DLM_LOCK_RES_DIRTY;
+               }
        }
 }
 
                        dlm_lockres_get(res);
 
                        spin_lock(&res->spinlock);
-                       res->state &= ~DLM_LOCK_RES_DIRTY;
+                       /* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
                        list_del_init(&res->dirty);
                        spin_unlock(&res->spinlock);
                        spin_unlock(&dlm->spinlock);
                        /* it is now ok to move lockreses in these states
                         * to the dirty list, assuming that they will only be
                         * dirty for a short while. */
+                       BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
                        if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
-                                         DLM_LOCK_RES_MIGRATING |
                                          DLM_LOCK_RES_RECOVERING)) {
                                /* move it to the tail and keep going */
+                               res->state &= ~DLM_LOCK_RES_DIRTY;
                                spin_unlock(&res->spinlock);
                                mlog(0, "delaying list shuffling for in-"
                                     "progress lockres %.*s, state=%d\n",
 
                        /* called while holding lockres lock */
                        dlm_shuffle_lists(dlm, res);
+                       res->state &= ~DLM_LOCK_RES_DIRTY;
                        spin_unlock(&res->spinlock);
 
                        dlm_lockres_calc_usage(dlm, res);
                        /* if the lock was in-progress, stick
                         * it on the back of the list */
                        if (delay) {
-                               /* ref for dirty_list */
-                               dlm_lockres_get(res);
                                spin_lock(&res->spinlock);
-                               list_add_tail(&res->dirty, &dlm->dirty_list);
-                               res->state |= DLM_LOCK_RES_DIRTY;
+                               __dlm_dirty_lockres(dlm, res);
                                spin_unlock(&res->spinlock);
                        }
                        dlm_lockres_put(res);