mmu_notifiers: don't invalidate secondary TLBs as part of mmu_notifier_invalidate_ran...
[linux-2.6-microblaze.git] mm/mmu_notifier.c
index b7ad155..453a156 100644
@@ -551,7 +551,7 @@ int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
 
 static void
 mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
-                       struct mmu_notifier_range *range, bool only_end)
+                       struct mmu_notifier_range *range)
 {
        struct mmu_notifier *subscription;
        int id;
@@ -559,24 +559,6 @@ mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
-               /*
-                * Call invalidate_range here too to avoid the need for the
-                * subsystem of having to register an invalidate_range_end
-                * call-back when there is invalidate_range already. Usually a
-                * subsystem registers either invalidate_range_start()/end() or
-                * invalidate_range(), so this will be no additional overhead
-                * (besides the pointer check).
-                *
-                * We skip call to invalidate_range() if we know it is safe ie
-                * call site use mmu_notifier_invalidate_range_only_end() which
-                * is safe to do when we know that a call to invalidate_range()
-                * already happen under page table lock.
-                */
-               if (!only_end && subscription->ops->invalidate_range)
-                       subscription->ops->invalidate_range(subscription,
-                                                           range->mm,
-                                                           range->start,
-                                                           range->end);
                if (subscription->ops->invalidate_range_end) {
                        if (!mmu_notifier_range_blockable(range))
                                non_block_start();
@@ -589,8 +571,7 @@ mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
        srcu_read_unlock(&srcu, id);
 }
 
-void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
-                                        bool only_end)
+void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
 {
        struct mmu_notifier_subscriptions *subscriptions =
                range->mm->notifier_subscriptions;
@@ -600,7 +581,7 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
                mn_itree_inv_end(subscriptions);
 
        if (!hlist_empty(&subscriptions->list))
-               mn_hlist_invalidate_end(subscriptions, range, only_end);
+               mn_hlist_invalidate_end(subscriptions, range);
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
 }
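
Note (not part of the diff above): with the only_end argument gone, the inline wrappers in include/linux/mmu_notifier.h would collapse into a single helper along the following lines, and mmu_notifier_invalidate_range_only_end() loses its reason to exist. This is a sketch based on the pre-existing wrapper, shown only for context:

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	/* Blockable invalidations are allowed to sleep in the callbacks. */
	if (mmu_notifier_range_blockable(range))
		might_sleep();

	/* Only take the SRCU walk if this mm has notifiers registered. */
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range);
}

Callers that previously used mmu_notifier_invalidate_range_only_end() to skip the extra ->invalidate_range() call would simply call mmu_notifier_invalidate_range_end() instead, since secondary-TLB invalidation is no longer driven from the _end() path.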