X-Git-Url: http://git.monstr.eu/?a=blobdiff_plain;f=arch%2Farm%2Finclude%2Fasm%2Fmmu_context.h;h=94e265cb5146ddc02569017e6c710f987d84a8f0;hb=7fec1b57b8a925d83c194f995f83d9f8442fd48e;hp=a0b3cac0547c0a9949c30cc919adcf5e08fcf500;hpb=adb3b1f3fc1c6edb501808ebf80a81e81c52eb73;p=linux-2.6-microblaze.git

diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a0b3cac0547c..94e265cb5146 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -49,39 +49,80 @@ DECLARE_PER_CPU(struct mm_struct *, current_mm);
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
+void cpu_set_reserved_ttbr0(void);
 
-static inline void check_context(struct mm_struct *mm)
+static inline void switch_new_context(struct mm_struct *mm)
 {
-	/*
-	 * This code is executed with interrupts enabled. Therefore,
-	 * mm->context.id cannot be updated to the latest ASID version
-	 * on a different CPU (and condition below not triggered)
-	 * without first getting an IPI to reset the context. The
-	 * alternative is to take a read_lock on mm->context.id_lock
-	 * (after changing its type to rwlock_t).
-	 */
-	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		__new_context(mm);
+	unsigned long flags;
+
+	__new_context(mm);
+
+	local_irq_save(flags);
+	cpu_switch_mm(mm->pgd, mm);
+	local_irq_restore(flags);
+}
 
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
+
+	/*
+	 * Required during context switch to avoid speculative page table
+	 * walking with the wrong TTBR.
+	 */
+	cpu_set_reserved_ttbr0();
+
+	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
+		/*
+		 * The ASID is from the current generation, just switch to the
+		 * new pgd. This condition is only true for calls from
+		 * context_switch() and interrupts are already disabled.
+		 */
+		cpu_switch_mm(mm->pgd, mm);
+	else if (irqs_disabled())
+		/*
+		 * Defer the new ASID allocation until after the context
+		 * switch critical region since __new_context() cannot be
+		 * called with interrupts disabled (it sends IPIs).
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		/*
+		 * That is a direct call to switch_mm() or activate_mm() with
+		 * interrupts enabled and a new context.
+		 */
+		switch_new_context(mm);
 }
 
 #define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
 
-#else
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
+		switch_new_context(current->mm);
+}
 
-static inline void check_context(struct mm_struct *mm)
+#else	/* !CONFIG_CPU_HAS_ASID */
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
 {
 #ifdef CONFIG_MMU
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
+	cpu_switch_mm(mm->pgd, mm);
 #endif
 }
 
 #define init_new_context(tsk,mm)	0
 
-#endif
+#define finish_arch_post_lock_switch()	do { } while (0)
+
+#endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
 
@@ -123,8 +164,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
 		*crt_mm = next;
 #endif
-		check_context(next);
-		cpu_switch_mm(next->pgd, next);
+		check_and_switch_context(next, tsk);
 		if (cache_is_vivt())
 			cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	}
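Note: the hunks above replace the old check_context()/cpu_switch_mm() pair with check_and_switch_context(), which either switches immediately (ASID from the current generation), defers the work via TIF_SWITCH_MM when interrupts are disabled, or allocates a new ASID on the spot. The stand-alone sketch below models that three-way decision and the later completion in finish_arch_post_lock_switch(). It is a simplified user-space illustration only; mm_model, switch_mm_stub(), allocate_new_asid_stub(), irqs_off and deferred_switch are names invented for the example, while the kernel's real helpers are cpu_switch_mm(), __new_context() and the TIF_SWITCH_MM thread flag shown in the diff.

/*
 * Simplified, user-space model of the deferral logic in the diff above.
 * All names here are hypothetical stand-ins, not kernel interfaces.
 */
#include <stdbool.h>
#include <stdio.h>

#define ASID_BITS	8

struct mm_model {
	unsigned int context_id;	/* generation << ASID_BITS | ASID */
};

static unsigned int last_asid = 1 << ASID_BITS;	/* current generation */
static bool irqs_off;		/* models irqs_disabled() */
static bool deferred_switch;	/* models the TIF_SWITCH_MM thread flag */

/* Stand-in for cpu_switch_mm(): just report the switch. */
static void switch_mm_stub(struct mm_model *mm)
{
	printf("switching to context %#x\n", mm->context_id);
}

/* Stand-in for __new_context(): hand out the next ASID in the current generation. */
static void allocate_new_asid_stub(struct mm_model *mm)
{
	mm->context_id = ++last_asid;
}

/* Mirrors the three-way decision in check_and_switch_context(). */
static void check_and_switch_model(struct mm_model *mm)
{
	if (!((mm->context_id ^ last_asid) >> ASID_BITS)) {
		switch_mm_stub(mm);		/* same generation: cheap path */
	} else if (irqs_off) {
		deferred_switch = true;		/* cannot allocate now, defer */
	} else {
		allocate_new_asid_stub(mm);	/* direct call with IRQs enabled */
		switch_mm_stub(mm);
	}
}

/* Mirrors finish_arch_post_lock_switch(): runs once IRQs are enabled again. */
static void finish_post_lock_switch_model(struct mm_model *mm)
{
	if (deferred_switch) {
		deferred_switch = false;
		allocate_new_asid_stub(mm);
		switch_mm_stub(mm);
	}
}

int main(void)
{
	struct mm_model mm = { .context_id = 0 };	/* stale generation */

	irqs_off = true;			/* inside the context-switch critical region */
	check_and_switch_model(&mm);		/* defers the ASID rollover */

	irqs_off = false;			/* critical region left, IRQs back on */
	finish_post_lock_switch_model(&mm);	/* completes the deferred switch */
	return 0;
}

As the diff's comments note, the deferral exists because __new_context() sends IPIs and therefore cannot run in the interrupts-disabled context-switch path; finish_arch_post_lock_switch() performs the postponed allocation and page-table switch once interrupts are enabled again.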