Merge tag 'xfs-5.11-merge-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
diff --git a/mm/highmem.c b/mm/highmem.c
index 1352a27..c3a9ea7 100644
 #include <asm/tlbflush.h>
 #include <linux/vmalloc.h>
 
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
-DEFINE_PER_CPU(int, __kmap_atomic_idx);
-#endif
-
 /*
  * Virtual_count is not a pure "count".
  *  0 means that it is not mapped, and has not been mapped
@@ -108,9 +104,7 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
 atomic_long_t _totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(_totalhigh_pages);
 
-EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
-
-unsigned int nr_free_highpages (void)
+unsigned int __nr_free_highpages (void)
 {
        struct zone *zone;
        unsigned int pages = 0;
@@ -147,7 +141,7 @@ pte_t * pkmap_page_table;
                do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
 #endif
 
-struct page *kmap_to_page(void *vaddr)
+struct page *__kmap_to_page(void *vaddr)
 {
        unsigned long addr = (unsigned long)vaddr;
 
@@ -158,7 +152,7 @@ struct page *kmap_to_page(void *vaddr)
 
        return virt_to_page(addr);
 }
-EXPORT_SYMBOL(kmap_to_page);
+EXPORT_SYMBOL(__kmap_to_page);
 
 static void flush_all_zero_pkmaps(void)
 {
@@ -200,10 +194,7 @@ static void flush_all_zero_pkmaps(void)
                flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
 
-/**
- * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
- */
-void kmap_flush_unused(void)
+void __kmap_flush_unused(void)
 {
        lock_kmap();
        flush_all_zero_pkmaps();
@@ -367,9 +358,312 @@ void kunmap_high(struct page *page)
        if (need_wakeup)
                wake_up(pkmap_map_wait);
 }
-
 EXPORT_SYMBOL(kunmap_high);
-#endif /* CONFIG_HIGHMEM */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
+               unsigned start2, unsigned end2)
+{
+       unsigned int i;
+
+       BUG_ON(end1 > page_size(page) || end2 > page_size(page));
+
+       for (i = 0; i < compound_nr(page); i++) {
+               void *kaddr = NULL;
+
+               if (start1 < PAGE_SIZE || start2 < PAGE_SIZE)
+                       kaddr = kmap_atomic(page + i);
+
+               if (start1 >= PAGE_SIZE) {
+                       start1 -= PAGE_SIZE;
+                       end1 -= PAGE_SIZE;
+               } else {
+                       unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);
+
+                       if (end1 > start1)
+                               memset(kaddr + start1, 0, this_end - start1);
+                       end1 -= this_end;
+                       start1 = 0;
+               }
+
+               if (start2 >= PAGE_SIZE) {
+                       start2 -= PAGE_SIZE;
+                       end2 -= PAGE_SIZE;
+               } else {
+                       unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);
+
+                       if (end2 > start2)
+                               memset(kaddr + start2, 0, this_end - start2);
+                       end2 -= this_end;
+                       start2 = 0;
+               }
+
+               if (kaddr) {
+                       kunmap_atomic(kaddr);
+                       flush_dcache_page(page + i);
+               }
+
+               if (!end1 && !end2)
+                       break;
+       }
+
+       BUG_ON((start1 | start2 | end1 | end2) != 0);
+}
+EXPORT_SYMBOL(zero_user_segments);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_HIGHMEM */
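
For orientation (editor's sketch, not part of the diff): zero_user_segments() takes two byte ranges expressed as offsets into the whole, possibly compound, page and zeroes both in one walk over the subpages. A minimal usage sketch; zero_outside_range() and its from/to parameters are hypothetical:

static void zero_outside_range(struct page *page, unsigned from, unsigned to)
{
	/* Zero [0, from) and [to, page_size(page)) in a single call. */
	zero_user_segments(page, 0, from, to, page_size(page));
}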
+
+#ifdef CONFIG_KMAP_LOCAL
+
+#include <asm/kmap_size.h>
+
+/*
+ * With DEBUG_KMAP_LOCAL the stack depth is doubled and every second
+ * slot is left unused to act as a guard page.
+ */
+#ifdef CONFIG_DEBUG_KMAP_LOCAL
+# define KM_INCR       2
+#else
+# define KM_INCR       1
+#endif
+
+static inline int kmap_local_idx_push(void)
+{
+       WARN_ON_ONCE(in_irq() && !irqs_disabled());
+       current->kmap_ctrl.idx += KM_INCR;
+       BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
+       return current->kmap_ctrl.idx - 1;
+}
+
+static inline int kmap_local_idx(void)
+{
+       return current->kmap_ctrl.idx - 1;
+}
+
+static inline void kmap_local_idx_pop(void)
+{
+       current->kmap_ctrl.idx -= KM_INCR;
+       BUG_ON(current->kmap_ctrl.idx < 0);
+}
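
The push/pop pair above makes the per-task slots a strict stack, so local mappings must nest and be released in reverse order. A hedged sketch using the public kmap_local_page()/kunmap_local() wrappers from <linux/highmem.h>; copy_page_sketch() is a hypothetical helper:

static void copy_page_sketch(struct page *dst, struct page *src)
{
	void *d = kmap_local_page(dst);	/* pushes one index */
	void *s = kmap_local_page(src);	/* pushes the next index */

	memcpy(d, s, PAGE_SIZE);

	kunmap_local(s);		/* pop in reverse order */
	kunmap_local(d);
}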
+
+#ifndef arch_kmap_local_post_map
+# define arch_kmap_local_post_map(vaddr, pteval)       do { } while (0)
+#endif
+
+#ifndef arch_kmap_local_pre_unmap
+# define arch_kmap_local_pre_unmap(vaddr)              do { } while (0)
+#endif
+
+#ifndef arch_kmap_local_post_unmap
+# define arch_kmap_local_post_unmap(vaddr)             do { } while (0)
+#endif
+
+#ifndef arch_kmap_local_map_idx
+#define arch_kmap_local_map_idx(idx, pfn)      kmap_local_calc_idx(idx)
+#endif
+
+#ifndef arch_kmap_local_unmap_idx
+#define arch_kmap_local_unmap_idx(idx, vaddr)  kmap_local_calc_idx(idx)
+#endif
+
+#ifndef arch_kmap_local_high_get
+static inline void *arch_kmap_local_high_get(struct page *page)
+{
+       return NULL;
+}
+#endif
+
+/* Unmap a local mapping which was obtained by kmap_high_get() */
+static inline bool kmap_high_unmap_local(unsigned long vaddr)
+{
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+       if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+               kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
+               return true;
+       }
+#endif
+       return false;
+}
+
+static inline int kmap_local_calc_idx(int idx)
+{
+       return idx + KM_MAX_IDX * smp_processor_id();
+}
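
kmap_local_calc_idx() gives each CPU its own block of KM_MAX_IDX fixmap slots, so tasks running concurrently on different CPUs never collide. A sketch of the resulting address computation, mirroring the __fix_to_virt() use below; kmap_slot_vaddr() is hypothetical:

static unsigned long kmap_slot_vaddr(int cpu, int idx)
{
	/* local slot 'idx' on 'cpu' lands in that CPU's private window */
	return __fix_to_virt(FIX_KMAP_BEGIN + idx + KM_MAX_IDX * cpu);
}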
+
+static pte_t *__kmap_pte;
+
+static pte_t *kmap_get_pte(void)
+{
+       if (!__kmap_pte)
+               __kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
+       return __kmap_pte;
+}
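
Note that fixmap virtual addresses decrease as the fixmap index increases, which is why the code below addresses the PTE of slot 'idx' as kmap_pte - idx relative to the FIX_KMAP_BEGIN entry looked up here.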
+
+void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+       pte_t pteval, *kmap_pte = kmap_get_pte();
+       unsigned long vaddr;
+       int idx;
+
+       /*
+        * Disable migration so the resulting virtual address is stable
+        * across preemption.
+        */
+       migrate_disable();
+       preempt_disable();
+       idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       BUG_ON(!pte_none(*(kmap_pte - idx)));
+       pteval = pfn_pte(pfn, prot);
+       set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
+       arch_kmap_local_post_map(vaddr, pteval);
+       current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
+       preempt_enable();
+
+       return (void *)vaddr;
+}
+EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
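
For reference, the PFN-based public entry point in this series is a thin wrapper over the function above; roughly (hedged sketch, kmap_prot defaults to PAGE_KERNEL on most architectures):

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}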
+
+void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+       void *kmap;
+
+       /*
+        * To broaden the use of the actual kmap_local() machinery, always
+        * map pages when debugging is enabled and the architecture has no
+        * problems with alias mappings.
+        */
+       if (!IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) && !PageHighMem(page))
+               return page_address(page);
+
+       /* Try kmap_high_get() if architecture has it enabled */
+       kmap = arch_kmap_local_high_get(page);
+       if (kmap)
+               return kmap;
+
+       return __kmap_local_pfn_prot(page_to_pfn(page), prot);
+}
+EXPORT_SYMBOL(__kmap_local_page_prot);
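
Likewise, the page-based wrapper used in the copy_page_sketch() above is roughly (hedged):

static inline void *kmap_local_page(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}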
+
+void kunmap_local_indexed(void *vaddr)
+{
+       unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
+       pte_t *kmap_pte = kmap_get_pte();
+       int idx;
+
+       if (addr < __fix_to_virt(FIX_KMAP_END) ||
+           addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
+               if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP)) {
+                       /* This _should_ never happen! See above. */
+                       WARN_ON_ONCE(1);
+                       return;
+               }
+               /*
+                * Handle mappings which were obtained by kmap_high_get()
+                * first as the virtual address of such mappings is below
+                * PAGE_OFFSET. Warn for all other addresses which are in
+                * the user space part of the virtual address space.
+                */
+               if (!kmap_high_unmap_local(addr))
+                       WARN_ON_ONCE(addr < PAGE_OFFSET);
+               return;
+       }
+
+       preempt_disable();
+       idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
+       WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+       arch_kmap_local_pre_unmap(addr);
+       pte_clear(&init_mm, addr, kmap_pte - idx);
+       arch_kmap_local_post_unmap(addr);
+       current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
+       kmap_local_idx_pop();
+       preempt_enable();
+       migrate_enable();
+}
+EXPORT_SYMBOL(kunmap_local_indexed);
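
A hedged round-trip sketch using only the exported low-level functions from this file; poke_page_word() and its offset parameter are hypothetical:

static void poke_page_word(struct page *page, unsigned int offset, u32 val)
{
	void *vaddr = __kmap_local_page_prot(page, PAGE_KERNEL);

	*(u32 *)(vaddr + offset) = val;	/* write through the local mapping */
	kunmap_local_indexed(vaddr);
}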
+
+/*
+ * Invoked before switch_to(). This is safe even if an interrupt which
+ * needs a kmap_local happens during or after clearing the maps, because
+ * task::kmap_ctrl.idx is not modified by the unmapping code, so a nested
+ * kmap_local will use the next unused index and restore the index on
+ * unmap. The already cleared kmaps of the outgoing task are irrelevant
+ * because the interrupt context does not know about them. The same
+ * applies when scheduling back in for an interrupt which happens before
+ * the restore is complete.
+ */
+void __kmap_local_sched_out(void)
+{
+       struct task_struct *tsk = current;
+       pte_t *kmap_pte = kmap_get_pte();
+       int i;
+
+       /* Clear kmaps */
+       for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
+               pte_t pteval = tsk->kmap_ctrl.pteval[i];
+               unsigned long addr;
+               int idx;
+
+               /* With debug all even slots are unmapped and act as guard */
+               if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
+                       WARN_ON_ONCE(!pte_none(pteval));
+                       continue;
+               }
+               if (WARN_ON_ONCE(pte_none(pteval)))
+                       continue;
+
+               /*
+                * This is a horrible hack for XTENSA to calculate the
+                * coloured PTE index. It uses the PFN encoded in the pteval
+                * plus the map index calculation because the actually mapped
+                * virtual address is not stored in task::kmap_ctrl.
+                * For any sane architecture this is optimized out.
+                */
+               idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
+
+               addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+               arch_kmap_local_pre_unmap(addr);
+               pte_clear(&init_mm, addr, kmap_pte - idx);
+               arch_kmap_local_post_unmap(addr);
+       }
+}
+
+void __kmap_local_sched_in(void)
+{
+       struct task_struct *tsk = current;
+       pte_t *kmap_pte = kmap_get_pte();
+       int i;
+
+       /* Restore kmaps */
+       for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
+               pte_t pteval = tsk->kmap_ctrl.pteval[i];
+               unsigned long addr;
+               int idx;
+
+               /* With debug all even slots are unmapped and act as guard */
+               if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
+                       WARN_ON_ONCE(!pte_none(pteval));
+                       continue;
+               }
+               if (WARN_ON_ONCE(pte_none(pteval)))
+                       continue;
+
+               /* See comment in __kmap_local_sched_out() */
+               idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
+               addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+               set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
+               arch_kmap_local_post_map(addr, pteval);
+       }
+}
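
Context, hedged: in this series the scheduler calls these hooks around a task switch, kmap_local_sched_out() from prepare_task_switch() and kmap_local_sched_in() from finish_task_switch() in kernel/sched/core.c, so the fixmap slots of the outgoing task become available to the incoming one.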
+
+void kmap_local_fork(struct task_struct *tsk)
+{
+       if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
+               memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
+}
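
A child is expected to start with an empty kmap_ctrl; in this series kmap_local_fork() is called from the fork path, so the warning only fires if a task forks while holding live local kmaps.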
+
+#endif
 
 #if defined(HASHED_PAGE_VIRTUAL)