userfaultfd: hugetlbfs: add copy_huge_page_from_user for hugetlb userfaultfd support
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b84615b..6bc7f2c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -76,6 +76,10 @@ extern int mmap_rnd_compat_bits __read_mostly;
 #define page_to_virt(x)        __va(PFN_PHYS(page_to_pfn(x)))
 #endif
 
+#ifndef lm_alias
+#define lm_alias(x)    __va(__pa_symbol(x))
+#endif
+
 /*
  * To prevent common memory management code establishing
  * a zero page mapping on a read fault.
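For illustration only (not part of the patch): lm_alias() just wraps the
__va(__pa_symbol(x)) fold added above, giving the linear-map alias of a
kernel-image symbol.  A minimal sketch, assuming _stext as the symbol of
interest:

	/* linear-map address of the start of kernel text, instead of
	 * open-coding __va(__pa_symbol(_stext)) at each call site */
	void *text_alias = lm_alias(_stext);
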
@@ -281,6 +285,17 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_REMOTE      0x80    /* faulting for non current tsk/mm */
 #define FAULT_FLAG_INSTRUCTION  0x100  /* The fault was during an instruction fetch */
 
+#define FAULT_FLAG_TRACE \
+       { FAULT_FLAG_WRITE,             "WRITE" }, \
+       { FAULT_FLAG_MKWRITE,           "MKWRITE" }, \
+       { FAULT_FLAG_ALLOW_RETRY,       "ALLOW_RETRY" }, \
+       { FAULT_FLAG_RETRY_NOWAIT,      "RETRY_NOWAIT" }, \
+       { FAULT_FLAG_KILLABLE,          "KILLABLE" }, \
+       { FAULT_FLAG_TRIED,             "TRIED" }, \
+       { FAULT_FLAG_USER,              "USER" }, \
+       { FAULT_FLAG_REMOTE,            "REMOTE" }, \
+       { FAULT_FLAG_INSTRUCTION,       "INSTRUCTION" }
+
 /*
  * vm_fault is filled by the pagefault handler and passed to the vma's
  * ->fault function. The vma's ->fault is responsible for returning a bitmask
@@ -336,8 +351,7 @@ struct vm_operations_struct {
        void (*close)(struct vm_area_struct * area);
        int (*mremap)(struct vm_area_struct * area);
        int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
-       int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
-                                               pmd_t *, unsigned int flags);
+       int (*pmd_fault)(struct vm_fault *vmf);
        void (*map_pages)(struct vm_fault *vmf,
                        pgoff_t start_pgoff, pgoff_t end_pgoff);
 
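For illustration only (not part of the patch): with this conversion the
former (vma, address, pmd, flags) arguments all travel inside struct
vm_fault.  A minimal sketch of a hypothetical ->pmd_fault hook under that
assumption (the vma/address/pmd/flags field names come from the same
vm_fault rework and are not shown in this hunk):

	static int mydrv_pmd_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;	/* was an explicit argument */
		unsigned long pmd_addr = vmf->address & PMD_MASK;

		/* bail out to the PTE path if a huge mapping cannot cover the range */
		if (pmd_addr < vma->vm_start || pmd_addr + PMD_SIZE > vma->vm_end)
			return VM_FAULT_FALLBACK;

		/* ... install a huge entry at vmf->pmd, honouring vmf->flags ... */
		return VM_FAULT_NOPAGE;
	}
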
@@ -1107,6 +1121,20 @@ static inline void clear_page_pfmemalloc(struct page *page)
                         VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
                         VM_FAULT_FALLBACK)
 
+#define VM_FAULT_RESULT_TRACE \
+       { VM_FAULT_OOM,                 "OOM" }, \
+       { VM_FAULT_SIGBUS,              "SIGBUS" }, \
+       { VM_FAULT_MAJOR,               "MAJOR" }, \
+       { VM_FAULT_WRITE,               "WRITE" }, \
+       { VM_FAULT_HWPOISON,            "HWPOISON" }, \
+       { VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" }, \
+       { VM_FAULT_SIGSEGV,             "SIGSEGV" }, \
+       { VM_FAULT_NOPAGE,              "NOPAGE" }, \
+       { VM_FAULT_LOCKED,              "LOCKED" }, \
+       { VM_FAULT_RETRY,               "RETRY" }, \
+       { VM_FAULT_FALLBACK,            "FALLBACK" }, \
+       { VM_FAULT_DONE_COW,            "DONE_COW" }
+
 /* Encode hstate index for a hwpoisoned large page */
 #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
 #define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
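For illustration only (not part of the patch): FAULT_FLAG_TRACE and
VM_FAULT_RESULT_TRACE are { flag, "name" } pair lists in the form the
tracing helpers __print_flags()/__print_symbolic() expect.  A minimal
sketch of a hypothetical trace event consuming them:

	TRACE_EVENT(mydrv_fault,
		TP_PROTO(unsigned long address, unsigned int flags, int result),
		TP_ARGS(address, flags, result),
		TP_STRUCT__entry(
			__field(unsigned long, address)
			__field(unsigned int, flags)
			__field(int, result)
		),
		TP_fast_assign(
			__entry->address = address;
			__entry->flags = flags;
			__entry->result = result;
		),
		TP_printk("address=%#lx flags=%s result=%s",
			__entry->address,
			__print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
			__print_flags(__entry->result, "|", VM_FAULT_RESULT_TRACE))
	);
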
@@ -2396,6 +2424,9 @@ extern void clear_huge_page(struct page *page,
 extern void copy_user_huge_page(struct page *dst, struct page *src,
                                unsigned long addr, struct vm_area_struct *vma,
                                unsigned int pages_per_huge_page);
+extern long copy_huge_page_from_user(struct page *dst_page,
+                               const void __user *usr_src,
+                               unsigned int pages_per_huge_page);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 extern struct page_ext_operations debug_guardpage_ops;
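
For illustration only: this hunk adds just the declaration; the definition
lands elsewhere in the series.  A minimal sketch matching the declared
signature, assuming a plain per-base-page copy_from_user() loop and
contiguous struct pages for the subpages:

	long copy_huge_page_from_user(struct page *dst_page,
				      const void __user *usr_src,
				      unsigned int pages_per_huge_page)
	{
		const char __user *src = usr_src;
		unsigned long i, rc = 0;
		/* mirror copy_from_user(): return bytes left uncopied, 0 on success */
		unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
		void *page_kaddr;

		for (i = 0; i < pages_per_huge_page; i++) {
			page_kaddr = kmap(dst_page + i);	/* assumes contiguous subpages */
			rc = copy_from_user(page_kaddr,
					    src + i * PAGE_SIZE, PAGE_SIZE);
			kunmap(dst_page + i);

			ret_val -= (PAGE_SIZE - rc);
			if (rc)
				break;

			cond_resched();
		}
		return ret_val;
	}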