diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 0e21328..7a90084 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -483,7 +483,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
                                              unsigned long src_start,
                                              unsigned long len,
                                              enum mcopy_atomic_mode mcopy_mode,
-                                             bool *mmap_changing,
+                                             atomic_t *mmap_changing,
                                              __u64 mode)
 {
        struct vm_area_struct *dst_vma;
@@ -517,7 +517,7 @@ retry:
         * request the user to retry later
         */
        err = -EAGAIN;
-       if (mmap_changing && READ_ONCE(*mmap_changing))
+       if (mmap_changing && atomic_read(mmap_changing))
                goto out_unlock;
 
        /*
@@ -650,28 +650,29 @@ out:
 
 ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
                     unsigned long src_start, unsigned long len,
-                    bool *mmap_changing, __u64 mode)
+                    atomic_t *mmap_changing, __u64 mode)
 {
        return __mcopy_atomic(dst_mm, dst_start, src_start, len,
                              MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
 }
 
 ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
-                      unsigned long len, bool *mmap_changing)
+                      unsigned long len, atomic_t *mmap_changing)
 {
        return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
                              mmap_changing, 0);
 }
 
 ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
-                      unsigned long len, bool *mmap_changing)
+                      unsigned long len, atomic_t *mmap_changing)
 {
        return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
                              mmap_changing, 0);
 }
 
 int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
-                       unsigned long len, bool enable_wp, bool *mmap_changing)
+                       unsigned long len, bool enable_wp,
+                       atomic_t *mmap_changing)
 {
        struct vm_area_struct *dst_vma;
        pgprot_t newprot;
@@ -694,7 +695,7 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
         * request the user to retry later
         */
        err = -EAGAIN;
-       if (mmap_changing && READ_ONCE(*mmap_changing))
+       if (mmap_changing && atomic_read(mmap_changing))
                goto out_unlock;
 
        err = -ENOENT;
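
The hunks above only show the read side of the conversion: `mmap_changing` goes from a plain `bool` read with `READ_ONCE()` to an `atomic_t` read with `atomic_read()`, so the copy/zeropage/continue/writeprotect paths keep returning -EAGAIN while the value is nonzero. The write side is not part of this file; presumably the `mmap_changing` field in `struct userfaultfd_ctx` (fs/userfaultfd.c) becomes a counter that non-cooperative events (fork, mremap, remove) bump while they are in flight and drop when they complete, so that finishing one event cannot hide another that is still pending, as a single true/false flag could. The following is a minimal, self-contained userspace sketch of that counting pattern using C11 atomics; it is an illustrative analogue under those assumptions, not kernel code, and `event_start()`/`event_end()`/`mcopy_atomic_sketch()` are hypothetical names.

```c
/*
 * Illustrative userspace analogue of the mmap_changing conversion.
 * This sketches the counting pattern only; the real writers live in
 * fs/userfaultfd.c and use the kernel atomic_t API
 * (atomic_inc()/atomic_dec()/atomic_read()).
 */
#include <stdatomic.h>
#include <stdio.h>

#define EAGAIN 11

/* Stand-in for the mmap_changing field in struct userfaultfd_ctx. */
static atomic_int mmap_changing = 0;

/* Writer side: a non-cooperative event (fork/mremap/remove) in flight. */
static void event_start(void) { atomic_fetch_add(&mmap_changing, 1); }
static void event_end(void)   { atomic_fetch_sub(&mmap_changing, 1); }

/* Reader side: mirrors the -EAGAIN check in __mcopy_atomic() above. */
static int mcopy_atomic_sketch(void)
{
	if (atomic_load(&mmap_changing))
		return -EAGAIN;	/* caller is expected to retry later */
	/* ... perform the copy ... */
	return 0;
}

int main(void)
{
	event_start();		/* first event begins   */
	event_start();		/* second event begins  */
	event_end();		/* first event finishes */

	/*
	 * With a single bool, clearing the flag at this point would
	 * wrongly let the copy proceed even though the second event is
	 * still pending; the counter keeps the copy path returning
	 * -EAGAIN until it drops back to zero.
	 */
	printf("copy -> %d\n", mcopy_atomic_sketch());	/* prints -11 (-EAGAIN) */

	event_end();
	printf("copy -> %d\n", mcopy_atomic_sketch());	/* prints 0 */
	return 0;
}
```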