mm: replace vma->vm_flags indirect modification in ksm_madvise
Author: Suren Baghdasaryan <surenb@google.com>
Thu, 26 Jan 2023 19:37:50 +0000 (11:37 -0800)
Committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 10 Feb 2023 00:51:39 +0000 (16:51 -0800)
Replace indirect modifications to vma->vm_flags with calls to modifier
functions to be able to track flag changes and to keep vma locking
correctness.

Link: https://lkml.kernel.org/r/20230126193752.297968-6-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> [powerpc]
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arjun Roy <arjunroy@google.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: David Rientjes <rientjes@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam R. Howlett <Liam.Howlett@Oracle.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Minchan Kim <minchan@google.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Peter Oskolkov <posk@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Sebastian Reichel <sebastian.reichel@collabora.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Soheil Hassas Yeganeh <soheil@google.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/powerpc/kvm/book3s_hv_uvmem.c
arch/s390/mm/gmap.c

index 1d67baa..709ebd5 100644 (file)
@@ -393,6 +393,7 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
 {
        unsigned long gfn = memslot->base_gfn;
        unsigned long end, start = gfn_to_hva(kvm, gfn);
+       unsigned long vm_flags;
        int ret = 0;
        struct vm_area_struct *vma;
        int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
@@ -409,12 +410,15 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
                        ret = H_STATE;
                        break;
                }
+               /* Copy vm_flags to avoid partial modifications in ksm_madvise */
+               vm_flags = vma->vm_flags;
                ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
-                         merge_flag, &vma->vm_flags);
+                         merge_flag, &vm_flags);
                if (ret) {
                        ret = H_STATE;
                        break;
                }
+               vm_flags_reset(vma, vm_flags);
                start = vma->vm_end;
        } while (end > vma->vm_end);
 
index ab83659..5a716bd 100644 (file)
@@ -2587,14 +2587,18 @@ int gmap_mark_unmergeable(void)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
+       unsigned long vm_flags;
        int ret;
        VMA_ITERATOR(vmi, mm, 0);
 
        for_each_vma(vmi, vma) {
+               /* Copy vm_flags to avoid partial modifications in ksm_madvise */
+               vm_flags = vma->vm_flags;
                ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
-                                 MADV_UNMERGEABLE, &vma->vm_flags);
+                                 MADV_UNMERGEABLE, &vm_flags);
                if (ret)
                        return ret;
+               vm_flags_reset(vma, vm_flags);
        }
        mm->def_flags &= ~VM_MERGEABLE;
        return 0;