Merge tag 'v4.18-rc6' into for-4.19/block2
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ba8fdc0..a9e1e09 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -552,7 +552,7 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 
        VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-       if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {
+       if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
                put_page(page);
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
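The only behavioral difference in the _delay variant is that the caller may now be throttled when the memcg's swap device is congested, instead of charging at full speed. A sketch of the wrapper, following the memcontrol change this merge pulls in (mm/memcontrol.c; shown for context, the in-tree body may differ in detail):

    int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
                                    gfp_t gfp_mask, struct mem_cgroup **memcgp,
                                    bool compound)
    {
            struct mem_cgroup *memcg;
            int ret;

            /* Charge exactly as mem_cgroup_try_charge() would ... */
            ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound);
            memcg = *memcgp;
            /* ... then sleep briefly if this memcg's swap device is congested. */
            mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
            return ret;
    }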
@@ -1131,8 +1131,8 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
 
-       pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
-                       GFP_KERNEL);
+       pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
+                             GFP_KERNEL);
        if (unlikely(!pages)) {
                ret |= VM_FAULT_OOM;
                goto out;
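kmalloc_array() makes the size computation overflow-safe: with a bare kmalloc(), a large n * size product could wrap around and silently return a short buffer. Roughly, the helper in include/linux/slab.h behaves like the sketch below (simplified; the in-tree version also special-cases compile-time-constant arguments):

    static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
    {
            size_t bytes;

            /* Fail the allocation rather than let n * size wrap around. */
            if (unlikely(check_mul_overflow(n, size, &bytes)))
                    return NULL;
            return __kmalloc(bytes, flags);
    }

Here HPAGE_PMD_NR * sizeof(struct page *) cannot overflow in practice, but the conversion keeps this call site uniform with the treewide kmalloc() -> kmalloc_array() cleanup.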
@@ -1142,7 +1142,7 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
                pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
                                               vmf->address, page_to_nid(page));
                if (unlikely(!pages[i] ||
-                            mem_cgroup_try_charge(pages[i], vma->vm_mm,
+                            mem_cgroup_try_charge_delay(pages[i], vma->vm_mm,
                                     GFP_KERNEL, &memcg, false))) {
                        if (pages[i])
                                put_page(pages[i]);
@@ -1312,7 +1312,7 @@ alloc:
                goto out;
        }
 
-       if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
+       if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm,
                                        huge_gfp, &memcg, true))) {
                put_page(new_page);
                split_huge_pmd(vma, vmf->pmd, vmf->address);
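These two hunks apply the same throttled-charge substitution to the remaining THP charge sites: the write-protect fallback loop charges each of the HPAGE_PMD_NR small pages individually (compound == false), while the alloc path in do_huge_pmd_wp_page() charges the freshly allocated huge page as one compound unit (compound == true).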
@@ -2084,6 +2084,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                if (vma_is_dax(vma))
                        return;
                page = pmd_page(_pmd);
+               if (!PageDirty(page) && pmd_dirty(_pmd))
+                       set_page_dirty(page);
                if (!PageReferenced(page) && pmd_young(_pmd))
                        SetPageReferenced(page);
                page_remove_rmap(page, true);
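The new PageDirty transfer closes a data-loss window: for a PMD-mapped page the hardware dirty bit may live only in the PMD, so once page_remove_rmap() drops the mapping, a pmd_dirty() bit that was never folded into the struct page would vanish and the page could later be reclaimed without writeback. The check deliberately sits before page_remove_rmap(), mirroring the existing PageReferenced()/pmd_young() handling. Annotated (comments added here for explanation, not present in the tree):

    if (!PageDirty(page) && pmd_dirty(_pmd))
            set_page_dirty(page);           /* fold hardware dirty bit into the page */
    if (!PageReferenced(page) && pmd_young(_pmd))
            SetPageReferenced(page);        /* likewise for the accessed bit */
    page_remove_rmap(page, true);           /* after this, the PMD's A/D bits are gone */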