Merge tag 'mm-stable-2024-03-13-20-04' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 186a020..85cddf9 100644
@@ -33,8 +33,12 @@ static void __ref *vmem_alloc_pages(unsigned int order)
        return memblock_alloc(size, size);
 }
 
-static void vmem_free_pages(unsigned long addr, int order)
+static void vmem_free_pages(unsigned long addr, int order, struct vmem_altmap *altmap)
 {
+       if (altmap) {
+               vmem_altmap_free(altmap, 1 << order);
+               return;
+       }
        /* We don't expect boot memory to be removed ever. */
        if (!slab_is_available() ||
            WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
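Note on the hunk above: when an altmap is supplied, the freed pages were never taken from memblock or the buddy allocator in the first place, so vmem_free_pages() simply hands them back to the altmap's bookkeeping and returns early. The toy program below is a minimal userspace sketch of that bookkeeping; the struct and helpers are hypothetical stand-ins loosely modelled on struct vmem_altmap and vmem_altmap_free(), not the kernel's implementation.

    /*
     * Toy model of device-provided ("altmap") page accounting.  The field
     * names follow struct vmem_altmap, but the bookkeeping is simplified.
     */
    #include <assert.h>
    #include <stdio.h>

    struct toy_altmap {
            unsigned long base_pfn;   /* first pfn of the device range    */
            unsigned long reserve;    /* pfns set aside, never handed out */
            unsigned long free;       /* pfns usable for vmemmap pages    */
            unsigned long alloc;      /* pfns currently handed out        */
    };

    /* counterpart of the vmem_altmap_free(altmap, 1 << order) call above */
    static void toy_altmap_free(struct toy_altmap *a, unsigned long nr_pfns)
    {
            assert(a->alloc >= nr_pfns);
            a->alloc -= nr_pfns;              /* pages return to the pool */
    }

    static unsigned long toy_altmap_alloc(struct toy_altmap *a, unsigned long nr_pfns)
    {
            assert(a->free - a->alloc >= nr_pfns);
            unsigned long pfn = a->base_pfn + a->reserve + a->alloc;

            a->alloc += nr_pfns;
            return pfn;
    }

    int main(void)
    {
            struct toy_altmap a = { .base_pfn = 0x100000, .reserve = 8, .free = 4096 };
            unsigned long pfn = toy_altmap_alloc(&a, 256);  /* 1 MiB worth of 4 KiB pages */

            toy_altmap_free(&a, 256);                       /* what the new branch does */
            printf("pfn %#lx, outstanding %lu\n", pfn, a.alloc);
            return 0;
    }
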
@@ -156,7 +160,8 @@ static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
 
 /* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
 static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
-                                 unsigned long end, bool add, bool direct)
+                                 unsigned long end, bool add, bool direct,
+                                 struct vmem_altmap *altmap)
 {
        unsigned long prot, pages = 0;
        int ret = -ENOMEM;
@@ -172,11 +177,11 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
                        if (pte_none(*pte))
                                continue;
                        if (!direct)
-                               vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
+                               vmem_free_pages((unsigned long)pfn_to_virt(pte_pfn(*pte)), get_order(PAGE_SIZE), altmap);
                        pte_clear(&init_mm, addr, pte);
                } else if (pte_none(*pte)) {
                        if (!direct) {
-                               void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
+                               void *new_page = vmemmap_alloc_block_buf(PAGE_SIZE, NUMA_NO_NODE, altmap);
 
                                if (!new_page)
                                        goto out;
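Two details in this hunk: the literal order 0 becomes get_order(PAGE_SIZE), which still evaluates to 0, so the only behavioural change on the free side is the new altmap argument; and the allocation switches from vmemmap_alloc_block() to vmemmap_alloc_block_buf(), which can take the backing page out of the device range when an altmap is given and falls back to a normal allocation when it is NULL. The snippet below only illustrates the get_order() arithmetic, assuming s390's 4 KiB base pages and 1 MiB PMD_SIZE; toy_get_order() is an illustration, not the kernel macro.

    #include <stdio.h>

    #define TOY_PAGE_SHIFT 12UL                  /* s390: 4 KiB base pages   */
    #define TOY_PAGE_SIZE  (1UL << TOY_PAGE_SHIFT)
    #define TOY_PMD_SIZE   (1UL << 20)           /* s390: 1 MiB segment size */

    /* smallest order such that (PAGE_SIZE << order) covers size */
    static unsigned int toy_get_order(unsigned long size)
    {
            unsigned int order = 0;

            while ((TOY_PAGE_SIZE << order) < size)
                    order++;
            return order;
    }

    int main(void)
    {
            printf("get_order(PAGE_SIZE) = %u\n", toy_get_order(TOY_PAGE_SIZE)); /* 0 */
            printf("get_order(PMD_SIZE)  = %u\n", toy_get_order(TOY_PMD_SIZE));  /* 8, i.e. 256 pages */
            return 0;
    }
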
@@ -213,7 +218,8 @@ static void try_free_pte_table(pmd_t *pmd, unsigned long start)
 
 /* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
 static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
-                                 unsigned long end, bool add, bool direct)
+                                 unsigned long end, bool add, bool direct,
+                                 struct vmem_altmap *altmap)
 {
        unsigned long next, prot, pages = 0;
        int ret = -ENOMEM;
@@ -230,15 +236,15 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
                if (!add) {
                        if (pmd_none(*pmd))
                                continue;
-                       if (pmd_large(*pmd)) {
+                       if (pmd_leaf(*pmd)) {
                                if (IS_ALIGNED(addr, PMD_SIZE) &&
                                    IS_ALIGNED(next, PMD_SIZE)) {
                                        if (!direct)
-                                               vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
+                                               vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE), altmap);
                                        pmd_clear(pmd);
                                        pages++;
                                } else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
-                                       vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
+                                       vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE), altmap);
                                        pmd_clear(pmd);
                                }
                                continue;
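For the 1 MiB vmemmap blocks the freeing is more careful: a block is only given back, to the altmap or the page allocator, when the range being removed covers the whole PMD; otherwise vmemmap_unuse_sub_pmd() decides whether the last user of the block has gone away. The toy below models that shared-block idea with a plain counter, which is an assumption for illustration only; the s390 code instead tracks unused ranges inside the vmemmap block itself.

    /*
     * Toy model of sub-PMD tracking: several memory sections share one
     * PMD-sized vmemmap block, and the block may only be freed once no
     * section uses any part of it anymore.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int users;      /* users of one PMD-sized vmemmap block */

    static void toy_use_sub_pmd(void)   { users++; }
    static bool toy_unuse_sub_pmd(void) { return --users == 0; } /* true: free it */

    int main(void)
    {
            toy_use_sub_pmd();      /* section A populates into the block */
            toy_use_sub_pmd();      /* section B shares the same block    */
            printf("free after removing A? %d\n", toy_unuse_sub_pmd());  /* 0 */
            printf("free after removing B? %d\n", toy_unuse_sub_pmd());  /* 1 */
            return 0;
    }
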
@@ -261,7 +267,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
                                 * page tables since vmemmap_populate gets
                                 * called for each section separately.
                                 */
-                               new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
+                               new_page = vmemmap_alloc_block_buf(PMD_SIZE, NUMA_NO_NODE, altmap);
                                if (new_page) {
                                        set_pmd(pmd, __pmd(__pa(new_page) | prot));
                                        if (!IS_ALIGNED(addr, PMD_SIZE) ||
@@ -275,12 +281,12 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
                        if (!pte)
                                goto out;
                        pmd_populate(&init_mm, pmd, pte);
-               } else if (pmd_large(*pmd)) {
+               } else if (pmd_leaf(*pmd)) {
                        if (!direct)
                                vmemmap_use_sub_pmd(addr, next);
                        continue;
                }
-               ret = modify_pte_table(pmd, addr, next, add, direct);
+               ret = modify_pte_table(pmd, addr, next, add, direct, altmap);
                if (ret)
                        goto out;
                if (!add)
@@ -302,12 +308,12 @@ static void try_free_pmd_table(pud_t *pud, unsigned long start)
        for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
                if (!pmd_none(*pmd))
                        return;
-       vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
+       vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER, NULL);
        pud_clear(pud);
 }
 
 static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
-                           bool add, bool direct)
+                           bool add, bool direct, struct vmem_altmap *altmap)
 {
        unsigned long next, prot, pages = 0;
        int ret = -ENOMEM;
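Note that the try_free_*_table() helpers keep passing NULL: the region and segment tables freed at CRST_ALLOC_ORDER are allocated from normal kernel memory (memblock early on, the page allocator later), never from an altmap, so there is nothing to hand back to the device range. Only the vmemmap data pages themselves can come out of the altmap.
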
@@ -323,7 +329,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
                if (!add) {
                        if (pud_none(*pud))
                                continue;
-                       if (pud_large(*pud)) {
+                       if (pud_leaf(*pud)) {
                                if (IS_ALIGNED(addr, PUD_SIZE) &&
                                    IS_ALIGNED(next, PUD_SIZE)) {
                                        pud_clear(pud);
@@ -344,10 +350,10 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
                        if (!pmd)
                                goto out;
                        pud_populate(&init_mm, pud, pmd);
-               } else if (pud_large(*pud)) {
+               } else if (pud_leaf(*pud)) {
                        continue;
                }
-               ret = modify_pmd_table(pud, addr, next, add, direct);
+               ret = modify_pmd_table(pud, addr, next, add, direct, altmap);
                if (ret)
                        goto out;
                if (!add)
@@ -370,12 +376,12 @@ static void try_free_pud_table(p4d_t *p4d, unsigned long start)
                if (!pud_none(*pud))
                        return;
        }
-       vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
+       vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER, NULL);
        p4d_clear(p4d);
 }
 
 static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
-                           bool add, bool direct)
+                           bool add, bool direct, struct vmem_altmap *altmap)
 {
        unsigned long next;
        int ret = -ENOMEM;
@@ -394,7 +400,7 @@ static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
                                goto out;
                        p4d_populate(&init_mm, p4d, pud);
                }
-               ret = modify_pud_table(p4d, addr, next, add, direct);
+               ret = modify_pud_table(p4d, addr, next, add, direct, altmap);
                if (ret)
                        goto out;
                if (!add)
@@ -415,12 +421,12 @@ static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
                if (!p4d_none(*p4d))
                        return;
        }
-       vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
+       vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER, NULL);
        pgd_clear(pgd);
 }
 
 static int modify_pagetable(unsigned long start, unsigned long end, bool add,
-                           bool direct)
+                           bool direct, struct vmem_altmap *altmap)
 {
        unsigned long addr, next;
        int ret = -ENOMEM;
@@ -445,7 +451,7 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add,
                                goto out;
                        pgd_populate(&init_mm, pgd, p4d);
                }
-               ret = modify_p4d_table(pgd, addr, next, add, direct);
+               ret = modify_p4d_table(pgd, addr, next, add, direct, altmap);
                if (ret)
                        goto out;
                if (!add)
@@ -458,14 +464,16 @@ out:
        return ret;
 }
 
-static int add_pagetable(unsigned long start, unsigned long end, bool direct)
+static int add_pagetable(unsigned long start, unsigned long end, bool direct,
+                        struct vmem_altmap *altmap)
 {
-       return modify_pagetable(start, end, true, direct);
+       return modify_pagetable(start, end, true, direct, altmap);
 }
 
-static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
+static int remove_pagetable(unsigned long start, unsigned long end, bool direct,
+                           struct vmem_altmap *altmap)
 {
-       return modify_pagetable(start, end, false, direct);
+       return modify_pagetable(start, end, false, direct, altmap);
 }
 
 /*
@@ -474,7 +482,7 @@ static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
 static int vmem_add_range(unsigned long start, unsigned long size)
 {
        start = (unsigned long)__va(start);
-       return add_pagetable(start, start + size, true);
+       return add_pagetable(start, start + size, true, NULL);
 }
 
 /*
@@ -483,7 +491,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
 static void vmem_remove_range(unsigned long start, unsigned long size)
 {
        start = (unsigned long)__va(start);
-       remove_pagetable(start, start + size, true);
+       remove_pagetable(start, start + size, true, NULL);
 }
 
 /*
@@ -496,9 +504,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 
        mutex_lock(&vmem_mutex);
        /* We don't care about the node, just use NUMA_NO_NODE on allocations */
-       ret = add_pagetable(start, end, false);
+       ret = add_pagetable(start, end, false, altmap);
        if (ret)
-               remove_pagetable(start, end, false);
+               remove_pagetable(start, end, false, altmap);
        mutex_unlock(&vmem_mutex);
        return ret;
 }
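vmemmap_populate() keeps its add-then-undo shape: if add_pagetable() fails partway through, the same range is torn down again, and the altmap is now threaded through both calls so that any vmemmap pages already taken from the device range are returned to it. The self-contained toy below sketches that contract; populate_range(), teardown_range() and their bookkeeping are hypothetical stand-ins, not the kernel functions.

    #include <stdio.h>

    struct toy_altmap { unsigned long alloc; };

    /* hypothetical stand-in for add_pagetable(..., false, altmap) */
    static int populate_range(unsigned long pages, struct toy_altmap *a, int fail_after)
    {
            for (unsigned long i = 0; i < pages; i++) {
                    if ((int)i == fail_after)
                            return -1;      /* simulate -ENOMEM halfway through */
                    if (a)
                            a->alloc++;     /* one backing page taken from the altmap */
            }
            return 0;
    }

    /* hypothetical stand-in for remove_pagetable(..., false, altmap) */
    static void teardown_range(struct toy_altmap *a)
    {
            if (a)
                    a->alloc = 0;           /* rollback returns everything taken */
    }

    int main(void)
    {
            struct toy_altmap a = { 0 };
            int ret = populate_range(1024, &a, 512);        /* force the failure path */

            if (ret)
                    teardown_range(&a);                     /* same range, same altmap */
            printf("ret=%d outstanding=%lu\n", ret, a.alloc);
            return 0;
    }
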
@@ -509,7 +517,7 @@ void vmemmap_free(unsigned long start, unsigned long end,
                  struct vmem_altmap *altmap)
 {
        mutex_lock(&vmem_mutex);
-       remove_pagetable(start, end, false);
+       remove_pagetable(start, end, false, altmap);
        mutex_unlock(&vmem_mutex);
 }
 
@@ -591,7 +599,7 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
                if (!pmd)
                        goto out;
                pud_populate(&init_mm, pud, pmd);
-       } else if (WARN_ON_ONCE(pud_large(*pud))) {
+       } else if (WARN_ON_ONCE(pud_leaf(*pud))) {
                goto out;
        }
        pmd = pmd_offset(pud, addr);
@@ -602,7 +610,7 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
                if (!pte)
                        goto out;
                pmd_populate(&init_mm, pmd, pte);
-       } else if (WARN_ON_ONCE(pmd_large(*pmd))) {
+       } else if (WARN_ON_ONCE(pmd_leaf(*pmd))) {
                goto out;
        }
        ptep = pte_offset_kernel(pmd, addr);
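
The pmd_large()/pud_large() to pmd_leaf()/pud_leaf() changes scattered through this diff are the mechanical part of a tree-wide rename: the predicates are unchanged in meaning and still answer whether the entry maps a large page directly rather than pointing at a lower-level table, so no functional change is intended there.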