Merge tag 'iommu-updates-v5.15' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b2ec7f7..3824dc1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -25,6 +25,7 @@
 #include <linux/notifier.h>
 #include <linux/rbtree.h>
 #include <linux/xarray.h>
+#include <linux/io.h>
 #include <linux/rcupdate.h>
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
@@ -36,6 +37,7 @@
 #include <linux/overflow.h>
 #include <linux/pgtable.h>
 #include <linux/uaccess.h>
+#include <linux/hugetlb.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
@@ -83,10 +85,11 @@ static void free_work(struct work_struct *w)
 /*** Page table manipulation functions ***/
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                        phys_addr_t phys_addr, pgprot_t prot,
-                       pgtbl_mod_mask *mask)
+                       unsigned int max_page_shift, pgtbl_mod_mask *mask)
 {
        pte_t *pte;
        u64 pfn;
+       unsigned long size = PAGE_SIZE;
 
        pfn = phys_addr >> PAGE_SHIFT;
        pte = pte_alloc_kernel_track(pmd, addr, mask);
@@ -94,9 +97,22 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                return -ENOMEM;
        do {
                BUG_ON(!pte_none(*pte));
+
+#ifdef CONFIG_HUGETLB_PAGE
+               size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
+               if (size != PAGE_SIZE) {
+                       pte_t entry = pfn_pte(pfn, prot);
+
+                       entry = pte_mkhuge(entry);
+                       entry = arch_make_huge_pte(entry, ilog2(size), 0);
+                       set_huge_pte_at(&init_mm, addr, pte, entry);
+                       pfn += PFN_DOWN(size);
+                       continue;
+               }
+#endif
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                pfn++;
-       } while (pte++, addr += PAGE_SIZE, addr != end);
+       } while (pte += PFN_DOWN(size), addr += size, addr != end);
        *mask |= PGTBL_PTE_MODIFIED;
        return 0;
 }
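
For context, the step size in this hunk comes from arch_vmap_pte_range_map_size(); when an architecture does not provide one, the generic fallback (in include/linux/vmalloc.h, as far as I can tell) just returns PAGE_SIZE, so the loop degenerates to the old one-page-per-iteration behaviour. A sketch of that fallback plus a purely illustrative arch override (the 512K size and alignment checks below are invented for the example, not taken from a real architecture):

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							  u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;	/* generic case: one base page per iteration */
}
#endif

/* Illustrative only: an arch can return a larger, suitably aligned step. */
static unsigned long example_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
						     u64 pfn, unsigned int max_page_shift)
{
	if (max_page_shift >= ilog2(SZ_512K) && end - addr >= SZ_512K &&
	    IS_ALIGNED(addr, SZ_512K) && IS_ALIGNED(pfn << PAGE_SHIFT, SZ_512K))
		return SZ_512K;

	return PAGE_SIZE;
}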
@@ -145,7 +161,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                        continue;
                }
 
-               if (vmap_pte_range(pmd, addr, next, phys_addr, prot, mask))
+               if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
                        return -ENOMEM;
        } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
@@ -771,6 +787,28 @@ unsigned long vmalloc_nr_pages(void)
        return atomic_long_read(&nr_vmalloc_pages);
 }
 
+static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
+{
+       struct vmap_area *va = NULL;
+       struct rb_node *n = vmap_area_root.rb_node;
+
+       while (n) {
+               struct vmap_area *tmp;
+
+               tmp = rb_entry(n, struct vmap_area, rb_node);
+               if (tmp->va_end > addr) {
+                       va = tmp;
+                       if (tmp->va_start <= addr)
+                               break;
+
+                       n = n->rb_left;
+               } else
+                       n = n->rb_right;
+       }
+
+       return va;
+}
+
 static struct vmap_area *__find_vmap_area(unsigned long addr)
 {
        struct rb_node *n = vmap_area_root.rb_node;
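
Unlike __find_vmap_area(), which only matches an area that actually contains addr, the new helper returns the lowest area whose va_end lies above addr: the area covering addr if one exists, otherwise the first area after it. A minimal, self-contained analogue of the same search over a sorted array of non-overlapping ranges (names invented for illustration):

struct range { unsigned long start, end; };

/* Return the leftmost range whose end is above addr, or NULL. */
static const struct range *first_range_exceeding(const struct range *r, int n,
						 unsigned long addr)
{
	const struct range *found = NULL;
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (r[mid].end > addr) {
			found = &r[mid];	/* candidate ending above addr */
			if (r[mid].start <= addr)
				break;		/* addr falls inside this range */
			hi = mid - 1;		/* look for a lower candidate */
		} else {
			lo = mid + 1;
		}
	}

	return found;
}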
@@ -1463,6 +1501,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
                                int node, gfp_t gfp_mask)
 {
        struct vmap_area *va;
+       unsigned long freed;
        unsigned long addr;
        int purged = 0;
        int ret;
@@ -1526,13 +1565,12 @@ overflow:
                goto retry;
        }
 
-       if (gfpflags_allow_blocking(gfp_mask)) {
-               unsigned long freed = 0;
-               blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
-               if (freed > 0) {
-                       purged = 0;
-                       goto retry;
-               }
+       freed = 0;
+       blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
+
+       if (freed > 0) {
+               purged = 0;
+               goto retry;
        }
 
        if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
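
Dropping the gfpflags_allow_blocking() guard means the vmap_notify_list chain is now consulted on every failed vmap area allocation. For reference, a hedged sketch of how a driver can sit on that chain with the existing register_vmap_purge_notifier() API and report how much it released through the unsigned long the chain passes around; my_driver_release_vmalloc_caches() is a hypothetical helper, not an existing function:

#include <linux/notifier.h>
#include <linux/vmalloc.h>

static int my_vmap_purge_notify(struct notifier_block *nb, unsigned long event, void *ptr)
{
	unsigned long *freed = ptr;	/* &freed passed by alloc_vmap_area() */

	/* Hypothetical: drop driver-private vmalloc-backed caches. */
	*freed += my_driver_release_vmalloc_caches();

	return NOTIFY_OK;
}

static struct notifier_block my_vmap_purge_nb = {
	.notifier_call = my_vmap_purge_notify,
};

/* Driver init/exit (illustrative):
 *	register_vmap_purge_notifier(&my_vmap_purge_nb);
 *	unregister_vmap_purge_notifier(&my_vmap_purge_nb);
 */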
@@ -1592,6 +1630,7 @@ static DEFINE_MUTEX(vmap_purge_lock);
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
 
+#ifdef CONFIG_X86_64
 /*
  * called before a call to iounmap() if the caller wants vm_area_struct's
  * immediately freed.
@@ -1600,6 +1639,7 @@ void set_iounmap_nonlazy(void)
 {
        atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
 }
+#endif /* CONFIG_X86_64 */
 
 /*
  * Purges all lazily-freed vmap areas.
@@ -2761,7 +2801,7 @@ EXPORT_SYMBOL_GPL(vmap_pfn);
 
 static inline unsigned int
 vm_area_alloc_pages(gfp_t gfp, int nid,
-               unsigned int order, unsigned long nr_pages, struct page **pages)
+               unsigned int order, unsigned int nr_pages, struct page **pages)
 {
        unsigned int nr_allocated = 0;
 
@@ -2771,10 +2811,32 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
         * to failures, fall back to a single page allocator that is
         * more permissive.
         */
-       if (!order)
-               nr_allocated = alloc_pages_bulk_array_node(
-                       gfp, nid, nr_pages, pages);
-       else
+       if (!order) {
+               while (nr_allocated < nr_pages) {
+                       unsigned int nr, nr_pages_request;
+
+                       /*
+                        * The maximum allowed request is hard-coded to 100
+                        * pages per call, in order to avoid long preemption-off
+                        * sections in the bulk allocator, so the per-call
+                        * request size stays in the range [1:100].
+                        */
+                       nr_pages_request = min(100U, nr_pages - nr_allocated);
+
+                       nr = alloc_pages_bulk_array_node(gfp, nid,
+                               nr_pages_request, pages + nr_allocated);
+
+                       nr_allocated += nr;
+                       cond_resched();
+
+                       /*
+                        * If no pages, or only part of the request, were
+                        * obtained, fall back to a single page allocator.
+                        */
+                       if (nr != nr_pages_request)
+                               break;
+               }
+       } else
                /*
                 * Compound pages are required for remap_vmalloc_page if
                 * high-order pages are used.
@@ -2798,9 +2860,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                for (i = 0; i < (1U << order); i++)
                        pages[nr_allocated + i] = page + i;
 
-               if (gfpflags_allow_blocking(gfp))
-                       cond_resched();
-
+               cond_resched();
                nr_allocated += 1U << order;
        }
 
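
Taken together, the order-0 path now fills the page array in bounded batches: at most 100 pages per bulk call, a cond_resched() between batches, and an early exit as soon as the bulk allocator comes up short so the single-page fallback can take over. The same pattern, lifted out as a standalone helper purely for illustration (the function name is invented):

static unsigned int bulk_alloc_in_batches(gfp_t gfp, int nid,
					  unsigned int nr_pages, struct page **pages)
{
	unsigned int nr_allocated = 0;

	while (nr_allocated < nr_pages) {
		/* Cap each bulk request to bound preemption-off time. */
		unsigned int request = min(100U, nr_pages - nr_allocated);
		unsigned int got;

		got = alloc_pages_bulk_array_node(gfp, nid, request,
						  pages + nr_allocated);
		nr_allocated += got;
		cond_resched();

		/* A short return means the allocator is struggling; stop here. */
		if (got != request)
			break;
	}

	return nr_allocated;
}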
@@ -2912,8 +2972,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
                return NULL;
        }
 
-       if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP) &&
-                       arch_vmap_pmd_supported(prot)) {
+       if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
                unsigned long size_per_node;
 
                /*
@@ -2926,11 +2985,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
                size_per_node = size;
                if (node == NUMA_NO_NODE)
                        size_per_node /= num_online_nodes();
-               if (size_per_node >= PMD_SIZE) {
+               if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
                        shift = PMD_SHIFT;
-                       align = max(real_align, 1UL << shift);
-                       size = ALIGN(real_size, 1UL << shift);
-               }
+               else
+                       shift = arch_vmap_pte_supported_shift(size_per_node);
+
+               align = max(real_align, 1UL << shift);
+               size = ALIGN(real_size, 1UL << shift);
        }
 
 again:
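
The mapping-size decision is now two-tiered: use a PMD mapping when arch_vmap_pmd_supported() allows it and each node's share of the allocation is at least PMD_SIZE, otherwise ask the architecture for a PTE-level huge shift. On architectures that do not implement the new hook, the generic fallback (believed to live in include/linux/vmalloc.h) keeps today's behaviour by returning PAGE_SHIFT; sketched here for reference:

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	/* No PTE-level huge mappings: stick to base pages. */
	return PAGE_SHIFT;
}
#endif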
@@ -3248,9 +3309,14 @@ long vread(char *buf, char *addr, unsigned long count)
                count = -(unsigned long) addr;
 
        spin_lock(&vmap_area_lock);
-       va = __find_vmap_area((unsigned long)addr);
+       va = find_vmap_area_exceed_addr((unsigned long)addr);
        if (!va)
                goto finished;
+
+       /* no intersection with a live vmap_area */
+       if ((unsigned long)addr + count <= va->va_start)
+               goto finished;
+
        list_for_each_entry_from(va, &vmap_area_list, list) {
                if (!count)
                        break;
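
Since find_vmap_area_exceed_addr() may return an area that starts above addr, vread() has to verify that the requested window actually reaches it before walking the list. A small worked example with made-up addresses:

/*
 * Illustrative numbers only: suppose the first live area is
 * [0xffff800010000000, 0xffff800010004000) and the caller asks for
 * addr = 0xffff80000fff0000 with count = 0x1000.  The lookup returns
 * that area, but addr + count = 0xffff80000fff1000 <= va_start, so the
 * ranges do not overlap and vread() bails out to "finished" without
 * copying anything.
 */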