Merge branch 'akpm' (patches from Andrew)
[linux-2.6-microblaze.git] / mm / vmalloc.c
index d5cd528..d77830f 100644
 #include "internal.h"
 #include "pgalloc-track.h"
 
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;
+
+static int __init set_nohugeiomap(char *str)
+{
+       ioremap_max_page_shift = PAGE_SHIFT;
+       return 0;
+}
+early_param("nohugeiomap", set_nohugeiomap);
+#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
 static bool __ro_after_init vmap_allow_huge = true;
 
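The hunk above introduces a run-time cap on the page size used for ioremap mappings: with CONFIG_HAVE_ARCH_HUGE_VMAP the cap defaults to "unlimited" (BITS_PER_LONG - 1) and the "nohugeiomap" boot parameter lowers it to PAGE_SHIFT, while architectures without huge-vmap support get a compile-time constant PAGE_SHIFT. Below is a minimal sketch of how such a cap constrains the mapping granularity; pick_map_shift() is a hypothetical helper used purely for illustration, the real decision is made inside vmap_range_noflush() and its per-level helpers.

/*
 * Illustrative sketch, not the kernel implementation: pick the largest
 * mapping size for one step, bounded by max_page_shift.
 */
static unsigned int pick_map_shift(unsigned long addr, unsigned long end,
				   phys_addr_t phys_addr,
				   unsigned int max_page_shift)
{
	if (max_page_shift >= PMD_SHIFT &&
	    (end - addr) >= PMD_SIZE &&
	    IS_ALIGNED(addr | phys_addr, PMD_SIZE))
		return PMD_SHIFT;	/* a huge PMD mapping is possible */

	return PAGE_SHIFT;		/* otherwise map with base pages */
}

With "nohugeiomap" on the command line, max_page_shift is PAGE_SHIFT, so the PMD branch above can never be taken and only base-page mappings are installed.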
@@ -298,15 +311,14 @@ static int vmap_range_noflush(unsigned long addr, unsigned long end,
        return err;
 }
 
-int vmap_range(unsigned long addr, unsigned long end,
-                       phys_addr_t phys_addr, pgprot_t prot,
-                       unsigned int max_page_shift)
+int ioremap_page_range(unsigned long addr, unsigned long end,
+               phys_addr_t phys_addr, pgprot_t prot)
 {
        int err;
 
-       err = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
+       err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
+                                ioremap_max_page_shift);
        flush_cache_vmap(addr, end);
-
        return err;
 }
 
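With the old vmap_range() entry point folded into ioremap_page_range(), the caller no longer chooses a maximum page shift; it is taken from ioremap_max_page_shift above, and the protection bits are additionally filtered through pgprot_nx(). For reference, the sketch below follows the rough shape of a generic ioremap caller; my_ioremap() and the trimmed size/offset handling are illustrative assumptions, not the actual implementation.

void __iomem *my_ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long vaddr;

	/* Reserve a chunk of vmalloc address space for the mapping. */
	area = get_vm_area_caller(size, VM_IOREMAP,
				  __builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	/* Fill in the page tables; huge mappings are used where allowed. */
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)vaddr;
}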
@@ -787,6 +799,28 @@ unsigned long vmalloc_nr_pages(void)
        return atomic_long_read(&nr_vmalloc_pages);
 }
 
+static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
+{
+       struct vmap_area *va = NULL;
+       struct rb_node *n = vmap_area_root.rb_node;
+
+       while (n) {
+               struct vmap_area *tmp;
+
+               tmp = rb_entry(n, struct vmap_area, rb_node);
+               if (tmp->va_end > addr) {
+                       va = tmp;
+                       if (tmp->va_start <= addr)
+                               break;
+
+                       n = n->rb_left;
+               } else
+                       n = n->rb_right;
+       }
+
+       return va;
+}
+
 static struct vmap_area *__find_vmap_area(unsigned long addr)
 {
        struct rb_node *n = vmap_area_root.rb_node;
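find_vmap_area_exceed_addr() returns the lowest vmap_area whose va_end lies above the given address, whether or not the address itself falls inside that area. As an illustration with made-up addresses: if the tree holds [0x1000, 0x2000) and [0x3000, 0x4000), a lookup of 0x1800 returns the first area (it contains the address), a lookup of 0x2800 returns the second area (the first one ending above the address), and a lookup of 0x4800 returns NULL.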
@@ -1479,6 +1513,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
                                int node, gfp_t gfp_mask)
 {
        struct vmap_area *va;
+       unsigned long freed;
        unsigned long addr;
        int purged = 0;
        int ret;
@@ -1542,13 +1577,12 @@ overflow:
                goto retry;
        }
 
-       if (gfpflags_allow_blocking(gfp_mask)) {
-               unsigned long freed = 0;
-               blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
-               if (freed > 0) {
-                       purged = 0;
-                       goto retry;
-               }
+       freed = 0;
+       blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
+
+       if (freed > 0) {
+               purged = 0;
+               goto retry;
        }
 
        if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
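The hunk above also invokes the purge notifier chain unconditionally instead of only when the gfp mask allows blocking. The chain is populated through register_vmap_purge_notifier(); a minimal sketch of a client is shown below, where my_drop_vmalloc_caches() is a hypothetical driver helper that releases cached vmalloc allocations and returns the number of pages it freed.

#include <linux/notifier.h>
#include <linux/vmalloc.h>

static int my_vmap_purge(struct notifier_block *nb,
			 unsigned long event, void *ptr)
{
	unsigned long *freed = ptr;

	/* Report how much was given back so alloc_vmap_area() retries. */
	*freed += my_drop_vmalloc_caches();
	return NOTIFY_OK;
}

static struct notifier_block my_vmap_purge_nb = {
	.notifier_call = my_vmap_purge,
};

/* e.g. in the driver's init path: */
/* register_vmap_purge_notifier(&my_vmap_purge_nb); */

A non-zero freed count makes alloc_vmap_area() clear the purged flag and retry, as the hunk above shows.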
@@ -2779,7 +2813,7 @@ EXPORT_SYMBOL_GPL(vmap_pfn);
 
 static inline unsigned int
 vm_area_alloc_pages(gfp_t gfp, int nid,
-               unsigned int order, unsigned long nr_pages, struct page **pages)
+               unsigned int order, unsigned int nr_pages, struct page **pages)
 {
        unsigned int nr_allocated = 0;
 
@@ -2789,10 +2823,32 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
          * to failures, fall back to a single page allocator that is
         * more permissive.
         */
-       if (!order)
-               nr_allocated = alloc_pages_bulk_array_node(
-                       gfp, nid, nr_pages, pages);
-       else
+       if (!order) {
+               while (nr_allocated < nr_pages) {
+                       unsigned int nr, nr_pages_request;
+
+                       /*
+                        * The maximum allowed request is hard-coded to 100
+                        * pages per call, in order to prevent a long
+                        * preemption-off scenario in the bulk allocator,
+                        * so each request stays in the range [1:100].
+                        */
+                       nr_pages_request = min(100U, nr_pages - nr_allocated);
+
+                       nr = alloc_pages_bulk_array_node(gfp, nid,
+                               nr_pages_request, pages + nr_allocated);
+
+                       nr_allocated += nr;
+                       cond_resched();
+
+                       /*
+                        * If the request was satisfied only partly (or not
+                        * at all), fall back to the single page allocator.
+                        */
+                       if (nr != nr_pages_request)
+                               break;
+               }
+       } else
                /*
                 * Compound pages are required for remap_vmalloc_page if
                 * high-order pages are used.
                 */
@@ -2816,9 +2872,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                for (i = 0; i < (1U << order); i++)
                        pages[nr_allocated + i] = page + i;
 
-               if (gfpflags_allow_blocking(gfp))
-                       cond_resched();
-
+               cond_resched();
                nr_allocated += 1U << order;
        }
 
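Worked example for the new order-0 path: a request for 250 pages is served as bulk requests of 100, 100 and 50 pages, with a cond_resched() between them; if any of those requests comes back short, the loop breaks and the remaining pages are obtained from the single-page fallback further down in this function.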
@@ -3267,9 +3321,14 @@ long vread(char *buf, char *addr, unsigned long count)
                count = -(unsigned long) addr;
 
        spin_lock(&vmap_area_lock);
-       va = __find_vmap_area((unsigned long)addr);
+       va = find_vmap_area_exceed_addr((unsigned long)addr);
        if (!va)
                goto finished;
+
+       /* no intersection with any alive vmap_area */
+       if ((unsigned long)addr + count <= va->va_start)
+               goto finished;
+
        list_for_each_entry_from(va, &vmap_area_list, list) {
                if (!count)
                        break;
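As an illustration of the new guard in vread(): if the lowest alive area is [0x3000, 0x4000) and the caller asks for addr = 0x1000 with count = 0x1000, find_vmap_area_exceed_addr() returns that area, but since 0x1000 + 0x1000 <= 0x3000 the requested range does not intersect it, so the function bails out through the finished label without scanning the rest of the list.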