hugetlb: before freeing hugetlb page set dtor to appropriate value
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 856b175..eaa936e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3453,19 +3453,10 @@ void free_unref_page_list(struct list_head *list)
                 * comment in free_unref_page.
                 */
                migratetype = get_pcppage_migratetype(page);
-               if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
-                       if (unlikely(is_migrate_isolate(migratetype))) {
-                               list_del(&page->lru);
-                               free_one_page(page_zone(page), page, pfn, 0,
-                                                       migratetype, FPI_NONE);
-                               continue;
-                       }
-
-                       /*
-                        * Non-isolated types over MIGRATE_PCPTYPES get added
-                        * to the MIGRATE_MOVABLE pcp list.
-                        */
-                       set_pcppage_migratetype(page, MIGRATE_MOVABLE);
+               if (unlikely(is_migrate_isolate(migratetype))) {
+                       list_del(&page->lru);
+                       free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
+                       continue;
                }
 
                set_page_private(page, pfn);
@@ -3475,7 +3466,15 @@ void free_unref_page_list(struct list_head *list)
        list_for_each_entry_safe(page, next, list, lru) {
                pfn = page_private(page);
                set_page_private(page, 0);
+
+               /*
+                * Non-isolated types over MIGRATE_PCPTYPES get added
+                * to the MIGRATE_MOVABLE pcp list.
+                */
                migratetype = get_pcppage_migratetype(page);
+               if (unlikely(migratetype >= MIGRATE_PCPTYPES))
+                       migratetype = MIGRATE_MOVABLE;
+
                trace_mm_page_free_batched(page);
                free_unref_page_commit(page, pfn, migratetype, 0);
 
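Taken together, the two hunks above reshape free_unref_page_list(): the first, PFN-lookup pass now only diverts isolated pages straight to the buddy allocator, while the downgrade of migratetypes above MIGRATE_PCPTYPES to MIGRATE_MOVABLE moves into the commit pass, as a local adjustment rather than by overwriting the page's stored pcp migratetype. A condensed sketch of the resulting two-pass shape (a sketch only: the prepare step, pcp locking and batching are omitted, and the loop scaffolding is inferred from context rather than copied from the tree):

void free_unref_page_list(struct list_head *list)      /* condensed sketch */
{
        struct page *page, *next;
        unsigned long pfn;
        int migratetype;

        /* Pass 1: resolve the PFN once; isolated pages bypass the pcp lists. */
        list_for_each_entry_safe(page, next, list, lru) {
                pfn = page_to_pfn(page);
                migratetype = get_pcppage_migratetype(page);
                if (unlikely(is_migrate_isolate(migratetype))) {
                        list_del(&page->lru);
                        free_one_page(page_zone(page), page, pfn, 0,
                                      migratetype, FPI_NONE);
                        continue;
                }
                set_page_private(page, pfn);    /* stash pfn for pass 2 */
        }

        /*
         * Pass 2: queue on the pcp lists; types above MIGRATE_PCPTYPES
         * (e.g. HIGHATOMIC, CMA) go to the MOVABLE list, but only the
         * local variable changes, not the page's stored migratetype.
         */
        list_for_each_entry_safe(page, next, list, lru) {
                pfn = page_private(page);
                set_page_private(page, 0);

                migratetype = get_pcppage_migratetype(page);
                if (unlikely(migratetype >= MIGRATE_PCPTYPES))
                        migratetype = MIGRATE_MOVABLE;

                trace_mm_page_free_batched(page);
                free_unref_page_commit(page, pfn, migratetype, 0);
        }
}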
@@ -4212,7 +4211,7 @@ static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
                if (tsk_is_oom_victim(current) ||
                    (current->flags & (PF_MEMALLOC | PF_EXITING)))
                        filter &= ~SHOW_MEM_FILTER_NODES;
-       if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
+       if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
                filter &= ~SHOW_MEM_FILTER_NODES;
 
        show_mem(filter, nodemask);
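This is the first of several conversions in this diff (gfp_to_alloc_flags() and prepare_alloc_pages() below get the same treatment) that replace in_interrupt() checks with in_task(). The two are not simple negations of each other; a minimal illustration, with a made-up helper name:

/*
 * in_interrupt() is true in NMI, hardirq and any softirq section,
 * including plain task context that has merely disabled bottom halves.
 * in_task() excludes NMI, hardirq and softirq-serving context, but
 * remains true under local_bh_disable(), so "!in_task()" is the
 * narrower, more precise test for "not running in process context".
 */
static bool in_process_context(void)    /* hypothetical helper */
{
        return in_task();
}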
@@ -4550,14 +4549,14 @@ static bool __need_reclaim(gfp_t gfp_mask)
        return true;
 }
 
-void __fs_reclaim_acquire(void)
+void __fs_reclaim_acquire(unsigned long ip)
 {
-       lock_map_acquire(&__fs_reclaim_map);
+       lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
 }
 
-void __fs_reclaim_release(void)
+void __fs_reclaim_release(unsigned long ip)
 {
-       lock_map_release(&__fs_reclaim_map);
+       lock_release(&__fs_reclaim_map, ip);
 }
 
 void fs_reclaim_acquire(gfp_t gfp_mask)
@@ -4566,7 +4565,7 @@ void fs_reclaim_acquire(gfp_t gfp_mask)
 
        if (__need_reclaim(gfp_mask)) {
                if (gfp_mask & __GFP_FS)
-                       __fs_reclaim_acquire();
+                       __fs_reclaim_acquire(_RET_IP_);
 
 #ifdef CONFIG_MMU_NOTIFIER
                lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
@@ -4583,7 +4582,7 @@ void fs_reclaim_release(gfp_t gfp_mask)
 
        if (__need_reclaim(gfp_mask)) {
                if (gfp_mask & __GFP_FS)
-                       __fs_reclaim_release();
+                       __fs_reclaim_release(_RET_IP_);
        }
 }
 EXPORT_SYMBOL_GPL(fs_reclaim_release);
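The three fs_reclaim hunks above thread the caller's return address into lockdep so that reports involving the fs_reclaim map point at the fs_reclaim_acquire()/fs_reclaim_release() call site rather than at the wrapper itself. A minimal sketch of the same pattern; my_map and the my_*() helpers are invented for illustration, only the lockdep calls are real APIs:

#include <linux/lockdep.h>

static struct lockdep_map my_map =              /* illustrative map */
        STATIC_LOCKDEP_MAP_INIT("my_map", &my_map);

static void __my_acquire(unsigned long ip)
{
        /*
         * subclass 0, not a trylock, no nest lock; ip is recorded as the
         * acquire site in lockdep output.
         */
        lock_acquire_exclusive(&my_map, 0, 0, NULL, ip);
}

void my_acquire(void)
{
        __my_acquire(_RET_IP_); /* _RET_IP_: return address into our caller */
}

void my_release(void)
{
        lock_release(&my_map, _RET_IP_);
}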
@@ -4698,7 +4697,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
                 * comment for __cpuset_node_allowed().
                 */
                alloc_flags &= ~ALLOC_CPUSET;
-       } else if (unlikely(rt_task(current)) && !in_interrupt())
+       } else if (unlikely(rt_task(current)) && in_task())
                alloc_flags |= ALLOC_HARDER;
 
        alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
@@ -5158,7 +5157,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
                 * When we are in the interrupt context, it is irrelevant
                 * to the current task context. It means that any node ok.
                 */
-               if (!in_interrupt() && !ac->nodemask)
+               if (in_task() && !ac->nodemask)
                        ac->nodemask = &cpuset_current_mems_allowed;
                else
                        *alloc_flags |= ALLOC_CPUSET;
@@ -5904,6 +5903,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                " unevictable:%lu dirty:%lu writeback:%lu\n"
                " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
                " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
+               " kernel_misc_reclaimable:%lu\n"
                " free:%lu free_pcp:%lu free_cma:%lu\n",
                global_node_page_state(NR_ACTIVE_ANON),
                global_node_page_state(NR_INACTIVE_ANON),
@@ -5920,6 +5920,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                global_node_page_state(NR_SHMEM),
                global_node_page_state(NR_PAGETABLE),
                global_zone_page_state(NR_BOUNCE),
+               global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
                global_zone_page_state(NR_FREE_PAGES),
                free_pcp,
                global_zone_page_state(NR_FREE_CMA_PAGES));
@@ -6641,7 +6642,6 @@ static void __meminit zone_init_free_lists(struct zone *zone)
        }
 }
 
-#if !defined(CONFIG_FLATMEM)
 /*
  * Only struct pages that correspond to ranges defined by memblock.memory
  * are zeroed and initialized by going through __init_single_page() during
@@ -6686,13 +6686,6 @@ static void __init init_unavailable_range(unsigned long spfn,
                pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
                        node, zone_names[zone], pgcnt);
 }
-#else
-static inline void init_unavailable_range(unsigned long spfn,
-                                         unsigned long epfn,
-                                         int zone, int node)
-{
-}
-#endif
 
 static void __init memmap_init_zone_range(struct zone *zone,
                                          unsigned long start_pfn,
@@ -6722,7 +6715,7 @@ static void __init memmap_init(void)
 {
        unsigned long start_pfn, end_pfn;
        unsigned long hole_pfn = 0;
-       int i, j, zone_id, nid;
+       int i, j, zone_id = 0, nid;
 
        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
                struct pglist_data *node = NODE_DATA(nid);
@@ -6755,6 +6748,26 @@ static void __init memmap_init(void)
                init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
 }
 
+void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
+                         phys_addr_t min_addr, int nid, bool exact_nid)
+{
+       void *ptr;
+
+       if (exact_nid)
+               ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
+                                                  MEMBLOCK_ALLOC_ACCESSIBLE,
+                                                  nid);
+       else
+               ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
+                                                MEMBLOCK_ALLOC_ACCESSIBLE,
+                                                nid);
+
+       if (ptr && size > 0)
+               page_init_poison(ptr, size);
+
+       return ptr;
+}
+
 static int zone_batchsize(struct zone *zone)
 {
 #ifdef CONFIG_MMU
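The new memmap_alloc() above becomes the one place where raw memmap backing store is taken from memblock and immediately poisoned (page_init_poison() fills it with a poison pattern when struct-page poisoning is enabled); exact_nid selects between requiring the given node and allowing fallback to other nodes. A hypothetical caller, analogous to the FLATMEM path further down (nr_pages and nid are assumed to be in scope):

        /*
         * Allocate struct pages for nr_pages pages, preferring node nid
         * but allowing fallback to other nodes (exact_nid == false).
         */
        phys_addr_t size = nr_pages * sizeof(struct page);
        struct page *map = memmap_alloc(size, SMP_CACHE_BYTES,
                                        MEMBLOCK_LOW_LIMIT, nid, false);

        if (!map)
                panic("failed to allocate memmap for node %d\n", nid);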
@@ -7502,7 +7515,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
 }
 
 #ifdef CONFIG_FLATMEM
-static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
+static void __init alloc_node_mem_map(struct pglist_data *pgdat)
 {
        unsigned long __maybe_unused start = 0;
        unsigned long __maybe_unused offset = 0;
@@ -7526,8 +7539,8 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
                end = pgdat_end_pfn(pgdat);
                end = ALIGN(end, MAX_ORDER_NR_PAGES);
                size =  (end - start) * sizeof(struct page);
-               map = memblock_alloc_node(size, SMP_CACHE_BYTES,
-                                         pgdat->node_id);
+               map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
+                                  pgdat->node_id, false);
                if (!map)
                        panic("Failed to allocate %ld bytes for node %d memory map\n",
                              size, pgdat->node_id);
@@ -7548,7 +7561,7 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
 #endif
 }
 #else
-static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
+static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
 #endif /* CONFIG_FLATMEM */
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT