[PATCH] oom: handle oom_disable exiting
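
Track whether the last reclaim pass saw only unreclaimable zones, so
the caller does not declare OOM while there is still progress to be
made.  shrink_zones() records this in the new
scan_control.all_unreclaimable field: the flag starts out set and is
cleared for every zone the pass actually scans.  try_to_free_pages()
then returns success instead of falling through to an OOM kill when
its top-priority pass still had scannable zones.

Related changes visible in this diff:

- remove_mapping() now asserts that the page is locked and that the
  mapping matches page_mapping(page); the NULL-mapping check
  ("truncate got there first") moves out to the caller in
  shrink_page_list().

- Page-flag sanity checks on the reclaim hot paths are relaxed from
  BUG_ON() to VM_BUG_ON(), which compiles away unless CONFIG_DEBUG_VM
  is set.

- A new zone_is_near_oom() helper lets shrink_active_list() jump
  straight to force_reclaim_mapped once a zone has scanned three times
  its LRU size, since by that point protecting mapped pages buys
  nothing.

- kswapd (balance_pgdat()) now requires six times the LRU size to be
  scanned, up from four, before it marks a zone all_unreclaimable.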
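A minimal standalone sketch of the shrink_zones()/try_to_free_pages()
handshake (illustrative only, not kernel source: types are trimmed
down and the priority/cpuset checks of the real code are omitted):

    #include <stdio.h>

    struct scan_control { int all_unreclaimable; };
    struct zone { int all_unreclaimable; };

    /* Mirrors the new hunk in shrink_zones(): assume the worst, then
     * clear the flag for every zone we actually scan. */
    static void shrink_zones(struct zone **zones, struct scan_control *sc)
    {
            int i;

            sc->all_unreclaimable = 1;
            for (i = 0; zones[i] != NULL; i++) {
                    if (zones[i]->all_unreclaimable)
                            continue;       /* let kswapd poll it */
                    sc->all_unreclaimable = 0;
                    /* shrink_zone() would reclaim from this zone here */
            }
    }

    int main(void)
    {
            struct zone dead  = { .all_unreclaimable = 1 };
            struct zone alive = { .all_unreclaimable = 0 };
            struct zone *zones[] = { &dead, &alive, NULL };
            struct scan_control sc;

            shrink_zones(zones, &sc);
            /* 'alive' was still scannable, so do not OOM-kill yet. */
            printf("declare OOM? %s\n", sc.all_unreclaimable ? "yes" : "no");
            return 0;
    }

As long as one zone is still being scanned, the flag stays clear and
the caller treats the pass as a success rather than OOM-killing.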
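The two scan thresholds line up as follows (again a standalone sketch;
the page counts are made up and only the 3x and 6x factors come from
the patch):

    #include <stdio.h>

    struct zone {
            unsigned long nr_active, nr_inactive, pages_scanned;
    };

    /* Direct reclaim: force reclaim of mapped pages past 3x the LRU size. */
    static int zone_is_near_oom(const struct zone *z)
    {
            return z->pages_scanned >= (z->nr_active + z->nr_inactive) * 3;
    }

    /* kswapd: write off the zone only past 6x the LRU size (was 4x). */
    static int zone_all_unreclaimable(const struct zone *z)
    {
            return z->pages_scanned >= (z->nr_active + z->nr_inactive) * 6;
    }

    int main(void)
    {
            struct zone z = { .nr_active = 2048, .nr_inactive = 2048 };

            for (z.pages_scanned = 0; z.pages_scanned <= 24576; z.pages_scanned += 4096)
                    printf("scanned=%5lu near_oom=%d all_unreclaimable=%d\n",
                           z.pages_scanned, zone_is_near_oom(&z),
                           zone_all_unreclaimable(&z));
            return 0;
    }

With 4096 LRU pages, direct reclaim stops protecting mapped pages at
12288 pages scanned, while kswapd keeps polling the zone until 24576,
so a zone is not marked unreclaimable before mapped pages have been
attacked.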
---
(diff below is against mm/vmscan.c in linux-2.6-microblaze.git)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5d4c4d0..8f35d7d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -62,6 +62,8 @@ struct scan_control {
        int swap_cluster_max;
 
        int swappiness;
+
+       int all_unreclaimable;
 };
 
 /*
@@ -377,8 +379,8 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
 
 int remove_mapping(struct address_space *mapping, struct page *page)
 {
-       if (!mapping)
-               return 0;               /* truncate got there first */
+       BUG_ON(!PageLocked(page));
+       BUG_ON(mapping != page_mapping(page));
 
        write_lock_irq(&mapping->tree_lock);
 
@@ -440,7 +442,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (TestSetPageLocked(page))
                        goto keep;
 
-               BUG_ON(PageActive(page));
+               VM_BUG_ON(PageActive(page));
 
                sc->nr_scanned++;
 
@@ -547,7 +549,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto free_it;
                }
 
-               if (!remove_mapping(mapping, page))
+               if (!mapping || !remove_mapping(mapping, page))
                        goto keep_locked;
 
 free_it:
@@ -564,7 +566,7 @@ keep_locked:
                unlock_page(page);
 keep:
                list_add(&page->lru, &ret_pages);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
        }
        list_splice(&ret_pages, page_list);
        if (pagevec_count(&freed_pvec))
@@ -603,7 +605,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                page = lru_to_page(src);
                prefetchw_prev_lru_page(page, src, flags);
 
-               BUG_ON(!PageLRU(page));
+               VM_BUG_ON(!PageLRU(page));
 
                list_del(&page->lru);
                target = src;
@@ -674,7 +676,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 */
                while (!list_empty(&page_list)) {
                        page = lru_to_page(&page_list);
-                       BUG_ON(PageLRU(page));
+                       VM_BUG_ON(PageLRU(page));
                        SetPageLRU(page);
                        list_del(&page->lru);
                        if (PageActive(page))
@@ -695,6 +697,11 @@ done:
        return nr_reclaimed;
 }
 
+static inline int zone_is_near_oom(struct zone *zone)
+{
+       return zone->pages_scanned >= (zone->nr_active + zone->nr_inactive) * 3;
+}
+
 /*
  * This moves pages from the active list to the inactive list.
  *
@@ -730,6 +737,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                long distress;
                long swap_tendency;
 
+               if (zone_is_near_oom(zone))
+                       goto force_reclaim_mapped;
+
                /*
                 * `distress' is a measure of how much trouble we're having
                 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
@@ -765,6 +775,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                 * memory onto the inactive list.
                 */
                if (swap_tendency >= 100)
+force_reclaim_mapped:
                        reclaim_mapped = 1;
        }
 
@@ -797,9 +808,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
-               BUG_ON(!PageActive(page));
+               VM_BUG_ON(!PageActive(page));
                ClearPageActive(page);
 
                list_move(&page->lru, &zone->inactive_list);
@@ -827,9 +838,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        while (!list_empty(&l_active)) {
                page = lru_to_page(&l_active);
                prefetchw_prev_lru_page(page, &l_active, flags);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
-               BUG_ON(!PageActive(page));
+               VM_BUG_ON(!PageActive(page));
                list_move(&page->lru, &zone->active_list);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
@@ -925,6 +936,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
        unsigned long nr_reclaimed = 0;
        int i;
 
+       sc->all_unreclaimable = 1;
        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];
 
@@ -941,6 +953,8 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
                if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                        continue;       /* Let kswapd poll it */
 
+               sc->all_unreclaimable = 0;
+
                nr_reclaimed += shrink_zone(priority, zone, sc);
        }
        return nr_reclaimed;
@@ -1021,6 +1035,9 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
                if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
                        blk_congestion_wait(WRITE, HZ/10);
        }
+       /* top priority shrink_zones still had more to do? don't OOM, then */
+       if (!sc.all_unreclaimable)
+               ret = 1;
 out:
        for (i = 0; zones[i] != 0; i++) {
                struct zone *zone = zones[i];
@@ -1153,7 +1170,7 @@ scan:
                        if (zone->all_unreclaimable)
                                continue;
                        if (nr_slab == 0 && zone->pages_scanned >=
-                                   (zone->nr_active + zone->nr_inactive) * 4)
+                                   (zone->nr_active + zone->nr_inactive) * 6)
                                zone->all_unreclaimable = 1;
                        /*
                         * If we've done a decent amount of scanning and