diff --git a/mm/compaction.c b/mm/compaction.c
index b77e138..84fde27 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -907,7 +907,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                }
 
                if (PageHuge(page) && cc->alloc_contig) {
-                       ret = isolate_or_dissolve_huge_page(page);
+                       ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);
 
                        /*
                         * Fail isolation in case isolate_or_dissolve_huge_page()
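The helper now takes the compaction control's migrate list, so the hugetlb code can queue a successfully isolated hugepage directly on cc->migratepages rather than always dissolving it. A sketch of the updated declaration, assuming the matching hugetlb.h change (which is outside this diff):

    int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);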
@@ -921,6 +921,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                                goto isolate_fail;
                        }
 
+                       if (PageHuge(page)) {
+                               /*
+                                * Hugepage was successfully isolated and placed
+                                * on the cc->migratepages list.
+                                */
+                               low_pfn += compound_nr(page) - 1;
+                               goto isolate_success_no_list;
+                       }
+
                        /*
                         * Ok, the hugepage was dissolved. Now these pages are
                         * Buddy and cannot be re-allocated because they are
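A worked example of the skip arithmetic above: for a 2 MB hugepage on x86-64, compound_nr(page) is 512, so low_pfn advances by 511 here and the scan loop's own increment steps past the final tail page, resuming just after the hugepage.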
@@ -1062,6 +1071,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 isolate_success:
                list_add(&page->lru, &cc->migratepages);
+isolate_success_no_list:
                cc->nr_migratepages += compound_nr(page);
                nr_isolated += compound_nr(page);
 
@@ -2002,8 +2012,8 @@ static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low)
        unsigned int wmark_low;
 
        /*
-        * Cap the low watermak to avoid excessive compaction
-        * activity in case a user sets the proactivess tunable
+        * Cap the low watermark to avoid excessive compaction
+        * activity in case a user sets the proactiveness tunable
         * close to 100 (maximum).
         */
        wmark_low = max(100U - sysctl_compaction_proactiveness, 5U);
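Worked through with concrete values: at the default sysctl_compaction_proactiveness of 20 this gives wmark_low = 80, while a proactiveness of 98 would yield 100 - 98 = 2 and is clamped to the 5U floor, so even a near-maximum setting leaves a small dead band instead of triggering proactive compaction at almost any fragmentation score.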
@@ -2344,7 +2354,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
        trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
                                cc->free_pfn, end_pfn, sync);
 
-       migrate_prep_local();
+       /* lru_add_drain_all() could be expensive when it involves other CPUs */
+       lru_add_drain();
 
        while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
                int err;
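migrate_prep_local() was a thin wrapper, so draining the local CPU's LRU pagevecs directly preserves the old behaviour; lru_add_drain_all(), by contrast, schedules drain work on every online CPU. For reference, the removed helper's body was simply (as it stood in mm/migrate.c before its removal):

    void migrate_prep_local(void)
    {
            lru_add_drain();
    }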
@@ -2519,6 +2530,14 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
         */
        WRITE_ONCE(current->capture_control, NULL);
        *capture = READ_ONCE(capc.page);
+       /*
+        * Technically, it is also possible that compaction is skipped but
+        * the page is still captured out of luck (IRQ came and freed the page).
+        * Returning COMPACT_SUCCESS in such cases helps in properly accounting
+        * for COMPACT[STALL|FAIL] when compaction is skipped.
+        */
+       if (*capture)
+               ret = COMPACT_SUCCESS;
 
        return ret;
 }
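The scenario the new check covers: compaction itself may bail out with COMPACT_SKIPPED, yet in the window before capture_control is cleared an interrupt can free a suitable page that the page-free path hands to the still-registered capc. Without this check the caller would see a captured page alongside a non-success status; promoting it to COMPACT_SUCCESS keeps the COMPACTSTALL/COMPACTFAIL accounting consistent with the allocation actually having succeeded.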
@@ -2682,9 +2701,6 @@ static void compact_nodes(void)
                compact_node(nid);
 }
 
-/* The written value is actually unused, all memory is compacted */
-int sysctl_compact_memory;
-
 /*
  * Tunable for proactive compaction. It determines how
  * aggressively the kernel should compact memory in the
@@ -2869,7 +2885,7 @@ void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
  */
 static int kcompactd(void *p)
 {
-       pg_data_t *pgdat = (pg_data_t*)p;
+       pg_data_t *pgdat = (pg_data_t *)p;
        struct task_struct *tsk = current;
        unsigned int proactive_defer = 0;