diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e749e56..58b0d9c 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -56,6 +56,9 @@ enum scan_result {
 #define CREATE_TRACE_POINTS
 #include <trace/events/huge_memory.h>
 
+static struct task_struct *khugepaged_thread __read_mostly;
+static DEFINE_MUTEX(khugepaged_mutex);
+
 /* default scan 8*512 pte (or vmas) every 30 seconds */
 static unsigned int khugepaged_pages_to_scan __read_mostly;
 static unsigned int khugepaged_pages_collapsed;
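
These two statics were previously local to start_stop_khugepaged() (their removal appears in the start_stop_khugepaged() hunk below); hoisting them to file scope lets the new khugepaged_min_free_kbytes_update() at the end of the file serialize on the same khugepaged_mutex. Callers outside khugepaged.c also need a declaration; a minimal sketch of the matching header change, assuming it follows the usual CONFIG_TRANSPARENT_HUGEPAGE stub pattern in include/linux/khugepaged.h (that hunk is not part of this excerpt):

	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	extern void khugepaged_min_free_kbytes_update(void);
	#else
	static inline void khugepaged_min_free_kbytes_update(void)
	{
	}
	#endif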
@@ -914,6 +917,18 @@ static struct page *khugepaged_alloc_hugepage(bool *wait)
 
 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 {
+       /*
+        * If the hpage allocated earlier was briefly exposed in page cache
+        * before collapse_file() failed, it is possible that racing lookups
+        * have not yet completed, and would then be unpleasantly surprised by
+        * finding the hpage reused for the same mapping at a different offset.
+        * Just release the previous allocation if there is any danger of that.
+        */
+       if (*hpage && page_count(*hpage) > 1) {
+               put_page(*hpage);
+               *hpage = NULL;
+       }
+
        if (!*hpage)
                *hpage = khugepaged_alloc_hugepage(wait);
 
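
The check relies on struct page reference counting: a huge page fresh from the allocator is held only by khugepaged, with page_count() == 1, so any higher count means a racing page-cache lookup may still hold a reference from the page's brief exposure during the previous, failed collapse_file() attempt. A minimal sketch of the invariant being enforced; hpage_safe_to_reuse() is a hypothetical helper for illustration, not something this patch adds:

	/*
	 * A preallocated huge page may be recycled for another collapse
	 * attempt only while khugepaged holds the sole reference; an
	 * elevated count means a concurrent lookup could still observe
	 * the page at its old mapping and offset.
	 */
	static inline bool hpage_safe_to_reuse(struct page *hpage)
	{
		return page_count(hpage) == 1;
	}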
@@ -1709,7 +1724,7 @@ static void collapse_file(struct mm_struct *mm,
                                xas_unlock_irq(&xas);
                                page_cache_sync_readahead(mapping, &file->f_ra,
                                                          file, index,
-                                                         PAGE_SIZE);
+                                                         end - index);
                                /* drain pagevecs to help isolate_lru_page() */
                                lru_add_drain();
                                page = find_lock_page(mapping, index);
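
The last argument of page_cache_sync_readahead() is a request size in pages, not bytes, so passing PAGE_SIZE asked for PAGE_SIZE pages of readahead (16 MiB with 4 KiB pages). Passing end - index instead requests only the pages still missing from the range being collapsed, which is bounded by HPAGE_PMD_NR (512). A hedged sketch of the declaration as it reads in kernels of roughly this vintage (include/linux/pagemap.h):

	void page_cache_sync_readahead(struct address_space *mapping,
				       struct file_ra_state *ra,
				       struct file *filp, pgoff_t index,
				       unsigned long req_count);	/* in pages */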
@@ -2292,8 +2307,6 @@ static void set_recommended_min_free_kbytes(void)
 
 int start_stop_khugepaged(void)
 {
-       static struct task_struct *khugepaged_thread __read_mostly;
-       static DEFINE_MUTEX(khugepaged_mutex);
        int err = 0;
 
        mutex_lock(&khugepaged_mutex);
@@ -2320,3 +2333,11 @@ fail:
        mutex_unlock(&khugepaged_mutex);
        return err;
 }
+
+void khugepaged_min_free_kbytes_update(void)
+{
+       mutex_lock(&khugepaged_mutex);
+       if (khugepaged_enabled() && khugepaged_thread)
+               set_recommended_min_free_kbytes();
+       mutex_unlock(&khugepaged_mutex);
+}
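
set_recommended_min_free_kbytes() raises min_free_kbytes so enough free memory stays defragmented for huge page allocations, but memory hotplug recomputes the watermarks from scratch and would silently discard that bump. The new helper lets the hotplug path reapply it, and the khugepaged_enabled() && khugepaged_thread check ensures the bump is only restored while khugepaged is actually running; taking khugepaged_mutex (now file-scope, see the first hunk) keeps it from racing with khugepaged starting or stopping. A hedged sketch of the expected call site, assuming the companion change wires it into init_per_zone_wmark_min() in mm/page_alloc.c, which runs again when memory is onlined or offlined:

	int __meminit init_per_zone_wmark_min(void)
	{
		/* recompute watermarks for the new memory layout */
		setup_per_zone_wmarks();
		refresh_zone_stat_thresholds();
		setup_per_zone_lowmem_reserve();

		/* reapply khugepaged's raised min_free_kbytes, if any */
		khugepaged_min_free_kbytes_update();
		return 0;
	}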