X-Git-Url: http://git.monstr.eu/?a=blobdiff_plain;f=mm%2Fkhugepaged.c;h=58b0d9c502a1a4ebac2a6820af0c4287f9997438;hb=2d0f6b0aab9afbd6fdf3514cb4acc249d7aebf9c;hp=e749e568e1eabc9367c8c731149dfd9473aeadf4;hpb=9b14b066796d0e8bd05f200f30cb4222f8c0f9f1;p=linux-2.6-microblaze.git

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e749e568e1ea..58b0d9c502a1 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -56,6 +56,9 @@ enum scan_result {
 #define CREATE_TRACE_POINTS
 #include <trace/events/huge_memory.h>
 
+static struct task_struct *khugepaged_thread __read_mostly;
+static DEFINE_MUTEX(khugepaged_mutex);
+
 /* default scan 8*512 pte (or vmas) every 30 second */
 static unsigned int khugepaged_pages_to_scan __read_mostly;
 static unsigned int khugepaged_pages_collapsed;
@@ -914,6 +917,18 @@ static struct page *khugepaged_alloc_hugepage(bool *wait)
 
 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 {
+	/*
+	 * If the hpage allocated earlier was briefly exposed in page cache
+	 * before collapse_file() failed, it is possible that racing lookups
+	 * have not yet completed, and would then be unpleasantly surprised by
+	 * finding the hpage reused for the same mapping at a different offset.
+	 * Just release the previous allocation if there is any danger of that.
+	 */
+	if (*hpage && page_count(*hpage) > 1) {
+		put_page(*hpage);
+		*hpage = NULL;
+	}
+
 	if (!*hpage)
 		*hpage = khugepaged_alloc_hugepage(wait);
 
@@ -1709,7 +1724,7 @@ static void collapse_file(struct mm_struct *mm,
 				xas_unlock_irq(&xas);
 				page_cache_sync_readahead(mapping, &file->f_ra,
 						file, index,
-						PAGE_SIZE);
+						end - index);
 				/* drain pagevecs to help isolate_lru_page() */
 				lru_add_drain();
 				page = find_lock_page(mapping, index);
@@ -2292,8 +2307,6 @@ static void set_recommended_min_free_kbytes(void)
 
 int start_stop_khugepaged(void)
 {
-	static struct task_struct *khugepaged_thread __read_mostly;
-	static DEFINE_MUTEX(khugepaged_mutex);
 	int err = 0;
 
 	mutex_lock(&khugepaged_mutex);
@@ -2320,3 +2333,11 @@ fail:
 	mutex_unlock(&khugepaged_mutex);
 	return err;
 }
+
+void khugepaged_min_free_kbytes_update(void)
+{
+	mutex_lock(&khugepaged_mutex);
+	if (khugepaged_enabled() && khugepaged_thread)
+		set_recommended_min_free_kbytes();
+	mutex_unlock(&khugepaged_mutex);
+}
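
Note: moving khugepaged_thread and khugepaged_mutex from start_stop_khugepaged() to file scope is what allows the new khugepaged_min_free_kbytes_update() helper to take the same mutex before re-running set_recommended_min_free_kbytes(). The caller of that helper is not part of this hunk; the sketch below is only an assumption of how it might be wired up (it assumes the declaration is exported via include/linux/khugepaged.h, and the function name here is hypothetical), illustrating the intended call order: recompute the zone watermarks first, then let khugepaged re-apply its recommended min_free_kbytes while it is running.

	/*
	 * Hypothetical caller, not part of this diff: invoked after an event
	 * (e.g. memory hotplug) that changes min_free_kbytes.
	 */
	#include <linux/mm.h>		/* setup_per_zone_wmarks() */
	#include <linux/khugepaged.h>	/* assumed to declare the new helper */

	static void recalc_wmarks_and_notify_khugepaged(void)
	{
		setup_per_zone_wmarks();		/* refresh zone watermarks */
		khugepaged_min_free_kbytes_update();	/* raise min_free_kbytes again
							 * if khugepaged is running */
	}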