mm/page_alloc: silence a KASAN false positive
index dbcab84..de257c0 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -443,8 +443,7 @@ void mark_page_accessed(struct page *page)
                else
                        __lru_cache_activate_page(page);
                ClearPageReferenced(page);
-               if (page_is_file_lru(page))
-                       workingset_activation(page);
+               workingset_activation(page);
        }
        if (page_is_idle(page))
                clear_page_idle(page);
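With the check removed, workingset_activation() is now called for anonymous pages as well as file-backed ones when a page is activated on this path. A minimal sketch of the resulting branch, reconstructed from the hunk's context lines (the leading if/else is assumed from the surrounding code, which the diff does not show):

	if (PageLRU(page))		/* assumed context, not part of this hunk */
		activate_page(page);
	else
		__lru_cache_activate_page(page);
	ClearPageReferenced(page);
	workingset_activation(page);	/* no longer gated on page_is_file_lru() */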
@@ -831,8 +830,8 @@ void release_pages(struct page **pages, int nr)
        LIST_HEAD(pages_to_free);
        struct pglist_data *locked_pgdat = NULL;
        struct lruvec *lruvec;
-       unsigned long uninitialized_var(flags);
-       unsigned int uninitialized_var(lock_batch);
+       unsigned long flags;
+       unsigned int lock_batch;
 
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];
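The second hunk replaces the uninitialized_var() annotations with plain declarations. That helper existed only to silence "may be used uninitialized" compiler warnings; in release_pages() both flags and lock_batch are assigned when the pgdat lock is first taken, before any use, so dropping the annotation should not change behavior. For illustration only, the helper was historically defined roughly as below (quoted from older compiler headers, not part of this diff):

	/* Self-assignment to suppress the warning; removed from the kernel
	 * because it could mask genuinely missing initializations. */
	#define uninitialized_var(x) x = x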