diff --git a/mm/swap_state.c b/mm/swap_state.c
index a29b33c..c16eebb 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -57,8 +57,8 @@ static bool enable_vma_readahead __read_mostly = true;
 #define GET_SWAP_RA_VAL(vma)                                   \
        (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
 
-#define INC_CACHE_INFO(x)      do { swap_cache_info.x++; } while (0)
-#define ADD_CACHE_INFO(x, nr)  do { swap_cache_info.x += (nr); } while (0)
+#define INC_CACHE_INFO(x)      data_race(swap_cache_info.x++)
+#define ADD_CACHE_INFO(x, nr)  data_race(swap_cache_info.x += (nr))
 
 static struct {
        unsigned long add_total;
@@ -106,6 +106,20 @@ void show_swap_cache_info(void)
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 }
 
 
+void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+       struct address_space *address_space = swap_address_space(entry);
+       pgoff_t idx = swp_offset(entry);
+       struct page *page;
+
+       page = find_get_entry(address_space, idx);
+       if (xa_is_value(page))
+               return page;
+       if (page)
+               put_page(page);
+       return NULL;
+}
+
 /*
  * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
@@ -116,7 +130,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry,
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
-       unsigned long i, nr = hpage_nr_pages(page);
+       unsigned long i, nr = thp_nr_pages(page);
        void *old;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -169,7 +183,7 @@ void __delete_from_swap_cache(struct page *page,
                        swp_entry_t entry, void *shadow)
 {
        struct address_space *address_space = swap_address_space(entry);
-       int i, nr = hpage_nr_pages(page);
+       int i, nr = thp_nr_pages(page);
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);
 
@@ -264,7 +278,7 @@ void delete_from_swap_cache(struct page *page)
        xa_unlock_irq(&address_space->i_pages);
 
        put_swap_page(page, entry);
-       page_ref_sub(page, hpage_nr_pages(page));
+       page_ref_sub(page, thp_nr_pages(page));
 }
 
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
@@ -406,6 +420,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 {
        struct swap_info_struct *si;
        struct page *page;
+       void *shadow = NULL;
 
        *new_page_allocated = false;
 
@@ -474,7 +489,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
        __SetPageSwapBacked(page);
 
        /* May fail (-ENOMEM) if XArray node allocation failed. */
-       if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, NULL)) {
+       if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
                put_swap_page(page, entry);
                goto fail_unlock;
        }
@@ -484,10 +499,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                goto fail_unlock;
        }
 
-       /* XXX: Move to lru_cache_add() when it supports new vs putback */
-       spin_lock_irq(&page_pgdat(page)->lru_lock);
-       lru_note_cost_page(page);
-       spin_unlock_irq(&page_pgdat(page)->lru_lock);
+       if (shadow)
+               workingset_refault(page, shadow);
 
        /* Caller will initiate read into locked page */
        SetPageWorkingset(page);
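
For readers tracing the shadow-entry flow in the hunks above, the following is a minimal userspace C sketch of the pattern they implement, not kernel code: eviction leaves a tagged shadow value in the vacated cache slot, and a later refault retrieves that shadow (as get_shadow_from_swap_cache() and the &shadow argument to add_to_swap_cache() do in the diff) and hands it to a refault handler (the workingset_refault() call). All names here (slots, toy_evict, toy_refault, make_shadow, is_shadow, shadow_value) are invented for illustration.

/*
 * Toy userspace model of the shadow-entry pattern in the diff above.
 * Not kernel code; all toy_* names are invented for illustration.
 * A slot holds either a real object pointer or a "shadow": an eviction
 * counter tagged in the low bit, similar in spirit to xa_is_value() entries.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define NSLOTS 8

static void *slots[NSLOTS];		/* the toy "swap cache" */
static uintptr_t evictions;		/* global eviction clock */

static int is_shadow(const void *p)	{ return (uintptr_t)p & 1; }
static void *make_shadow(uintptr_t v)	{ return (void *)((v << 1) | 1); }
static uintptr_t shadow_value(const void *p) { return (uintptr_t)p >> 1; }

/* Evict: replace the object with a shadow recording the eviction time. */
static void toy_evict(unsigned int idx)
{
	if (slots[idx] && !is_shadow(slots[idx]))
		free(slots[idx]);
	slots[idx] = make_shadow(evictions++);
}

/*
 * Refault: like __read_swap_cache_async() in the diff, pick up any shadow
 * left behind in the slot and pass it to a refault handler before
 * installing the newly read-in object.
 */
static void toy_refault(unsigned int idx)
{
	void *shadow = NULL;

	if (slots[idx] && is_shadow(slots[idx]))
		shadow = slots[idx];	/* get_shadow_from_swap_cache() analogue */

	slots[idx] = malloc(64);	/* the newly read-in "page" */

	if (shadow)			/* workingset_refault() analogue */
		printf("slot %u refaulted; evicted at tick %lu, clock now %lu\n",
		       idx, (unsigned long)shadow_value(shadow),
		       (unsigned long)evictions);
}

int main(void)
{
	unsigned int i;

	toy_refault(3);		/* first fault: no shadow yet, nothing printed */
	toy_evict(3);		/* eviction leaves a shadow in slot 3 */
	toy_evict(5);
	toy_refault(3);		/* refault finds the shadow and reports it */

	for (i = 0; i < NSLOTS; i++)	/* free real objects; shadows are not pointers */
		if (slots[i] && !is_shadow(slots[i]))
			free(slots[i]);
	return 0;
}

The design point the sketch mirrors is that the eviction information lives in the same slot the object vacated (a tagged XArray value in the kernel case), so the refault path needs no separate lookup structure to decide whether the entry was recently part of the working set.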