[linux-2.6-microblaze.git] / mm/swapfile.c
index 63ac672..987276c 100644
@@ -40,7 +40,6 @@
 #include <linux/swap_slots.h>
 #include <linux/sort.h>
 
-#include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <linux/swapops.h>
 #include <linux/swap_cgroup.h>
@@ -1892,7 +1891,6 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, swp_entry_t entry, struct page *page)
 {
        struct page *swapcache;
-       struct mem_cgroup *memcg;
        spinlock_t *ptl;
        pte_t *pte;
        int ret = 1;
@@ -1902,15 +1900,8 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        if (unlikely(!page))
                return -ENOMEM;
 
-       if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
-                               &memcg, false)) {
-               ret = -ENOMEM;
-               goto out_nolock;
-       }
-
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
-               mem_cgroup_cancel_charge(page, memcg, false);
                ret = 0;
                goto out;
        }
@@ -1922,10 +1913,8 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
                   pte_mkold(mk_pte(page, vma->vm_page_prot)));
        if (page == swapcache) {
                page_add_anon_rmap(page, vma, addr, false);
-               mem_cgroup_commit_charge(page, memcg, true, false);
        } else { /* ksm created a completely new copy */
                page_add_new_anon_rmap(page, vma, addr, false);
-               mem_cgroup_commit_charge(page, memcg, false, false);
                lru_cache_add_active_or_unevictable(page, vma);
        }
        swap_free(entry);
@@ -1936,7 +1925,6 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        activate_page(page);
 out:
        pte_unmap_unlock(pte, ptl);
-out_nolock:
        if (page != swapcache) {
                unlock_page(page);
                put_page(page);
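
Note on the unuse_pte() hunks above: the open-coded memcg charging (mem_cgroup_try_charge() followed by mem_cgroup_commit_charge() or mem_cgroup_cancel_charge(), plus the out_nolock unwind label it required) is dropped, presumably because the reworked charge API charges the page once, earlier on the swap-in path, with a single call. A minimal sketch of the two conventions; the single-call signature below is an assumption based on the 5.8-era API and is not part of this diff:

	/* Old protocol, as removed above: reserve, then commit or cancel. */
	struct mem_cgroup *memcg;

	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
		return -ENOMEM;
	/* ... pte successfully installed ... */
	mem_cgroup_commit_charge(page, memcg, true, false);
	/* ... or, if the pte no longer matches the swap entry ... */
	mem_cgroup_cancel_charge(page, memcg, false);

	/* New convention (assumed signature): one call, no commit/cancel pair. */
	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
		return -ENOMEM;
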
@@ -2112,7 +2100,7 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type,
        struct vm_area_struct *vma;
        int ret = 0;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma->anon_vma) {
                        ret = unuse_vma(vma, type, frontswap,
@@ -2122,7 +2110,7 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type,
                }
                cond_resched();
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        return ret;
 }
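
The mmap_sem read lock/unlock pair in unuse_mm() is converted to the mmap locking API. Functionally this is a rename: the wrappers are thin inlines over the same read-write semaphore on the mm. A sketch of what the wrappers amount to (paraphrased; the exact field name on struct mm_struct at this point in the series is an assumption):

	static inline void mmap_read_lock(struct mm_struct *mm)
	{
		down_read(&mm->mmap_lock);	/* historically mm->mmap_sem */
	}

	static inline void mmap_read_unlock(struct mm_struct *mm)
	{
		up_read(&mm->mmap_lock);
	}
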
 
@@ -3799,11 +3787,12 @@ static void free_swap_count_continuations(struct swap_info_struct *si)
 }
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
-                                 gfp_t gfp_mask)
+void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
 {
        struct swap_info_struct *si, *next;
-       if (!(gfp_mask & __GFP_IO) || !memcg)
+       int nid = page_to_nid(page);
+
+       if (!(gfp_mask & __GFP_IO))
                return;
 
        if (!blk_cgroup_congested())
@@ -3817,11 +3806,10 @@ void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
                return;
 
        spin_lock(&swap_avail_lock);
-       plist_for_each_entry_safe(si, next, &swap_avail_heads[node],
-                                 avail_lists[node]) {
+       plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
+                                 avail_lists[nid]) {
                if (si->bdev) {
-                       blkcg_schedule_throttle(bdev_get_queue(si->bdev),
-                                               true);
+                       blkcg_schedule_throttle(bdev_get_queue(si->bdev), true);
                        break;
                }
        }
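
The throttling helper is renamed from mem_cgroup_throttle_swaprate() to cgroup_throttle_swaprate(): it now takes the page itself instead of a memcg and an explicit node, derives the NUMA node with page_to_nid(), and drops the !memcg bail-out since the throttling decision is driven by blk-cgroup congestion rather than by the memcg. A hypothetical call-site sketch under the new signature (the allocation call and gfp mask here are illustrative, not taken from this diff):

	/* After allocating a page on a swap-in/write path, let the block cgroup
	 * layer throttle the task if the swap device's queue is congested. */
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);

	if (page)
		cgroup_throttle_swaprate(page, GFP_KERNEL);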