mm/workingset: prepare the workingset detection infrastructure for anon LRU
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f16306e..efbd95d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -88,12 +88,10 @@ static inline bool is_migrate_movable(int mt)
 
 extern int page_group_by_mobility_disabled;
 
-#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
-#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
+#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
 
 #define get_pageblock_migratetype(page)                                        \
-       get_pfnblock_flags_mask(page, page_to_pfn(page),                \
-                       PB_migrate_end, MIGRATETYPE_MASK)
+       get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
 
 struct free_area {
        struct list_head        free_list[MIGRATE_TYPES];
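
The hunk above replaces the locally derived NR_MIGRATETYPE_BITS with the shared PB_migratetype_bits constant from pageblock-flags.h, and get_pfnblock_flags_mask() drops its end_bitidx argument because the mask alone now describes the field. A standalone userspace sketch of the masking, assuming PB_migratetype_bits is 3 as in this tree (the word and bitidx values are made up for illustration):

    #include <stdio.h>

    #define PB_migratetype_bits 3    /* assumed, as in pageblock-flags.h */
    #define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)

    int main(void)
    {
            unsigned long word = 0x2d;   /* hypothetical packed pageblock flags word */
            unsigned long bitidx = 0;    /* hypothetical bit offset of this block */

            /* the low PB_migratetype_bits of the shifted word are the migratetype */
            printf("migratetype = %lu\n", (word >> bitidx) & MIGRATETYPE_MASK);
            return 0;
    }
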
@@ -155,10 +153,6 @@ enum zone_stat_item {
        NR_ZONE_WRITE_PENDING,  /* Count of dirty, writeback and unstable pages */
        NR_MLOCK,               /* mlock()ed pages found and moved off LRU */
        NR_PAGETABLE,           /* used for pagetables */
-       NR_KERNEL_STACK_KB,     /* measured in KiB */
-#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
-       NR_KERNEL_SCS_KB,       /* measured in KiB */
-#endif
        /* Second 128 byte cacheline */
        NR_BOUNCE,
 #if IS_ENABLED(CONFIG_ZSMALLOC)
@@ -174,14 +168,20 @@ enum node_stat_item {
        NR_INACTIVE_FILE,       /*  "     "     "   "       "         */
        NR_ACTIVE_FILE,         /*  "     "     "   "       "         */
        NR_UNEVICTABLE,         /*  "     "     "   "       "         */
-       NR_SLAB_RECLAIMABLE,
-       NR_SLAB_UNRECLAIMABLE,
+       NR_SLAB_RECLAIMABLE_B,
+       NR_SLAB_UNRECLAIMABLE_B,
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
        WORKINGSET_NODES,
-       WORKINGSET_REFAULT,
-       WORKINGSET_ACTIVATE,
-       WORKINGSET_RESTORE,
+       WORKINGSET_REFAULT_BASE,
+       WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
+       WORKINGSET_REFAULT_FILE,
+       WORKINGSET_ACTIVATE_BASE,
+       WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
+       WORKINGSET_ACTIVATE_FILE,
+       WORKINGSET_RESTORE_BASE,
+       WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
+       WORKINGSET_RESTORE_FILE,
        WORKINGSET_NODERECLAIM,
        NR_ANON_MAPPED, /* Mapped anonymous pages */
        NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
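
The _BASE/_ANON/_FILE layout above lets callers select the per-type counter arithmetically: the anon variant aliases the base and the file variant sits one slot higher, so base + is_file picks the right item. A minimal standalone sketch of that indexing trick (the enum is trimmed to the refault counters; the values are illustrative):

    #include <stdio.h>

    enum node_stat_item {
            WORKINGSET_REFAULT_BASE,
            WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
            WORKINGSET_REFAULT_FILE,
    };

    int main(void)
    {
            int file = 1;    /* 0 = anon LRU, 1 = file LRU */

            /* anon aliases the base and file is base + 1, so this always holds */
            printf("%d\n", WORKINGSET_REFAULT_BASE + file == WORKINGSET_REFAULT_FILE);
            return 0;
    }
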
@@ -203,6 +203,10 @@ enum node_stat_item {
        NR_KERNEL_MISC_RECLAIMABLE,     /* reclaimable non-slab kernel pages */
        NR_FOLL_PIN_ACQUIRED,   /* via: pin_user_page(), gup flag: FOLL_PIN */
        NR_FOLL_PIN_RELEASED,   /* pages returned via unpin_user_page() */
+       NR_KERNEL_STACK_KB,     /* measured in KiB */
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+       NR_KERNEL_SCS_KB,       /* measured in KiB */
+#endif
        NR_VM_NODE_STAT_ITEMS
 };
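
NR_KERNEL_STACK_KB (and NR_KERNEL_SCS_KB under CONFIG_SHADOW_CALL_STACK) is not dropped but moved: it leaves enum zone_stat_item in the second hunk and lands here in enum node_stat_item, so kernel stacks are accounted per NUMA node instead of per zone. A hedged sketch of what an update then looks like (account_kernel_stack() in kernel/fork.c is the real caller; sketch_account_stack is a made-up name showing only the shape of the call):

    /* sketch, kernel style: charge a stack's KiB to the node of its backing page */
    static void sketch_account_stack(struct page *page, int nr_kib)
    {
            mod_node_page_state(page_pgdat(page), NR_KERNEL_STACK_KB, nr_kib);
    }
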
 
@@ -213,7 +217,17 @@ enum node_stat_item {
  */
 static __always_inline bool vmstat_item_in_bytes(int idx)
 {
-       return false;
+       /*
+        * Global and per-node slab counters track slab pages.
+        * It's expected that changes are multiples of PAGE_SIZE.
+        * Internally values are stored in pages.
+        *
+        * Per-memcg and per-lruvec counters track memory, consumed
+        * by individual slab objects. These counters are actually
+        * byte-precise.
+        */
+       return (idx == NR_SLAB_RECLAIMABLE_B ||
+               idx == NR_SLAB_UNRECLAIMABLE_B);
 }
 
 /*
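
With vmstat_item_in_bytes() now true for the two slab counters, the _B items take byte-sized deltas while the counters themselves stay page-based, per the comment above. A sketch of the normalization this implies, modeled on what __mod_node_page_state() does in mm/vmstat.c after this series (an illustration, not the verbatim kernel code):

    /* sketch: byte deltas for _B items are reduced to pages before the
     * page-based per-node counter is touched */
    static long normalize_delta(int item, long delta)
    {
            if (vmstat_item_in_bytes(item))
                    delta >>= PAGE_SHIFT;    /* callers pass PAGE_SIZE multiples */
            return delta;
    }
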
@@ -269,8 +283,8 @@ struct lruvec {
        unsigned long                   file_cost;
        /* Non-resident age, driven by LRU movement */
        atomic_long_t                   nonresident_age;
-       /* Refaults at the time of last reclaim cycle */
-       unsigned long                   refaults;
+       /* Refaults at the time of last reclaim cycle, anon=0, file=1 */
+       unsigned long                   refaults[2];
        /* Various lruvec state flags (enum lruvec_flags) */
        unsigned long                   flags;
 #ifdef CONFIG_MEMCG
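
struct lruvec now snapshots refaults per LRU type, indexed 0 for anon and 1 for file to match the counter pairs introduced above. A sketch of how a reclaim cycle records both levels (lruvec_page_state() is the existing accessor; this mirrors what the series does at snapshot time):

    /* sketch: remember per-type activation levels at the end of reclaim */
    lruvec->refaults[0] = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE_ANON);
    lruvec->refaults[1] = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE_FILE);
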
@@ -829,18 +843,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
 
 extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
 
-#ifdef CONFIG_HAVE_MEMORY_PRESENT
-void memory_present(int nid, unsigned long start, unsigned long end);
-#else
-static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
-#endif
-
-#if defined(CONFIG_SPARSEMEM)
-void memblocks_present(void);
-#else
-static inline void memblocks_present(void) {}
-#endif
-
 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
 int local_memory_node(int node_id);
 #else
@@ -1397,8 +1399,6 @@ struct mminit_pfnnid_cache {
 #define early_pfn_valid(pfn)   (1)
 #endif
 
-void memory_present(int nid, unsigned long start, unsigned long end);
-
 /*
  * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
  * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
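
The comment is cut off here by the hunk boundary. For context, the check it motivates resolves to a real pfn_valid() test only when the configuration permits holes inside a MAX_ORDER block, and compiles away otherwise; the definition that follows this comment in this era of the tree is, as best I can tell (shown for context, not part of the diff):

    #ifdef CONFIG_HOLES_IN_ZONE
    #define pfn_valid_within(pfn) pfn_valid(pfn)
    #else
    #define pfn_valid_within(pfn) (1)
    #endif
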