spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}
-static inline bool lruvec_holds_page_lru_lock(struct page *page,
- struct lruvec *lruvec)
+/* Test requires a stable page->memcg binding, see page_memcg() */
+static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
{
return lruvec_pgdat(lruvec) == page_pgdat(page) &&
	       lruvec_memcg(lruvec) == page_memcg(page);
 }
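For readers outside mm/, the predicate being renamed is a plain two-field identity check: a page belongs to an lruvec iff both the node and the memcg match. A toy userspace model of the test (the struct layouts below are simplified stand-ins, not the kernel's real definitions):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct pglist_data { int node_id; };
struct mem_cgroup  { int id; };

struct lruvec {
	struct pglist_data *pgdat;
	struct mem_cgroup  *memcg;
};

struct page {
	struct pglist_data *pgdat;	/* node the page lives on */
	struct mem_cgroup  *memcg;	/* owning memcg; memcg migration can
					 * rebind this unless the page is
					 * isolated or suitably locked,
					 * hence the "stable binding" rule */
};

/* Mirrors the shape of page_matches_lruvec(). */
static bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
{
	return lruvec->pgdat == page->pgdat &&
	       lruvec->memcg == page->memcg;
}

int main(void)
{
	struct pglist_data node0 = { .node_id = 0 };
	struct mem_cgroup memcg = { .id = 1 };
	struct lruvec lruvec = { .pgdat = &node0, .memcg = &memcg };
	struct page page = { .pgdat = &node0, .memcg = &memcg };

	printf("matches: %d\n", page_matches_lruvec(&page, &lruvec));	/* 1 */
	return 0;
}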

 static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
 		struct lruvec *locked_lruvec)
 {
if (locked_lruvec) {
- if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+ if (page_matches_lruvec(page, locked_lruvec))
return locked_lruvec;

 		unlock_page_lruvec_irq(locked_lruvec);

 static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
 		struct lruvec *locked_lruvec, unsigned long *flags)
{
if (locked_lruvec) {
- if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+ if (page_matches_lruvec(page, locked_lruvec))
return locked_lruvec;

 		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
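Both relock helpers exist so that callers walking a batch of pages can keep one lru_lock held across a run of pages from the same lruvec and only pay an unlock/lock when the lruvec changes. A hedged userspace sketch of that batching pattern, with a pthread mutex standing in for the irq-safe spinlock and a one-pointer membership test standing in for the real predicate:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct lruvec {
	pthread_mutex_t lru_lock;	/* stand-in for the irq-safe spinlock */
	int node, memcg;
};

struct page { struct lruvec *lruvec; };

/* Toy membership test: one pointer compare instead of node + memcg. */
static bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
{
	return page->lruvec == lruvec;
}

/* Models relock_page_lruvec_irq(): keep the held lock if the page still
 * belongs to it, otherwise drop it and take the right one. */
static struct lruvec *relock_page_lruvec(struct page *page,
					 struct lruvec *locked)
{
	if (locked) {
		if (page_matches_lruvec(page, locked))
			return locked;
		pthread_mutex_unlock(&locked->lru_lock);
	}
	pthread_mutex_lock(&page->lruvec->lru_lock);
	return page->lruvec;
}

/* Typical caller shape: one lock switch per run of same-lruvec pages. */
static void walk(struct page **pages, size_t n)
{
	struct lruvec *locked = NULL;

	for (size_t i = 0; i < n; i++) {
		locked = relock_page_lruvec(pages[i], locked);
		/* ... operate on pages[i] under locked->lru_lock ... */
	}
	if (locked)
		pthread_mutex_unlock(&locked->lru_lock);
}

int main(void)
{
	struct lruvec a = { PTHREAD_MUTEX_INITIALIZER, 0, 1 };
	struct lruvec b = { PTHREAD_MUTEX_INITIALIZER, 0, 2 };
	struct page p1 = { &a }, p2 = { &a }, p3 = { &b };
	struct page *batch[] = { &p1, &p2, &p3 };

	walk(batch, 3);		/* two lock acquisitions, not three */
	return 0;
}

The final hunk, in move_pages_to_lru() in mm/vmscan.c, updates the remaining caller of the old name: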
/*
 * All pages were isolated from the same lruvec (and isolation
* inhibits memcg migration).
*/
- VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
+ VM_BUG_ON_PAGE(!page_matches_lruvec(page, lruvec), page);
add_page_to_lru_list(page, lruvec);
nr_pages = thp_nr_pages(page);
nr_moved += nr_pages;
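The comment explains why the check can be a hard assertion: every page in the batch was isolated from this lruvec, and isolation inhibits memcg migration, so the page->memcg binding is stable here. A toy model of the putback loop, with assert() standing in for VM_BUG_ON_PAGE() and made-up counters in place of the real LRU bookkeeping:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct lruvec { int node, memcg; size_t nr_lru; };
struct page   { int node, memcg, nr_pages; };	/* nr_pages: 1, or >1 for THP */

static bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
{
	return page->node == lruvec->node && page->memcg == lruvec->memcg;
}

/* Models the move_pages_to_lru() putback loop: every isolated page must
 * still belong to the lruvec we hold, so the check is a pure sanity
 * assertion rather than a branch the caller can take. */
static size_t move_pages_to_lru(struct lruvec *lruvec,
				struct page **isolated, size_t n)
{
	size_t nr_moved = 0;

	for (size_t i = 0; i < n; i++) {
		struct page *page = isolated[i];

		assert(page_matches_lruvec(page, lruvec));	/* VM_BUG_ON_PAGE */
		lruvec->nr_lru += page->nr_pages;	/* add_page_to_lru_list */
		nr_moved += page->nr_pages;		/* thp_nr_pages() */
	}
	return nr_moved;
}

int main(void)
{
	struct lruvec lv = { .node = 0, .memcg = 7, .nr_lru = 0 };
	struct page p1 = { .node = 0, .memcg = 7, .nr_pages = 1 };
	struct page hp = { .node = 0, .memcg = 7, .nr_pages = 512 };
	struct page *batch[] = { &p1, &hp };

	assert(move_pages_to_lru(&lv, batch, 2) == 513);
	return 0;
}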