}
 
        while (batchcount > 0) {
-               struct list_head *entry;
                struct page *page;
                /* Get the slab this allocation is to come from. */
-               entry = n->slabs_partial.next;
-               if (entry == &n->slabs_partial) {
+               page = list_first_entry_or_null(&n->slabs_partial,
+                               struct page, lru);
+               if (!page) {
                        n->free_touched = 1;
-                       entry = n->slabs_free.next;
-                       if (entry == &n->slabs_free)
+                       page = list_first_entry_or_null(&n->slabs_free,
+                                       struct page, lru);
+                       if (!page)
                                goto must_grow;
                }
 
-               page = list_entry(entry, struct page, lru);
                check_spinlock_acquired(cachep);
 
                /*
@@ ... @@
 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
                                int nodeid)
 {
-       struct list_head *entry;
        struct page *page;
        struct kmem_cache_node *n;
        void *obj;
@@ ... @@
retry:
        check_irq_off();
        spin_lock(&n->list_lock);
-       entry = n->slabs_partial.next;
-       if (entry == &n->slabs_partial) {
+       page = list_first_entry_or_null(&n->slabs_partial,
+                       struct page, lru);
+       if (!page) {
                n->free_touched = 1;
-               entry = n->slabs_free.next;
-               if (entry == &n->slabs_free)
+               page = list_first_entry_or_null(&n->slabs_free,
+                               struct page, lru);
+               if (!page)
                        goto must_grow;
        }
 
-       page = list_entry(entry, struct page, lru);
        check_spinlock_acquired_node(cachep, nodeid);
 
        STATS_INC_NODEALLOCS(cachep);
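
For context (not part of the patch): list_first_entry_or_null(ptr, type, member)
yields the first entry of a list, or NULL when the list is empty, which is exactly
the open-coded "head->next == head" test being removed above. Below is a minimal
standalone userspace sketch of the converted pattern. The list helpers are
simplified stand-ins for the include/linux/list.h versions, and struct page here
is a reduced illustrative stub, not the kernel's.

#include <stddef.h>
#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_first_entry(ptr, type, member) \
        list_entry((ptr)->next, type, member)
/* First entry of the list, or NULL if the list is empty. */
#define list_first_entry_or_null(ptr, type, member) \
        ((ptr)->next != (ptr) ? list_first_entry(ptr, type, member) : NULL)

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

/* Illustrative stub; only the lru linkage matters for this sketch. */
struct page {
        int id;
        struct list_head lru;
};

int main(void)
{
        struct list_head slabs_partial = LIST_HEAD_INIT(slabs_partial);
        struct list_head slabs_free = LIST_HEAD_INIT(slabs_free);
        struct page p = { .id = 42 };
        struct page *page;

        list_add_tail(&p.lru, &slabs_free);

        /*
         * Same fallback order as the patched allocator paths:
         * try the partial list first, then the free list.
         */
        page = list_first_entry_or_null(&slabs_partial, struct page, lru);
        if (!page)
                page = list_first_entry_or_null(&slabs_free, struct page, lru);

        if (page)
                printf("allocating from page %d\n", page->id);
        else
                printf("must grow\n");
        return 0;
}

Either branch reaching "goto must_grow" in the patched code corresponds to the
NULL result here: both lists were empty, so the cache has to grow.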