hugetlb: perform vmemmap restoration on a list of pages
author Mike Kravetz <mike.kravetz@oracle.com>
Thu, 19 Oct 2023 02:31:06 +0000 (19:31 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 25 Oct 2023 23:47:07 +0000 (16:47 -0700)
The routine update_and_free_pages_bulk already performs vmemmap
restoration on the list of hugetlb pages in a separate step.  In
preparation for more functionality to be added in this step, create a new
routine hugetlb_vmemmap_restore_folios() that will restore vmemmap for a
list of folios.
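
For reference, the new interface (as added to mm/hugetlb_vmemmap.h in the
diff below) is:

    long hugetlb_vmemmap_restore_folios(const struct hstate *h,
                                        struct list_head *folio_list,
                                        struct list_head *non_hvo_folios);

Folios whose vmemmap is present after the call are moved from folio_list
to the non_hvo_folios output list.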

This new routine must provide sufficient feedback about errors and the
restoration actually performed so that update_and_free_pages_bulk can
operate optimally.
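
As a minimal caller sketch (illustration only, not part of the patch;
assumes an hstate *h and a populated folio_list are in scope), the
intended semantics are:

    LIST_HEAD(non_hvo_folios);
    long ret;

    /* Try to restore vmemmap for every folio on folio_list. */
    ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
    if (ret < 0) {
            /*
             * Processing stopped at the first error: folios restored so
             * far were moved to non_hvo_folios; the failing folio and
             * all unprocessed folios remain on folio_list.
             */
    } else {
            /*
             * ret is the number of folios whose vmemmap was restored
             * (0 if none were vmemmap optimized); folio_list is now
             * empty and every folio is on non_hvo_folios.
             */
    }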

Special care must be taken when an error is encountered in
hugetlb_vmemmap_restore_folios(): we want to continue making as much
forward progress as possible.  A new routine, bulk_vmemmap_restore_error(),
handles this specific situation.
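
In outline, update_and_free_pages_bulk() retries the bulk restore until
it succeeds (simplified from the mm/hugetlb.c hunk below):

    retry:
            ret = hugetlb_vmemmap_restore_folios(h, folio_list,
                                                 &non_hvo_folios);
            if (ret < 0) {
                    /*
                     * Free what we can, or turn pages into surplus
                     * pages, so that a subsequent attempt may find
                     * memory for vmemmap allocations.
                     */
                    bulk_vmemmap_restore_error(h, folio_list,
                                               &non_hvo_folios);
                    goto retry;
            }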

Link: https://lkml.kernel.org/r/20231019023113.345257-5-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Konrad Dybcio <konradybcio@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Usama Arif <usama.arif@bytedance.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c
mm/hugetlb_vmemmap.c
mm/hugetlb_vmemmap.h

mm/hugetlb.c
index 8b171f8..cf834bb 100644
@@ -1859,50 +1859,93 @@ static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
                schedule_work(&free_hpage_work);
 }
 
-static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
+static void bulk_vmemmap_restore_error(struct hstate *h,
+                                       struct list_head *folio_list,
+                                       struct list_head *non_hvo_folios)
 {
        struct folio *folio, *t_folio;
-       bool clear_dtor = false;
 
-       /*
-        * First allocate required vmemmmap (if necessary) for all folios on
-        * list.  If vmemmap can not be allocated, we can not free folio to
-        * lower level allocator, so add back as hugetlb surplus page.
-        * add_hugetlb_folio() removes the page from THIS list.
-        * Use clear_dtor to note if vmemmap was successfully allocated for
-        * ANY page on the list.
-        */
-       list_for_each_entry_safe(folio, t_folio, list, lru) {
-               if (folio_test_hugetlb_vmemmap_optimized(folio)) {
+       if (!list_empty(non_hvo_folios)) {
+               /*
+                * Free any restored hugetlb pages so that restore of the
+                * entire list can be retried.
+                * The idea is that, in the common case of ENOMEM errors,
+                * freeing hugetlb pages that have vmemmap will free up memory
+                * and allow us to allocate vmemmap for more hugetlb pages.
+                */
+               list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
+                       list_del(&folio->lru);
+                       spin_lock_irq(&hugetlb_lock);
+                       __clear_hugetlb_destructor(h, folio);
+                       spin_unlock_irq(&hugetlb_lock);
+                       update_and_free_hugetlb_folio(h, folio, false);
+                       cond_resched();
+               }
+       } else {
+               /*
+                * In the case where there are no folios which can be
+                * immediately freed, we loop through the list trying to restore
+                * vmemmap individually in the hope that someone elsewhere may
+                * have done something to cause success (such as freeing some
+                * memory).  If unable to restore a hugetlb page, the hugetlb
+                * page is made a surplus page and removed from the list.
+                * If we are able to restore vmemmap and free one hugetlb page, we
+                * quit processing the list to retry the bulk operation.
+                */
+               list_for_each_entry_safe(folio, t_folio, folio_list, lru)
                        if (hugetlb_vmemmap_restore(h, &folio->page)) {
+                               list_del(&folio->lru);
                                spin_lock_irq(&hugetlb_lock);
                                add_hugetlb_folio(h, folio, true);
                                spin_unlock_irq(&hugetlb_lock);
-                       } else
-                               clear_dtor = true;
-               }
+                       } else {
+                               list_del(&folio->lru);
+                               spin_lock_irq(&hugetlb_lock);
+                               __clear_hugetlb_destructor(h, folio);
+                               spin_unlock_irq(&hugetlb_lock);
+                               update_and_free_hugetlb_folio(h, folio, false);
+                               cond_resched();
+                               break;
+                       }
        }
+}
+
+static void update_and_free_pages_bulk(struct hstate *h,
+                                               struct list_head *folio_list)
+{
+       long ret;
+       struct folio *folio, *t_folio;
+       LIST_HEAD(non_hvo_folios);
 
        /*
-        * If vmemmmap allocation was performed on any folio above, take lock
-        * to clear destructor of all folios on list.  This avoids the need to
-        * lock/unlock for each individual folio.
-        * The assumption is vmemmap allocation was performed on all or none
-        * of the folios on the list.  This is true expect in VERY rare cases.
+        * First allocate required vmemmap (if necessary) for all folios.
+        * Carefully handle errors and free up any available hugetlb pages
+        * in an effort to make forward progress.
         */
-       if (clear_dtor) {
+retry:
+       ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
+       if (ret < 0) {
+               bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
+               goto retry;
+       }
+
+       /*
+        * At this point, folio_list should be empty, ret should be >= 0
+        * and there should only be pages on the non_hvo_folios list.
+        * Do note that the non_hvo_folios list could be empty.
+        * Without HVO enabled, ret will be 0 and there is no need to call
+        * __clear_hugetlb_destructor as this was done previously.
+        */
+       VM_WARN_ON(!list_empty(folio_list));
+       VM_WARN_ON(ret < 0);
+       if (!list_empty(&non_hvo_folios) && ret) {
                spin_lock_irq(&hugetlb_lock);
-               list_for_each_entry(folio, list, lru)
+               list_for_each_entry(folio, &non_hvo_folios, lru)
                        __clear_hugetlb_destructor(h, folio);
                spin_unlock_irq(&hugetlb_lock);
        }
 
-       /*
-        * Free folios back to low level allocators.  vmemmap and destructors
-        * were taken care of above, so update_and_free_hugetlb_folio will
-        * not need to take hugetlb lock.
-        */
-       list_for_each_entry_safe(folio, t_folio, list, lru) {
+       list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
                update_and_free_hugetlb_folio(h, folio, false);
                cond_resched();
        }
mm/hugetlb_vmemmap.c
index 4558b81..77f44b8 100644
@@ -480,6 +480,44 @@ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
        return ret;
 }
 
+/**
+ * hugetlb_vmemmap_restore_folios - restore vmemmap for every folio on the list.
+ * @h:                 hstate.
+ * @folio_list:                list of folios.
+ * @non_hvo_folios:    Output list of folios for which vmemmap exists.
+ *
+ * Return: number of folios for which vmemmap was restored, or an error code
+ *             if an error was encountered restoring vmemmap for a folio.
+ *             Folios that have vmemmap are moved to the non_hvo_folios
+ *             list.  Processing of entries stops when the first error is
+ *             encountered. The folio that experienced the error and all
+ *             non-processed folios will remain on folio_list.
+ */
+long hugetlb_vmemmap_restore_folios(const struct hstate *h,
+                                       struct list_head *folio_list,
+                                       struct list_head *non_hvo_folios)
+{
+       struct folio *folio, *t_folio;
+       long restored = 0;
+       long ret = 0;
+
+       list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
+               if (folio_test_hugetlb_vmemmap_optimized(folio)) {
+                       ret = hugetlb_vmemmap_restore(h, &folio->page);
+                       if (ret)
+                               break;
+                       restored++;
+               }
+
+               /* Add non-optimized folios to output list */
+               list_move(&folio->lru, non_hvo_folios);
+       }
+
+       if (!ret)
+               ret = restored;
+       return ret;
+}
+
 /* Return true iff a HugeTLB whose vmemmap should and can be optimized. */
 static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
 {
mm/hugetlb_vmemmap.h
index c512e38..a0dcf49 100644
@@ -19,6 +19,9 @@
 
 #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
+long hugetlb_vmemmap_restore_folios(const struct hstate *h,
+                                       struct list_head *folio_list,
+                                       struct list_head *non_hvo_folios);
 void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
 void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
 
@@ -45,6 +48,14 @@ static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
        return 0;
 }
 
+static inline long hugetlb_vmemmap_restore_folios(const struct hstate *h,
+                                       struct list_head *folio_list,
+                                       struct list_head *non_hvo_folios)
+{
+       list_splice_init(folio_list, non_hvo_folios);
+       return 0;
+}
+
 static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
 {
 }