// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 * Author: Muchun Song <songmuchun@bytedance.com>
 */
9 #ifndef _LINUX_HUGETLB_VMEMMAP_H
10 #define _LINUX_HUGETLB_VMEMMAP_H
11 #include <linux/hugetlb.h>
14 * Reserve one vmemmap page, all vmemmap addresses are mapped to it. See
15 * Documentation/vm/vmemmap_dedup.rst.
17 #define HUGETLB_VMEMMAP_RESERVE_SIZE PAGE_SIZE
18 #define HUGETLB_VMEMMAP_RESERVE_PAGES (HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);

/* Size in bytes of the vmemmap backing one HugeTLB page of hstate @h. */
static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
{
	return pages_per_huge_page(h) * sizeof(struct page);
}

/*
 * Return how many vmemmap size associated with a HugeTLB page that can be
 * optimized and can be freed to the buddy allocator.
 */
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * The optimization remaps all tail vmemmap pages onto the reserved
	 * head page, which only works when struct page is a power-of-2 size
	 * so pages never straddle a vmemmap page boundary.
	 */
	if (!is_power_of_2(sizeof(struct page)))
		return 0;

	return size > 0 ? size : 0;
}
#else
/* !CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP: no-op stubs. */
static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
	return 0;
}

static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
}

static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
57 static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
59 return hugetlb_vmemmap_optimizable_size(h) != 0;
61 #endif /* _LINUX_HUGETLB_VMEMMAP_H */