static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
-static inline bool PageHugeFreed(struct page *head)
-{
- return page_private(head + 4) == -1UL;
-}
-
-static inline void SetPageHugeFreed(struct page *head)
-{
- set_page_private(head + 4, -1UL);
-}
-
-static inline void ClearPageHugeFreed(struct page *head)
-{
- set_page_private(head + 4, 0);
-}
-
/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
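For context (not part of this hunk): the SetHPageFreed()/ClearHPageFreed()/HPageFreed() calls introduced below are assumed to come from generic hugetlb page-flag macros that replace the open-coded page_private(head + 4) helpers removed above. A minimal sketch of that assumed interface, keeping the flag in the head page's page->private bits:

enum hugetlb_page_flags {
	HPG_freed = 0,			/* other hugetlb flags elided in this sketch */
	__NR_HPAGEFLAGS,
};

/* Sketch only: assumed test/set/clear generators for hugetlb-specific flags. */
#define TESTHPAGEFLAG(uname, flname)					\
static inline int HPage##uname(struct page *page)			\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)					\
static inline void SetHPage##uname(struct page *page)			\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)					\
static inline void ClearHPage##uname(struct page *page)		\
	{ clear_bit(HPG_##flname, &(page->private)); }

#define HPAGEFLAG(uname, flname)					\
	TESTHPAGEFLAG(uname, flname)					\
	SETHPAGEFLAG(uname, flname)					\
	CLEARHPAGEFLAG(uname, flname)

HPAGEFLAG(Freed, freed)	/* generates HPageFreed(), SetHPageFreed(), ClearHPageFreed() */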
list_move(&page->lru, &h->hugepage_freelists[nid]);
h->free_huge_pages++;
h->free_huge_pages_node[nid]++;
- SetPageHugeFreed(page);
+ SetHPageFreed(page);
}
static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
list_move(&page->lru, &h->hugepage_activelist);
set_page_refcounted(page);
- ClearPageHugeFreed(page);
+ ClearHPageFreed(page);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
return page;
{
INIT_LIST_HEAD(&page->lru);
set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
+ hugetlb_set_page_subpool(page, NULL);
set_hugetlb_cgroup(page, NULL);
set_hugetlb_cgroup_rsvd(page, NULL);
spin_lock(&hugetlb_lock);
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
- ClearPageHugeFreed(page);
+ ClearHPageFreed(page);
spin_unlock(&hugetlb_lock);
}
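Similarly, hugetlb_set_page_subpool() above is assumed to be the accessor pair that keeps the subpool pointer in the first tail page's page->private rather than in open-coded page_private() uses; a plausible sketch:

/* Sketch only: assumed subpool accessors for a hugetlb head page. */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (struct hugepage_subpool *)page_private(hpage + 1);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					    struct hugepage_subpool *subpool)
{
	/* Stored in the first tail page, leaving the head page's private for flags. */
	set_page_private(hpage + 1, (unsigned long)subpool);
}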
* We should make sure that the page is already on the free list
* when it is dissolved.
*/
- if (unlikely(!PageHugeFreed(head))) {
+ if (unlikely(!HPageFreed(head))) {
spin_unlock(&hugetlb_lock);
cond_resched();
return pages << h->order;
}
-int hugetlb_reserve_pages(struct inode *inode,
+/* Return true if reservation was successful, false otherwise. */
+bool hugetlb_reserve_pages(struct inode *inode,
long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags)
{
- long ret, chg, add = -1;
+ long chg, add = -1;
struct hstate *h = hstate_inode(inode);
struct hugepage_subpool *spool = subpool_inode(inode);
struct resv_map *resv_map;
/* This should never happen */
if (from > to) {
VM_WARN(1, "%s called with a negative range\n", __func__);
- return -EINVAL;
+ return false;
}
/*
* without using reserves
*/
if (vm_flags & VM_NORESERVE)
- return 0;
+ return true;
/*
* Shared mappings base their reservation on the number of pages that
/* Private mapping. */
resv_map = resv_map_alloc();
if (!resv_map)
- return -ENOMEM;
+ return false;
chg = to - from;
set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
}
- if (chg < 0) {
- ret = chg;
+ if (chg < 0)
goto out_err;
- }
-
- ret = hugetlb_cgroup_charge_cgroup_rsvd(
- hstate_index(h), chg * pages_per_huge_page(h), &h_cg);
- if (ret < 0) {
- ret = -ENOMEM;
+ if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
+ chg * pages_per_huge_page(h), &h_cg) < 0)
goto out_err;
- }
if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
/* For private mappings, the hugetlb_cgroup uncharge info hangs
* reservations already in place (gbl_reserve).
*/
gbl_reserve = hugepage_subpool_get_pages(spool, chg);
- if (gbl_reserve < 0) {
- ret = -ENOSPC;
+ if (gbl_reserve < 0)
goto out_uncharge_cgroup;
- }
/*
* Check enough hugepages are available for the reservation.
* Hand the pages back to the subpool if there are not
*/
- ret = hugetlb_acct_memory(h, gbl_reserve);
- if (ret < 0) {
+ if (hugetlb_acct_memory(h, gbl_reserve) < 0)
goto out_put_pages;
- }
/*
* Account for the reservations made. Shared mappings record regions
if (unlikely(add < 0)) {
hugetlb_acct_memory(h, -gbl_reserve);
- ret = add;
goto out_put_pages;
} else if (unlikely(chg > add)) {
/*
hugetlb_acct_memory(h, -rsv_adjust);
}
}
- return 0;
+ return true;
+
out_put_pages:
/* put back original number of pages, chg */
(void)hugepage_subpool_put_pages(spool, chg);
region_abort(resv_map, from, to, regions_needed);
if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
kref_put(&resv_map->refs, resv_map_release);
- return ret;
+ return false;
}
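Since hugetlb_reserve_pages() no longer returns an errno, callers are expected to test the boolean result and choose their own error code. A hedged sketch of the caller-side change (modelled on a hugetlbfs mmap-style path; the surrounding names and context are assumptions, not part of this hunk):

	/* Before: ret = hugetlb_reserve_pages(...); if (ret) goto out; */
	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				   vma->vm_pgoff >> huge_page_order(h),
				   len >> huge_page_shift(h), vma,
				   vma->vm_flags))
		goto out;	/* reservation failed: caller reports -ENOMEM itself */
	ret = 0;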
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,