// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/export.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
9 unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
10 EXPORT_SYMBOL(shm_align_mask);
/*
 * Round @addr up to the next shm_align_mask boundary, then add the
 * sub-boundary "colour" implied by the file offset @pgoff, so that
 * shared mappings of the same offset get the same cache colour.
 */
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
/* Search direction for a free area: bottom-up (UP) or top-down (DOWN). */
enum mmap_allocation_direction {UP, DOWN};
18 static unsigned long arch_get_unmapped_area_common(struct file *filp,
19 unsigned long addr0, unsigned long len, unsigned long pgoff,
20 unsigned long flags, enum mmap_allocation_direction dir)
22 struct mm_struct *mm = current->mm;
23 struct vm_area_struct *vma;
24 unsigned long addr = addr0;
26 struct vm_unmapped_area_info info;
28 if (unlikely(len > TASK_SIZE))
31 if (flags & MAP_FIXED) {
32 /* Even MAP_FIXED mappings must reside within TASK_SIZE */
33 if (TASK_SIZE - len < addr)
37 * We do not accept a shared mapping if it would violate
38 * cache aliasing constraints.
40 if ((flags & MAP_SHARED) &&
41 ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
47 if (filp || (flags & MAP_SHARED))
50 /* requesting a specific address */
53 addr = COLOUR_ALIGN(addr, pgoff);
55 addr = PAGE_ALIGN(addr);
57 vma = find_vma(mm, addr);
58 if (TASK_SIZE - len >= addr &&
59 (!vma || addr + len <= vm_start_gap(vma)))
64 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
65 info.align_offset = pgoff << PAGE_SHIFT;
68 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
69 info.low_limit = PAGE_SIZE;
70 info.high_limit = mm->mmap_base;
71 addr = vm_unmapped_area(&info);
73 if (!(addr & ~PAGE_MASK))
77 * A failed mmap() very likely causes application failure,
78 * so fall back to the bottom-up function here. This scenario
79 * can happen with large stack limits and large mmap()
85 info.low_limit = mm->mmap_base;
86 info.high_limit = TASK_SIZE;
87 return vm_unmapped_area(&info);
90 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
91 unsigned long len, unsigned long pgoff, unsigned long flags)
93 return arch_get_unmapped_area_common(filp,
94 addr0, len, pgoff, flags, UP);
98 * There is no need to export this but sched.h declares the function as
99 * extern so making it static here results in an error.
101 unsigned long arch_get_unmapped_area_topdown(struct file *filp,
102 unsigned long addr0, unsigned long len, unsigned long pgoff,
105 return arch_get_unmapped_area_common(filp,
106 addr0, len, pgoff, flags, DOWN);
109 int __virt_addr_valid(volatile void *kaddr)
111 unsigned long vaddr = (unsigned long)kaddr;
113 if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
116 return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
118 EXPORT_SYMBOL_GPL(__virt_addr_valid);