// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
        unsigned short sel;

        /*
         * Make sure that the cached DS and ES descriptors match the updated
         * LDT.
         */
        savesegment(ds, sel);
        if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
                loadsegment(ds, sel);

        savesegment(es, sel);
        if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
                loadsegment(es, sel);
#endif
}

/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
        struct mm_struct *mm = __mm;

        if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
                return;

        load_mm_ldt(mm);

        refresh_ldt_segments();
}
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
        struct ldt_struct *new_ldt;
        unsigned int alloc_size;

        if (num_entries > LDT_ENTRIES)
                return NULL;

        new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
        if (!new_ldt)
                return NULL;

        BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
        alloc_size = num_entries * LDT_ENTRY_SIZE;

        /*
         * Xen is very picky: it requires a page-aligned LDT that has no
         * trailing nonzero bytes in any page that contains LDT descriptors.
         * Keep it simple: zero the whole allocation and never allocate less
         * than PAGE_SIZE.
         */
        if (alloc_size > PAGE_SIZE)
                new_ldt->entries = vzalloc(alloc_size);
        else
                new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

        if (!new_ldt->entries) {
                kfree(new_ldt);
                return NULL;
        }

        /* The new LDT isn't aliased for PTI yet. */
        new_ldt->slot = -1;

        new_ldt->nr_entries = num_entries;
        return new_ldt;
}
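
/*
 * For orientation — the lifecycle implied by the helpers in this file
 * (write_ldt() below performs the real sequence):
 *
 *	new = alloc_ldt_struct(n);	// zeroed, slot == -1
 *	memcpy(new->entries, ...);	// carry over any existing entries
 *	finalize_ldt_struct(new);	// paravirt hook; LDT now immutable
 *	map_ldt_struct(mm, new, slot);	// create the PTI alias, if enabled
 *	install_ldt(mm, new);		// publish pointer + IPI all CPUs
 */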
#ifdef CONFIG_PAGE_TABLE_ISOLATION

static void do_sanity_check(struct mm_struct *mm,
                            bool had_kernel_mapping,
                            bool had_user_mapping)
{
        if (mm->context.ldt) {
                /*
                 * We already had an LDT. The top-level entry should already
                 * have been allocated and synchronized with the usermode
                 * tables.
                 */
                WARN_ON(!had_kernel_mapping);
                if (static_cpu_has(X86_FEATURE_PTI))
                        WARN_ON(!had_user_mapping);
        } else {
                /*
                 * This is the first time we're mapping an LDT for this process.
                 * Sync the pgd to the usermode tables.
                 */
                WARN_ON(had_kernel_mapping);
                if (static_cpu_has(X86_FEATURE_PTI))
                        WARN_ON(had_user_mapping);
        }
}
#ifdef CONFIG_X86_PAE

static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
{
        p4d_t *p4d;
        pud_t *pud;

        if (pgd->pgd == 0)
                return NULL;

        p4d = p4d_offset(pgd, va);
        if (p4d->p4d == 0)
                return NULL;

        pud = pud_offset(p4d, va);
        if (pud->pud == 0)
                return NULL;

        return pmd_offset(pud, va);
}

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
        pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
        pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
        pmd_t *k_pmd, *u_pmd;

        k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
        u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);

        if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
                set_pmd(u_pmd, *k_pmd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
        pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
        pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
        bool had_kernel, had_user;
        pmd_t *k_pmd, *u_pmd;

        k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
        u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
        had_kernel = (k_pmd->pmd != 0);
        had_user   = (u_pmd->pmd != 0);

        do_sanity_check(mm, had_kernel, had_user);
}
#else /* !CONFIG_X86_PAE */

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
        pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);

        if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
                set_pgd(kernel_to_user_pgdp(pgd), *pgd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
        pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
        bool had_kernel = (pgd->pgd != 0);
        bool had_user   = (kernel_to_user_pgdp(pgd)->pgd != 0);

        do_sanity_check(mm, had_kernel, had_user);
}

#endif /* CONFIG_X86_PAE */
/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
        unsigned long va;
        bool is_vmalloc;
        spinlock_t *ptl;
        pgd_t *pgd;
        int i, nr_pages;

        if (!static_cpu_has(X86_FEATURE_PTI))
                return 0;

        /*
         * Any given ldt_struct should have map_ldt_struct() called at most
         * once.
         */
        WARN_ON(ldt->slot != -1);

        /* Check if the current mappings are sane */
        sanity_check_ldt_mapping(mm);

        /*
         * Did we already have the top level entry allocated? We can't
         * use pgd_none() for this because it doesn't do anything on
         * 4-level page table kernels.
         */
        pgd = pgd_offset(mm, LDT_BASE_ADDR);

        is_vmalloc = is_vmalloc_addr(ldt->entries);

        nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

        for (i = 0; i < nr_pages; i++) {
                unsigned long offset = i << PAGE_SHIFT;
                const void *src = (char *)ldt->entries + offset;
                unsigned long pfn;
                pgprot_t pte_prot;
                pte_t pte, *ptep;

                va = (unsigned long)ldt_slot_va(slot) + offset;
                pfn = is_vmalloc ? vmalloc_to_pfn(src) :
                        page_to_pfn(virt_to_page(src));
                /*
                 * Treat the PTI LDT range as a *userspace* range.
                 * get_locked_pte() will allocate all needed pagetables
                 * and account for them in this mm.
                 */
                ptep = get_locked_pte(mm, va, &ptl);
                if (!ptep)
                        return -ENOMEM;
                /*
                 * Map it RO so the easy to find address is not a primary
                 * target via some kernel interface which misses a
                 * permission check.
                 */
                pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
                /* Filter out unsupported __PAGE_KERNEL* bits: */
                pgprot_val(pte_prot) &= __supported_pte_mask;
                pte = pfn_pte(pfn, pte_prot);
                set_pte_at(mm, va, ptep, pte);
                pte_unmap_unlock(ptep, ptl);
        }

        /* Propagate LDT mapping to the user page-table */
        map_ldt_struct_to_user(mm);

        ldt->slot = slot;
        return 0;
}
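
/*
 * Note on the slot addressing used above: ldt_slot_va() lives in
 * asm/mmu_context.h and resolves a slot to a fixed alias inside the
 * PTI LDT region, roughly (a sketch, not the exact in-tree code):
 *
 *	va = LDT_BASE_ADDR + slot * LDT_SLOT_STRIDE;
 *
 * so slots 0 and 1 give each mm two alternating alias addresses for
 * its LDT — see the slot flip in write_ldt() below.
 */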
static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
        unsigned long va;
        int i, nr_pages;

        if (!ldt)
                return;

        /* LDT map/unmap is only required for PTI */
        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

        for (i = 0; i < nr_pages; i++) {
                unsigned long offset = i << PAGE_SHIFT;
                spinlock_t *ptl;
                pte_t *ptep;

                va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
                ptep = get_locked_pte(mm, va, &ptl);
                pte_clear(mm, va, ptep);
                pte_unmap_unlock(ptep, ptl);
        }

        va = (unsigned long)ldt_slot_va(ldt->slot);
        flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
}
#else /* !CONFIG_PAGE_TABLE_ISOLATION */

static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
        return 0;
}

static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        struct mmu_gather tlb;
        unsigned long start = LDT_BASE_ADDR;
        unsigned long end = LDT_END_ADDR;

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        tlb_gather_mmu(&tlb, mm, start, end);
        free_pgd_range(&tlb, start, end, start, end);
        tlb_finish_mmu(&tlb, start, end);
#endif
}
/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
        paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
        mutex_lock(&mm->context.lock);

        /* Synchronizes with READ_ONCE in load_mm_ldt. */
        smp_store_release(&mm->context.ldt, ldt);

        /* Activate the LDT for all CPUs using current's mm. */
        on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

        mutex_unlock(&mm->context.lock);
}
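
/*
 * For reference, the reader side that the smp_store_release() above
 * pairs with is load_mm_ldt() in asm/mmu_context.h. A simplified
 * sketch of that pairing, not the exact in-tree implementation:
 *
 *	struct ldt_struct *ldt = READ_ONCE(mm->context.ldt);
 *
 *	if (unlikely(ldt))
 *		set_ldt(ldt->entries, ldt->nr_entries);
 *	else
 *		clear_LDT();
 *
 * The release/acquire-style pairing guarantees that a CPU which
 * observes the new pointer also observes the fully initialized
 * entries behind it.
 */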
static void free_ldt_struct(struct ldt_struct *ldt)
{
        if (likely(!ldt))
                return;

        paravirt_free_ldt(ldt->entries, ldt->nr_entries);
        if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
                vfree_atomic(ldt->entries);
        else
                free_page((unsigned long)ldt->entries);
        kfree(ldt);
}
/*
 * Called on fork from arch_dup_mmap(). Just copy the current LDT state,
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
        struct ldt_struct *new_ldt;
        int retval = 0;

        if (!old_mm)
                return 0;

        mutex_lock(&old_mm->context.lock);
        if (!old_mm->context.ldt)
                goto out_unlock;

        new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
        if (!new_ldt) {
                retval = -ENOMEM;
                goto out_unlock;
        }

        memcpy(new_ldt->entries, old_mm->context.ldt->entries,
               new_ldt->nr_entries * LDT_ENTRY_SIZE);
        finalize_ldt_struct(new_ldt);

        retval = map_ldt_struct(mm, new_ldt, 0);
        if (retval) {
                free_ldt_pgtables(mm);
                free_ldt_struct(new_ldt);
                goto out_unlock;
        }
        mm->context.ldt = new_ldt;

out_unlock:
        mutex_unlock(&old_mm->context.lock);
        return retval;
}
/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
        free_ldt_struct(mm->context.ldt);
        mm->context.ldt = NULL;
}

void ldt_arch_exit_mmap(struct mm_struct *mm)
{
        free_ldt_pgtables(mm);
}
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
        struct mm_struct *mm = current->mm;
        unsigned long entries_size;
        int retval;

        down_read(&mm->context.ldt_usr_sem);

        if (!mm->context.ldt) {
                retval = 0;
                goto out_unlock;
        }

        if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

        entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
        if (entries_size > bytecount)
                entries_size = bytecount;

        if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
                retval = -EFAULT;
                goto out_unlock;
        }

        if (entries_size != bytecount) {
                /* Zero-fill the rest and pretend we read bytecount bytes. */
                if (clear_user(ptr + entries_size, bytecount - entries_size)) {
                        retval = -EFAULT;
                        goto out_unlock;
                }
        }
        retval = bytecount;

out_unlock:
        up_read(&mm->context.ldt_usr_sem);
        return retval;
}
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
        /* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
        unsigned long size = 5 * sizeof(struct desc_struct);
#else
        unsigned long size = 128;
#endif
        if (bytecount > size)
                bytecount = size;
        if (clear_user(ptr, bytecount))
                return -EFAULT;
        return bytecount;
}
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
        struct mm_struct *mm = current->mm;
        struct ldt_struct *new_ldt, *old_ldt;
        unsigned int old_nr_entries, new_nr_entries;
        struct user_desc ldt_info;
        struct desc_struct ldt;
        int error;

        error = -EINVAL;
        if (bytecount != sizeof(ldt_info))
                goto out;
        error = -EFAULT;
        if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
                goto out;

        error = -EINVAL;
        if (ldt_info.entry_number >= LDT_ENTRIES)
                goto out;
        if (ldt_info.contents == 3) {
                if (oldmode)
                        goto out;
                if (ldt_info.seg_not_present == 0)
                        goto out;
        }

        if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
            LDT_empty(&ldt_info)) {
                /* The user wants to clear the entry. */
                memset(&ldt, 0, sizeof(ldt));
        } else {
                if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
                        error = -EINVAL;
                        goto out;
                }

                fill_ldt(&ldt, &ldt_info);
                if (oldmode)
                        ldt.avl = 0;
        }

        if (down_write_killable(&mm->context.ldt_usr_sem))
                return -EINTR;

        old_ldt        = mm->context.ldt;
        old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
        new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

        error = -ENOMEM;
        new_ldt = alloc_ldt_struct(new_nr_entries);
        if (!new_ldt)
                goto out_unlock;

        if (old_ldt)
                memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

        new_ldt->entries[ldt_info.entry_number] = ldt;
        finalize_ldt_struct(new_ldt);

        /*
         * If we are using PTI, map the new LDT into the userspace pagetables.
         * If there is already an LDT, use the other slot so that other CPUs
         * will continue to use the old LDT until install_ldt() switches
         * them over to the new LDT.
         */
        error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
        if (error) {
                /*
                 * This only can fail for the first LDT setup. If an LDT is
                 * already installed then the PTE page is already
                 * populated. Mop up a half populated page table.
                 */
                if (!WARN_ON_ONCE(old_ldt))
                        free_ldt_pgtables(mm);
                free_ldt_struct(new_ldt);
                goto out_unlock;
        }

        install_ldt(mm, new_ldt);
        unmap_ldt_struct(mm, old_ldt);
        free_ldt_struct(old_ldt);
        error = 0;

out_unlock:
        up_write(&mm->context.ldt_usr_sem);
out:
        return error;
}
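
/*
 * Illustration of the two-slot dance above for an mm that already has
 * an LDT installed (PTI enabled):
 *
 *	old_ldt->slot == 0  ->  map new_ldt into slot 1
 *	install_ldt()       ->  every CPU switches to the slot-1 alias
 *	unmap/free old_ldt  ->  slot 0 is free again for the next write
 *
 * CPUs still executing on the old LDT keep a valid mapping until the
 * IPI in install_ldt() has switched them over.
 */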
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
                unsigned long , bytecount)
{
        int ret = -ENOSYS;

        switch (func) {
        case 0:
                ret = read_ldt(ptr, bytecount);
                break;
        case 1:
                ret = write_ldt(ptr, bytecount, 1);
                break;
        case 2:
                ret = read_default_ldt(ptr, bytecount);
                break;
        case 0x11:
                ret = write_ldt(ptr, bytecount, 0);
                break;
        }
        /*
         * The SYSCALL_DEFINE() macros give us an 'unsigned long'
         * return type, but the ABI for sys_modify_ldt() expects
         * 'int'. This cast gives us an int-sized value in %rax
         * for the return code. The 'unsigned' is necessary so
         * the compiler does not try to sign-extend the negative
         * return codes into the high half of the register when
         * taking the value from int->long.
         */
        return (unsigned int)ret;
}
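
/*
 * For illustration only — a minimal userspace sketch (not part of this
 * file) exercising the syscall above. The func values match the switch
 * statement: 0 = read_ldt, 1 = write_ldt (oldmode), 2 = read_default_ldt,
 * 0x11 = write_ldt. The helper name install_data_segment is hypothetical.
 *
 *	#include <asm/ldt.h>		// struct user_desc
 *	#include <sys/syscall.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int install_data_segment(void *base)
 *	{
 *		struct user_desc desc;
 *
 *		memset(&desc, 0, sizeof(desc));
 *		desc.entry_number   = 0;	// LDT slot to fill
 *		desc.base_addr      = (unsigned long)base;
 *		desc.limit          = 0xfffff;
 *		desc.seg_32bit      = 1;	// required without X86_16BIT
 *		desc.limit_in_pages = 1;
 *		desc.useable        = 1;
 *
 *		// 0x11 selects the new-mode write_ldt() path above;
 *		// modify_ldt has no glibc wrapper, so use syscall(2).
 *		return syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
 *	}
 */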