// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
#include <linux/uio.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
u64 gcr_kernel_excl __ro_after_init;

static bool report_fault_once = true;

#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DEFINE_STATIC_KEY_FALSE(mte_async_mode);
EXPORT_SYMBOL_GPL(mte_async_mode);
#endif
static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
	pte_t old_pte = READ_ONCE(*ptep);

	if (check_swap && is_swap_pte(old_pte)) {
		swp_entry_t entry = pte_to_swp_entry(old_pte);

		if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
			return;
	}

	page_kasan_tag_reset(page);
	/*
	 * We need smp_wmb() in between setting the flags and clearing the
	 * tags because if another thread reads page->flags and builds a
	 * tagged address out of it, there is an actual dependency to the
	 * memory access, but on the current thread we do not guarantee that
	 * the new page->flags are visible before the tags were updated.
	 */
	smp_wmb();
	mte_clear_page_tags(page_address(page));
}
void mte_sync_tags(pte_t *ptep, pte_t pte)
{
	struct page *page = pte_page(pte);
	long i, nr_pages = compound_nr(page);
	bool check_swap = nr_pages == 1;

	/* if PG_mte_tagged is set, tags have already been initialised */
	for (i = 0; i < nr_pages; i++, page++) {
		if (!test_and_set_bit(PG_mte_tagged, &page->flags))
			mte_sync_page_tags(page, ptep, check_swap);
	}
}
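
/*
 * Caller-side sketch (illustrative, not part of this file): on arm64 the
 * expectation is that set_pte_at() invokes mte_sync_tags() when it installs
 * a present, MTE-tagged user PTE, roughly:
 *
 *	if (system_supports_mte() && pte_present(pte) && pte_tagged(pte))
 *		mte_sync_tags(ptep, pte);
 *
 * See arch/arm64/include/asm/pgtable.h for the authoritative condition.
 */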
int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = page_address(page1);
	addr2 = page_address(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);

	if (!system_supports_mte() || ret)
		return ret;

	/*
	 * If the page content is identical but at least one of the pages is
	 * tagged, return non-zero to avoid KSM merging. If only one of the
	 * pages is tagged, set_pte_at() may zero or change the tags of the
	 * other page via mte_sync_tags().
	 */
	if (test_bit(PG_mte_tagged, &page1->flags) ||
	    test_bit(PG_mte_tagged, &page2->flags))
		return addr1 != addr2;

	return ret;
}
void mte_init_tags(u64 max_tag)
{
	static bool gcr_kernel_excl_initialized;

	if (!gcr_kernel_excl_initialized) {
		/*
		 * The format of the tags in KASAN is 0xFF and in MTE is 0xF.
		 * This conversion extracts an MTE tag from a KASAN tag.
		 */
		u64 incl = GENMASK(FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT,
					     max_tag), 0);

		gcr_kernel_excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
		gcr_kernel_excl_initialized = true;
	}

	/* Enable the kernel exclude mask for random tags generation. */
	write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
}
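
/*
 * Illustrative walk-through of mte_init_tags() (not part of the original
 * file). Assuming KASAN passes its maximum pointer tag, max_tag = 0xFD:
 *
 *	FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT, 0xFD)	= 0xD
 *	incl = GENMASK(0xD, 0)				= 0x3fff
 *	gcr_kernel_excl = ~0x3fff & SYS_GCR_EL1_EXCL_MASK = 0xc000
 *
 * i.e. IRG may generate tags 0x0-0xd for the kernel, while 0xe and 0xf stay
 * excluded (KASAN reserves them for its invalid and match-all tags).
 */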
static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
{
	/* Enable MTE Sync Mode for EL1. */
	sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf);
	isb();

	pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
}
#ifdef CONFIG_KASAN_HW_TAGS
void mte_enable_kernel_sync(void)
{
	/*
	 * Make sure we enter this function when no PE has set
	 * async mode previously.
	 */
	WARN_ONCE(system_uses_mte_async_mode(),
			"MTE async mode enabled system wide!");

	__mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
}

void mte_enable_kernel_async(void)
{
	__mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC);

	/*
	 * MTE async mode is set system wide by the first PE that
	 * executes this function.
	 *
	 * Note: If in future KASAN acquires a runtime switching
	 * mode in between sync and async, this strategy needs
	 * to be reviewed.
	 */
	if (!system_uses_mte_async_mode())
		static_branch_enable(&mte_async_mode);
}
#endif
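
/*
 * Usage note (illustrative, not part of this file): with CONFIG_KASAN_HW_TAGS
 * the tag-check mode is normally selected once at early boot by the KASAN
 * core, e.g. via the kernel command line:
 *
 *	kasan.mode=sync		-> mte_enable_kernel_sync() on each CPU
 *	kasan.mode=async	-> mte_enable_kernel_async() on each CPU
 *
 * The exact call path is an assumption here; only the kasan.mode= parameter
 * itself is taken from the KASAN documentation.
 */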
void mte_set_report_once(bool state)
{
	WRITE_ONCE(report_fault_once, state);
}

bool mte_report_once(void)
{
	return READ_ONCE(report_fault_once);
}

#ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void)
{
	u64 tfsr_el1;

	if (!system_supports_mte())
		return;

	tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);

	if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
		/*
		 * Note: isb() is not required after this direct write
		 * because there is no indirect read subsequent to it
		 * (per ARM DDI 0487F.c table D13-1).
		 */
		write_sysreg_s(0, SYS_TFSR_EL1);

		kasan_report_async();
	}
}
#endif
static void update_sctlr_el1_tcf0(u64 tcf0)
{
	/* ISB required for the kernel uaccess routines */
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
	isb();
}

static void set_sctlr_el1_tcf0(u64 tcf0)
{
	/*
	 * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
	 * optimisation. Disable preemption so that it does not see
	 * the variable update before the SCTLR_EL1.TCF0 one.
	 */
	preempt_disable();
	current->thread.sctlr_tcf0 = tcf0;
	update_sctlr_el1_tcf0(tcf0);
	preempt_enable();
}
static void update_gcr_el1_excl(u64 excl)
{
	/*
	 * Note that the mask controlled by the user via prctl() is an
	 * include mask while GCR_EL1 accepts an exclude mask.
	 * No need for ISB since this only affects EL0 currently, implicit
	 * with ERET.
	 */
	sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
}

static void set_gcr_el1_excl(u64 excl)
{
	current->thread.gcr_user_excl = excl;

	/*
	 * SYS_GCR_EL1 will be set to the current->thread.gcr_user_excl value
	 * by mte_set_user_gcr() in kernel_exit.
	 */
}
void flush_mte_state(void)
{
	if (!system_supports_mte())
		return;

	/* clear any pending asynchronous tag fault */
	dsb(ish);
	write_sysreg_s(0, SYS_TFSRE0_EL1);
	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
	/* disable tag checking */
	set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
	/* reset tag generation mask */
	set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
}
void mte_thread_switch(struct task_struct *next)
{
	if (!system_supports_mte())
		return;

	/* avoid expensive SCTLR_EL1 accesses if no change */
	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
	else
		isb();

	/*
	 * Check if an async tag exception occurred at EL1.
	 *
	 * Note: On the context switch path we rely on the dsb() present
	 * in __switch_to() to guarantee that the indirect writes to TFSR_EL1
	 * are synchronized before this point.
	 * isb() above is required for the same reason.
	 */
	mte_check_tfsr_el1();
}
void mte_suspend_exit(void)
{
	if (!system_supports_mte())
		return;

	update_gcr_el1_excl(gcr_kernel_excl);
}
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
	u64 tcf0;
	u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
		       SYS_GCR_EL1_EXCL_MASK;

	if (!system_supports_mte())
		return 0;

	switch (arg & PR_MTE_TCF_MASK) {
	case PR_MTE_TCF_NONE:
		tcf0 = SCTLR_EL1_TCF0_NONE;
		break;
	case PR_MTE_TCF_SYNC:
		tcf0 = SCTLR_EL1_TCF0_SYNC;
		break;
	case PR_MTE_TCF_ASYNC:
		tcf0 = SCTLR_EL1_TCF0_ASYNC;
		break;
	default:
		return -EINVAL;
	}

	if (task != current) {
		task->thread.sctlr_tcf0 = tcf0;
		task->thread.gcr_user_excl = gcr_excl;
	} else {
		set_sctlr_el1_tcf0(tcf0);
		set_gcr_el1_excl(gcr_excl);
	}

	return 0;
}
long get_mte_ctrl(struct task_struct *task)
{
	unsigned long ret;
	u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;

	if (!system_supports_mte())
		return 0;

	ret = incl << PR_MTE_TAG_SHIFT;

	switch (task->thread.sctlr_tcf0) {
	case SCTLR_EL1_TCF0_NONE:
		ret |= PR_MTE_TCF_NONE;
		break;
	case SCTLR_EL1_TCF0_SYNC:
		ret |= PR_MTE_TCF_SYNC;
		break;
	case SCTLR_EL1_TCF0_ASYNC:
		ret |= PR_MTE_TCF_ASYNC;
		break;
	}

	return ret;
}
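
/*
 * Example (illustrative, not part of this file): userspace opting in to MTE
 * via the prctl() that lands in set_mte_ctrl() above. The synchronous mode
 * and the include mask covering all 16 tags are arbitrary choices; the
 * fallback #defines are only needed where the libc headers predate MTE.
 *
 *	#include <sys/prctl.h>
 *
 *	#ifndef PR_MTE_TCF_SYNC
 *	#define PR_MTE_TCF_SYNC		(1UL << 1)
 *	#define PR_MTE_TAG_SHIFT	3
 *	#endif
 *
 *	if (prctl(PR_SET_TAGGED_ADDR_CTRL,
 *		  PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *		  (0xffffUL << PR_MTE_TAG_SHIFT),
 *		  0, 0, 0))
 *		perror("PR_SET_TAGGED_ADDR_CTRL");
 */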
/*
 * Access MTE tags in another process' address space as given in mm. Update
 * the number of tags copied. Return 0 if any tags copied, error otherwise.
 * Inspired by __access_remote_vm().
 */
static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
				struct iovec *kiov, unsigned int gup_flags)
{
	struct vm_area_struct *vma;
	void __user *buf = kiov->iov_base;
	size_t len = kiov->iov_len;
	int ret;
	int write = gup_flags & FOLL_WRITE;

	if (!access_ok(buf, len))
		return -EFAULT;

	if (mmap_read_lock_killable(mm))
		return -EIO;

	while (len) {
		unsigned long tags, offset;
		void *maddr;
		struct page *page = NULL;

		ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
					    &vma, NULL);
		if (ret <= 0)
			break;

		/*
		 * Only copy tags if the page has been mapped as PROT_MTE
		 * (PG_mte_tagged set). Otherwise the tags are not valid and
		 * not accessible to user. Moreover, an mprotect(PROT_MTE)
		 * would cause the existing tags to be cleared if the page
		 * was never mapped with PROT_MTE.
		 */
		if (!(vma->vm_flags & VM_MTE)) {
			ret = -EOPNOTSUPP;
			put_page(page);
			break;
		}
		WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));

		/* limit access to the end of the page */
		offset = offset_in_page(addr);
		tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);

		maddr = page_address(page);
		if (write) {
			tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
			set_page_dirty_lock(page);
		} else {
			tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
		}
		put_page(page);

		/* error accessing the tracer's buffer */
		if (!tags)
			break;

		len -= tags;
		buf += tags;
		addr += tags * MTE_GRANULE_SIZE;
	}
	mmap_read_unlock(mm);

	/* return an error if no tags copied */
	kiov->iov_len = buf - kiov->iov_base;
	if (!kiov->iov_len) {
		/* check for error accessing the tracee's address space */
		if (ret <= 0)
			return -EIO;
		else
			return -EFAULT;
	}

	return 0;
}
/*
 * Copy MTE tags in another process' address space at 'addr' to/from tracer's
 * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
 */
static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
			      struct iovec *kiov, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return -EPERM;

	if (!tsk->ptrace || (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return -EPERM;
	}

	ret = __access_remote_tags(mm, addr, kiov, gup_flags);
	mmput(mm);

	return ret;
}
int mte_ptrace_copy_tags(struct task_struct *child, long request,
			 unsigned long addr, unsigned long data)
{
	int ret;
	struct iovec kiov;
	struct iovec __user *uiov = (void __user *)data;
	unsigned int gup_flags = FOLL_FORCE;

	if (!system_supports_mte())
		return -EIO;

	if (get_user(kiov.iov_base, &uiov->iov_base) ||
	    get_user(kiov.iov_len, &uiov->iov_len))
		return -EFAULT;

	if (request == PTRACE_POKEMTETAGS)
		gup_flags |= FOLL_WRITE;

	/* align addr to the MTE tag granule */
	addr &= MTE_GRANULE_MASK;

	ret = access_remote_tags(child, addr, &kiov, gup_flags);
	if (!ret)
		ret = put_user(kiov.iov_len, &uiov->iov_len);

	return ret;
}
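
/*
 * Example (illustrative, not part of this file): a tracer reading the tags
 * of one page of a stopped tracee via the ptrace request handled above.
 * One tag byte is transferred per 16-byte MTE granule and iov_len is updated
 * with the number of tags actually copied. 'pid' and 'addr' are assumed to
 * name a stopped tracee and a PROT_MTE mapping in it; PTRACE_PEEKMTETAGS
 * itself comes from the arm64 UAPI ptrace headers.
 *
 *	#include <stdio.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *
 *	char tags[4096 / 16];
 *	struct iovec iov = { .iov_base = tags, .iov_len = sizeof(tags) };
 *
 *	if (ptrace(PTRACE_PEEKMTETAGS, pid, (void *)addr, &iov) == 0)
 *		printf("%zu tags copied\n", iov.iov_len);
 */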