// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
#include <linux/uio.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

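/*
 * Per-CPU "preferred" tag check fault (TCF) mode, configurable from user
 * space via the mte_tcf_preferred sysfs file (see the bottom of this file).
 * mte_update_sctlr_user() consults it when a task permits more than one mode.
 */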
static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);

#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DEFINE_STATIC_KEY_FALSE(mte_async_mode);
EXPORT_SYMBOL_GPL(mte_async_mode);
#endif

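/*
 * Initialise the tags of a single page: restore them from the saved copy if
 * the old PTE was a (non-special) swap entry, otherwise clear them for the
 * new tagged mapping. Only called for pages without PG_mte_tagged set.
 */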
static void mte_sync_page_tags(struct page *page, pte_t old_pte,
			       bool check_swap, bool pte_is_tagged)
{
	if (check_swap && is_swap_pte(old_pte)) {
		swp_entry_t entry = pte_to_swp_entry(old_pte);

		if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
			return;
	}

	if (!pte_is_tagged)
		return;

	page_kasan_tag_reset(page);
	/*
	 * We need smp_wmb() in between setting the flags and clearing the
	 * tags because if another thread reads page->flags and builds a
	 * tagged address out of it, there is an actual dependency to the
	 * memory access, but on the current thread we do not guarantee that
	 * the new page->flags are visible before the tags were updated.
	 */
	smp_wmb();
	/*
	 * Test PG_mte_tagged again in case it was racing with another
	 * set_pte_at().
	 */
	if (!test_and_set_bit(PG_mte_tagged, &page->flags))
		mte_clear_page_tags(page_address(page));
}

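/*
 * Called from the set_pte_at() path when mapping a tagged page, to make
 * sure the tags of every page covered by the PTE are initialised first.
 */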
void mte_sync_tags(pte_t old_pte, pte_t pte)
{
	struct page *page = pte_page(pte);
	long i, nr_pages = compound_nr(page);
	bool check_swap = nr_pages == 1;
	bool pte_is_tagged = pte_tagged(pte);

	/* Early out if there's nothing to do */
	if (!check_swap && !pte_is_tagged)
		return;

	/* if PG_mte_tagged is set, tags have already been initialised */
	for (i = 0; i < nr_pages; i++, page++) {
		if (!test_bit(PG_mte_tagged, &page->flags))
			mte_sync_page_tags(page, old_pte, check_swap,
					   pte_is_tagged);
	}

	/* ensure the tags are visible before the PTE is set */
	smp_wmb();
}

int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = page_address(page1);
	addr2 = page_address(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);

	if (!system_supports_mte() || ret)
		return ret;

	/*
	 * If the page content is identical but at least one of the pages is
	 * tagged, return non-zero to avoid KSM merging. If only one of the
	 * pages is tagged, set_pte_at() may zero or change the tags of the
	 * other page via mte_sync_tags().
	 */
	if (test_bit(PG_mte_tagged, &page1->flags) ||
	    test_bit(PG_mte_tagged, &page2->flags))
		return addr1 != addr2;

	return ret;
}

static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
{
	/* Enable MTE Sync Mode for EL1. */
	sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf);
	isb();

	pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
}

#ifdef CONFIG_KASAN_HW_TAGS
void mte_enable_kernel_sync(void)
{
	/*
	 * Make sure we enter this function when no PE has set
	 * async mode previously.
	 */
	WARN_ONCE(system_uses_mte_async_mode(),
			"MTE async mode enabled system wide!");

	__mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
}

void mte_enable_kernel_async(void)
{
	__mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC);

	/*
	 * MTE async mode is set system wide by the first PE that
	 * executes this function.
	 *
	 * Note: If in future KASAN acquires a runtime switching
	 * mode in between sync and async, this strategy needs
	 * to be changed.
	 */
	if (!system_uses_mte_async_mode())
		static_branch_enable(&mte_async_mode);
}
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void)
{
	u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);

	if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
		/*
		 * Note: isb() is not required after this direct write
		 * because there is no indirect read subsequent to it
		 * (per ARM DDI 0487F.c table D13-1).
		 */
		write_sysreg_s(0, SYS_TFSR_EL1);

		kasan_report_async();
	}
}
#endif

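/*
 * Worked example of the resolution below (illustrative): a task that allows
 * both modes (MTE_CTRL_TCF_SYNC | MTE_CTRL_TCF_ASYNC) on a CPU whose
 * mte_tcf_preferred is MTE_CTRL_TCF_ASYNC runs in async mode; a task that
 * only allows sync runs in sync mode regardless of the CPU preference,
 * since (mte_ctrl & pref) is then 0.
 */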
static void mte_update_sctlr_user(struct task_struct *task)
{
	/*
	 * This must be called with preemption disabled and can only be called
	 * on the current or next task since the CPU must match where the thread
	 * is going to run. The caller is responsible for calling
	 * update_sctlr_el1() later in the same preemption disabled block.
	 */
	unsigned long sctlr = task->thread.sctlr_user;
	unsigned long mte_ctrl = task->thread.mte_ctrl;
	unsigned long pref, resolved_mte_tcf;

	pref = __this_cpu_read(mte_tcf_preferred);
	resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl;
	sctlr &= ~SCTLR_EL1_TCF0_MASK;
	if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
		sctlr |= SCTLR_EL1_TCF0_ASYNC;
	else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
		sctlr |= SCTLR_EL1_TCF0_SYNC;
	task->thread.sctlr_user = sctlr;
}

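/*
 * Reset the calling task's MTE state: discard any stale asynchronous tag
 * fault and disable tag checking / reset the tag generation mask via
 * set_mte_ctrl(current, 0). Typically invoked when a task starts a new
 * executable image.
 */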
void mte_thread_init_user(void)
{
	if (!system_supports_mte())
		return;

	/* clear any pending asynchronous tag fault */
	dsb(ish);
	write_sysreg_s(0, SYS_TFSRE0_EL1);
	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
	/* disable tag checking and reset tag generation mask */
	set_mte_ctrl(current, 0);
}

void mte_thread_switch(struct task_struct *next)
{
	if (!system_supports_mte())
		return;

	mte_update_sctlr_user(next);

	/*
	 * Check if an async tag exception occurred at EL1.
	 *
	 * Note: On the context switch path we rely on the dsb() present
	 * in __switch_to() to guarantee that the indirect writes to TFSR_EL1
	 * are synchronized before this point.
	 */
	isb();
	mte_check_tfsr_el1();
}

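/*
 * Program the MTE-related system registers on the local CPU. Called when the
 * MTE cpufeature is enabled on each CPU, and again from mte_suspend_exit()
 * below, since the register state may be lost across suspend.
 */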
void mte_cpu_setup(void)
{
	u64 rgsr;

	/*
	 * CnP must be enabled only after the MAIR_EL1 register has been set
	 * up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may
	 * lead to the wrong memory type being used for a brief window during
	 * CPU power-up.
	 *
	 * CnP is not a boot feature so MTE gets enabled before CnP, but let's
	 * make sure that is the case.
	 */
	BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
	BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);

	/* Normal Tagged memory type at the corresponding MAIR index */
	sysreg_clear_set(mair_el1,
			 MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED),
			 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED,
				      MT_NORMAL_TAGGED));

	write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1);

	/*
	 * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
	 * RGSR_EL1.SEED must be non-zero for IRG to produce
	 * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
	 * must initialize it.
	 */
	rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) <<
	       SYS_RGSR_EL1_SEED_SHIFT;
	if (rgsr == 0)
		rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT;
	write_sysreg_s(rgsr, SYS_RGSR_EL1);

	/* clear any pending tag check faults in TFSR*_EL1 */
	write_sysreg_s(0, SYS_TFSR_EL1);
	write_sysreg_s(0, SYS_TFSRE0_EL1);

	local_flush_tlb_all();
}

void mte_suspend_enter(void)
{
	if (!system_supports_mte())
		return;

	/*
	 * The barriers are required to guarantee that the indirect writes
	 * to TFSR_EL1 are synchronized before we report the state.
	 */
	dsb(nsh);
	isb();

	/* Report SYS_TFSR_EL1 before suspend entry */
	mte_check_tfsr_el1();
}

void mte_suspend_exit(void)
{
	if (!system_supports_mte())
		return;

	mte_cpu_setup();
}

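/*
 * Backend for the MTE bits of the PR_SET_TAGGED_ADDR_CTRL prctl(). The
 * PR_MTE_TAG_MASK "include" mask supplied by user space is inverted and
 * stored as a GCR_EL1.Excl exclusion mask. An illustrative user-space call
 * (not part of this file) enabling synchronous checking with tags 1-15
 * included in random tag generation:
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL,
 *	      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *	      (0xfffe << PR_MTE_TAG_SHIFT), 0, 0, 0);
 */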
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
	u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
			SYS_GCR_EL1_EXCL_MASK) << MTE_CTRL_GCR_USER_EXCL_SHIFT;

	if (!system_supports_mte())
		return 0;

	if (arg & PR_MTE_TCF_ASYNC)
		mte_ctrl |= MTE_CTRL_TCF_ASYNC;
	if (arg & PR_MTE_TCF_SYNC)
		mte_ctrl |= MTE_CTRL_TCF_SYNC;

	task->thread.mte_ctrl = mte_ctrl;
	if (task == current) {
		preempt_disable();
		mte_update_sctlr_user(task);
		update_sctlr_el1(task->thread.sctlr_user);
		preempt_enable();
	}

	return 0;
}

long get_mte_ctrl(struct task_struct *task)
{
	unsigned long ret;
	u64 mte_ctrl = task->thread.mte_ctrl;
	u64 incl = (~mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
		   SYS_GCR_EL1_EXCL_MASK;

	if (!system_supports_mte())
		return 0;

	ret = incl << PR_MTE_TAG_SHIFT;
	if (mte_ctrl & MTE_CTRL_TCF_ASYNC)
		ret |= PR_MTE_TCF_ASYNC;
	if (mte_ctrl & MTE_CTRL_TCF_SYNC)
		ret |= PR_MTE_TCF_SYNC;

	return ret;
}

/*
 * Access MTE tags in another process' address space as given in mm. Update
 * the number of tags copied. Return 0 if any tags copied, error otherwise.
 * Inspired by __access_remote_vm().
 */
static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
				struct iovec *kiov, unsigned int gup_flags)
{
	struct vm_area_struct *vma;
	void __user *buf = kiov->iov_base;
	size_t len = kiov->iov_len;
	int ret;
	int write = gup_flags & FOLL_WRITE;

	if (!access_ok(buf, len))
		return -EFAULT;

	if (mmap_read_lock_killable(mm))
		return -EIO;

	while (len) {
		unsigned long tags, offset;
		void *maddr;
		struct page *page = NULL;

		ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
					    &vma, NULL);
		if (ret <= 0)
			break;

		/*
		 * Only copy tags if the page has been mapped as PROT_MTE
		 * (PG_mte_tagged set). Otherwise the tags are not valid and
		 * not accessible to user. Moreover, an mprotect(PROT_MTE)
		 * would cause the existing tags to be cleared if the page
		 * was never mapped with PROT_MTE.
		 */
		if (!(vma->vm_flags & VM_MTE)) {
			ret = -EOPNOTSUPP;
			put_page(page);
			break;
		}
		WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));

		/* limit access to the end of the page */
		offset = offset_in_page(addr);
		tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);

		maddr = page_address(page);
		if (write) {
			tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
			set_page_dirty_lock(page);
		} else {
			tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
		}
		put_page(page);

		/* error accessing the tracer's buffer */
		if (!tags)
			break;

		len -= tags;
		buf += tags;
		addr += tags * MTE_GRANULE_SIZE;
	}
	mmap_read_unlock(mm);

	/* return an error if no tags copied */
	kiov->iov_len = buf - kiov->iov_base;
	if (!kiov->iov_len) {
		/* check for error accessing the tracee's address space */
		if (ret <= 0)
			return -EIO;
		else
			return -EFAULT;
	}

	return 0;
}

/*
 * Copy MTE tags in another process' address space at 'addr' to/from tracer's
 * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
 */
static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
			      struct iovec *kiov, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return -EPERM;

	if (!tsk->ptrace || (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return -EPERM;
	}

	ret = __access_remote_tags(mm, addr, kiov, gup_flags);
	mmput(mm);

	return ret;
}

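/*
 * Implements PTRACE_PEEKMTETAGS and PTRACE_POKEMTETAGS. A sketch of a tracer
 * reading tags from a stopped tracee (illustrative only, one tag per byte of
 * the buffer):
 *
 *	struct iovec iov = { .iov_base = tags_buf, .iov_len = num_tags };
 *	ptrace(PTRACE_PEEKMTETAGS, pid, addr, &iov);
 *
 * On return, iov.iov_len holds the number of tags actually copied.
 */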
int mte_ptrace_copy_tags(struct task_struct *child, long request,
			 unsigned long addr, unsigned long data)
{
	int ret;
	struct iovec kiov;
	struct iovec __user *uiov = (void __user *)data;
	unsigned int gup_flags = FOLL_FORCE;

	if (!system_supports_mte())
		return -EIO;

	if (get_user(kiov.iov_base, &uiov->iov_base) ||
	    get_user(kiov.iov_len, &uiov->iov_len))
		return -EFAULT;

	if (request == PTRACE_POKEMTETAGS)
		gup_flags |= FOLL_WRITE;

	/* align addr to the MTE tag granule */
	addr &= MTE_GRANULE_MASK;

	ret = access_remote_tags(child, addr, &kiov, gup_flags);
	if (!ret)
		ret = put_user(kiov.iov_len, &uiov->iov_len);

	return ret;
}

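/*
 * sysfs interface for the per-CPU preferred TCF mode, e.g. (illustrative):
 *
 *	echo sync > /sys/devices/system/cpu/cpu0/mte_tcf_preferred
 *
 * The new preference takes effect the next time a task's SCTLR_EL1.TCF0
 * field is recomputed in mte_update_sctlr_user().
 */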
static ssize_t mte_tcf_preferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	switch (per_cpu(mte_tcf_preferred, dev->id)) {
	case MTE_CTRL_TCF_ASYNC:
		return sysfs_emit(buf, "async\n");
	case MTE_CTRL_TCF_SYNC:
		return sysfs_emit(buf, "sync\n");
	default:
		return sysfs_emit(buf, "???\n");
	}
}

static ssize_t mte_tcf_preferred_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	u64 tcf;

	if (sysfs_streq(buf, "async"))
		tcf = MTE_CTRL_TCF_ASYNC;
	else if (sysfs_streq(buf, "sync"))
		tcf = MTE_CTRL_TCF_SYNC;
	else
		return -EINVAL;

	device_lock(dev);
	per_cpu(mte_tcf_preferred, dev->id) = tcf;
	device_unlock(dev);

	return count;
}
static DEVICE_ATTR_RW(mte_tcf_preferred);

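/*
 * Default every possible CPU's preference to async and create the
 * mte_tcf_preferred file for each CPU device.
 */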
static int register_mte_tcf_preferred_sysctl(void)
{
	int cpu;

	if (!system_supports_mte())
		return 0;

	for_each_possible_cpu(cpu) {
		per_cpu(mte_tcf_preferred, cpu) = MTE_CTRL_TCF_ASYNC;
		device_create_file(get_cpu_device(cpu),
				   &dev_attr_mte_tcf_preferred);
	}

	return 0;
}
subsys_initcall(register_mte_tcf_preferred_sysctl);