// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/uio.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
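
/*
 * Per-CPU "preferred" tag check fault mode, settable via the
 * mte_tcf_preferred sysfs attribute defined below; consulted by
 * mte_update_sctlr_user() when resolving a task's requested TCF modes.
 */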
static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);

#ifdef CONFIG_KASAN_HW_TAGS
/*
 * The asynchronous and asymmetric MTE modes have the same behavior for
 * store operations. This flag is set when either of these modes is enabled.
 */
DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
#endif

static void mte_sync_page_tags(struct page *page, pte_t old_pte,
                               bool check_swap, bool pte_is_tagged)
{
        if (check_swap && is_swap_pte(old_pte)) {
                swp_entry_t entry = pte_to_swp_entry(old_pte);

                if (!non_swap_entry(entry) && mte_restore_tags(entry, page)) {
                        set_page_mte_tagged(page);
                        return;
                }
        }

        if (!pte_is_tagged)
                return;

        /*
         * Test PG_mte_tagged again in case it was racing with another
         * set_pte_at().
         */
        if (!page_mte_tagged(page)) {
                mte_clear_page_tags(page_address(page));
                set_page_mte_tagged(page);
        }
}
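
/*
 * Called from the set_pte_at() path to initialise (or restore from swap)
 * the tags for the page(s) mapped by the new pte.
 */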
void mte_sync_tags(pte_t old_pte, pte_t pte)
{
        struct page *page = pte_page(pte);
        long i, nr_pages = compound_nr(page);
        bool check_swap = nr_pages == 1;
        bool pte_is_tagged = pte_tagged(pte);

        /* Early out if there's nothing to do */
        if (!check_swap && !pte_is_tagged)
                return;

        /* if PG_mte_tagged is set, tags have already been initialised */
        for (i = 0; i < nr_pages; i++, page++)
                if (!page_mte_tagged(page))
                        mte_sync_page_tags(page, old_pte, check_swap,
                                           pte_is_tagged);

        /* ensure the tags are visible before the PTE is set */
        smp_wmb();
}

int memcmp_pages(struct page *page1, struct page *page2)
{
        char *addr1, *addr2;
        int ret;

        addr1 = page_address(page1);
        addr2 = page_address(page2);
        ret = memcmp(addr1, addr2, PAGE_SIZE);

        if (!system_supports_mte() || ret)
                return ret;

        /*
         * If the page content is identical but at least one of the pages is
         * tagged, return non-zero to avoid KSM merging. If only one of the
         * pages is tagged, set_pte_at() may zero or change the tags of the
         * other page via mte_sync_tags().
         */
        if (page_mte_tagged(page1) || page_mte_tagged(page2))
                return addr1 != addr2;

        return ret;
}

static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
{
        /* Enable MTE Sync Mode for EL1. */
        sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
                         SYS_FIELD_PREP(SCTLR_EL1, TCF, tcf));
        isb();

        pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
}

#ifdef CONFIG_KASAN_HW_TAGS
void mte_enable_kernel_sync(void)
{
        /*
         * Make sure we enter this function when no PE has set
         * async mode previously.
         */
        WARN_ONCE(system_uses_mte_async_or_asymm_mode(),
                  "MTE async mode enabled system wide!");

        __mte_enable_kernel("synchronous", SCTLR_EL1_TCF_SYNC);
}

void mte_enable_kernel_async(void)
{
        __mte_enable_kernel("asynchronous", SCTLR_EL1_TCF_ASYNC);

        /*
         * MTE async mode is set system wide by the first PE that
         * executes this function.
         *
         * Note: If in future KASAN acquires a runtime switching
         * mode in between sync and async, this strategy needs
         * to be reviewed.
         */
        if (!system_uses_mte_async_or_asymm_mode())
                static_branch_enable(&mte_async_or_asymm_mode);
}

void mte_enable_kernel_asymm(void)
{
        if (cpus_have_cap(ARM64_MTE_ASYMM)) {
                __mte_enable_kernel("asymmetric", SCTLR_EL1_TCF_ASYMM);

                /*
                 * MTE asymm mode behaves as async mode for store
                 * operations. The mode is set system wide by the
                 * first PE that executes this function.
                 *
                 * Note: If in future KASAN acquires a runtime switching
                 * mode in between sync and async, this strategy needs
                 * to be reviewed.
                 */
                if (!system_uses_mte_async_or_asymm_mode())
                        static_branch_enable(&mte_async_or_asymm_mode);
        } else {
                /*
                 * If the CPU does not support MTE asymmetric mode the
                 * kernel falls back on synchronous mode which is the
                 * default for kasan=on.
                 */
                mte_enable_kernel_sync();
        }
}
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void)
{
        u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);

        if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
                /*
                 * Note: isb() is not required after this direct write
                 * because there is no indirect read subsequent to it
                 * (per ARM DDI 0487F.c table D13-1).
                 */
                write_sysreg_s(0, SYS_TFSR_EL1);

                kasan_report_async();
        }
}
#endif

/*
 * This is where we actually resolve the system and process MTE mode
 * configuration into an actual value in SCTLR_EL1 that affects
 * userspace.
 */
static void mte_update_sctlr_user(struct task_struct *task)
{
        /*
         * This must be called with preemption disabled and can only be called
         * on the current or next task since the CPU must match where the thread
         * is going to run. The caller is responsible for calling
         * update_sctlr_el1() later in the same preemption disabled block.
         */
        unsigned long sctlr = task->thread.sctlr_user;
        unsigned long mte_ctrl = task->thread.mte_ctrl;
        unsigned long pref, resolved_mte_tcf;

        pref = __this_cpu_read(mte_tcf_preferred);
        /*
         * If there is no overlap between the system preferred and
         * program requested values go with what was requested.
         */
        resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl;
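        /*
         * e.g. a task requesting sync|async on a CPU that prefers async
         * resolves to async (the preference overlaps the request and wins);
         * a task requesting only sync on the same CPU keeps sync, since
         * there is no overlap.
         */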
        sctlr &= ~SCTLR_EL1_TCF0_MASK;
        /*
         * Pick an actual setting. The order in which we check for
         * set bits and map into register values determines our
         * default order.
         */
        if (resolved_mte_tcf & MTE_CTRL_TCF_ASYMM)
                sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYMM);
        else if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
                sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYNC);
        else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
                sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, SYNC);
        task->thread.sctlr_user = sctlr;
}

static void mte_update_gcr_excl(struct task_struct *task)
{
        /*
         * SYS_GCR_EL1 will be set to current->thread.mte_ctrl value by
         * mte_set_user_gcr() in kernel_exit, but only if KASAN is enabled.
         */
        if (kasan_hw_tags_enabled())
                return;

        write_sysreg_s(
                ((task->thread.mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
                 SYS_GCR_EL1_EXCL_MASK) | SYS_GCR_EL1_RRND,
                SYS_GCR_EL1);
}

#ifdef CONFIG_KASAN_HW_TAGS
/* Only called from assembly, silence sparse */
void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
                                 __le32 *updptr, int nr_inst);

void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
                                 __le32 *updptr, int nr_inst)
{
        BUG_ON(nr_inst != 1); /* Branch -> NOP */

        if (kasan_hw_tags_enabled())
                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
#endif

void mte_thread_init_user(void)
{
        if (!system_supports_mte())
                return;

        /* clear any pending asynchronous tag fault */
        dsb(ish);
        write_sysreg_s(0, SYS_TFSRE0_EL1);
        clear_thread_flag(TIF_MTE_ASYNC_FAULT);
        /* disable tag checking and reset tag generation mask */
        set_mte_ctrl(current, 0);
}
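
/*
 * Called on context switch from __switch_to(), with preemption disabled
 * as mte_update_sctlr_user() requires.
 */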
void mte_thread_switch(struct task_struct *next)
{
        if (!system_supports_mte())
                return;

        mte_update_sctlr_user(next);
        mte_update_gcr_excl(next);

        /* TCO may not have been disabled on exception entry for the current task. */
        mte_disable_tco_entry(next);

        /*
         * Check if an async tag exception occurred at EL1.
         *
         * Note: On the context switch path we rely on the dsb() present
         * in __switch_to() to guarantee that the indirect writes to TFSR_EL1
         * are synchronized before this point.
         */
        isb();
        mte_check_tfsr_el1();
}
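
/*
 * Per-CPU MTE register setup. Runs when MTE is being enabled on a CPU
 * and again on resume from suspend (via mte_suspend_exit()), as these
 * registers are not retained across a CPU power cycle.
 */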
void mte_cpu_setup(void)
{
        u64 rgsr;

        /*
         * CnP must be enabled only after the MAIR_EL1 register has been set
         * up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may
         * lead to the wrong memory type being used for a brief window during
         * CPU power-up.
         *
         * CnP is not a boot feature so MTE gets enabled before CnP, but let's
         * make sure that is the case.
         */
        BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
        BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);

        /* Normal Tagged memory type at the corresponding MAIR index */
        sysreg_clear_set(mair_el1,
                         MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED),
                         MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED,
                                      MT_NORMAL_TAGGED));

        write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1);

        /*
         * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
         * RGSR_EL1.SEED must be non-zero for IRG to produce
         * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
         * must initialize it.
         */
        rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) <<
               SYS_RGSR_EL1_SEED_SHIFT;
        if (rgsr == 0)
                rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT;
        write_sysreg_s(rgsr, SYS_RGSR_EL1);

        /* clear any pending tag check faults in TFSR*_EL1 */
        write_sysreg_s(0, SYS_TFSR_EL1);
        write_sysreg_s(0, SYS_TFSRE0_EL1);

        local_flush_tlb_all();
}

void mte_suspend_enter(void)
{
        if (!system_supports_mte())
                return;

        /*
         * The barriers are required to guarantee that the indirect writes
         * to TFSR_EL1 are synchronized before we report the state.
         */
        dsb(nsh);
        isb();

        /* Report SYS_TFSR_EL1 before suspend entry */
        mte_check_tfsr_el1();
}

void mte_suspend_exit(void)
{
        if (!system_supports_mte())
                return;

        mte_cpu_setup();
}

long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
        u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
                        SYS_GCR_EL1_EXCL_MASK) << MTE_CTRL_GCR_USER_EXCL_SHIFT;

        if (!system_supports_mte())
                return 0;

        if (arg & PR_MTE_TCF_ASYNC)
                mte_ctrl |= MTE_CTRL_TCF_ASYNC;
        if (arg & PR_MTE_TCF_SYNC)
                mte_ctrl |= MTE_CTRL_TCF_SYNC;

        /*
         * If the system supports it and both sync and async modes are
         * specified then implicitly enable asymmetric mode.
         * Userspace could see a mix of both sync and async anyway due
         * to differing or changing defaults on CPUs.
         */
        if (cpus_have_cap(ARM64_MTE_ASYMM) &&
            (arg & PR_MTE_TCF_ASYNC) &&
            (arg & PR_MTE_TCF_SYNC))
                mte_ctrl |= MTE_CTRL_TCF_ASYMM;

        task->thread.mte_ctrl = mte_ctrl;
        if (task == current) {
                preempt_disable();
                mte_update_sctlr_user(task);
                mte_update_gcr_excl(task);
                update_sctlr_el1(task->thread.sctlr_user);
                preempt_enable();
        }

        return 0;
}

long get_mte_ctrl(struct task_struct *task)
{
        unsigned long ret;
        u64 mte_ctrl = task->thread.mte_ctrl;
        u64 incl = (~mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
                   SYS_GCR_EL1_EXCL_MASK;

        if (!system_supports_mte())
                return 0;

        ret = incl << PR_MTE_TAG_SHIFT;
        if (mte_ctrl & MTE_CTRL_TCF_ASYNC)
                ret |= PR_MTE_TCF_ASYNC;
        if (mte_ctrl & MTE_CTRL_TCF_SYNC)
                ret |= PR_MTE_TCF_SYNC;

        return ret;
}
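
/*
 * Illustrative example (not kernel code): userspace reaches
 * set_mte_ctrl()/get_mte_ctrl() via the tagged address ABI, e.g.
 *
 *      prctl(PR_SET_TAGGED_ADDR_CTRL,
 *            PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *            (0xfffe << PR_MTE_TAG_SHIFT),
 *            0, 0, 0);
 *
 * where the 0xfffe include mask excludes only tag 0 from IRG tag
 * generation (it is inverted into the GCR_EL1 exclude mask above).
 */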

/*
 * Access MTE tags in another process' address space as given in mm. Update
 * the number of tags copied. Return 0 if any tags copied, error otherwise.
 * Inspired by __access_remote_vm().
 */
static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
                                struct iovec *kiov, unsigned int gup_flags)
{
        struct vm_area_struct *vma;
        void __user *buf = kiov->iov_base;
        size_t len = kiov->iov_len;
        int ret = 0;
        int write = gup_flags & FOLL_WRITE;

        if (!access_ok(buf, len))
                return -EFAULT;

        if (mmap_read_lock_killable(mm))
                return -EIO;

        while (len) {
                unsigned long tags, offset;
                void *maddr;
                struct page *page = NULL;

                ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
                                            &vma, NULL);
                if (ret <= 0)
                        break;

                /*
                 * Only copy tags if the page has been mapped as PROT_MTE
                 * (PG_mte_tagged set). Otherwise the tags are not valid and
                 * not accessible to user. Moreover, an mprotect(PROT_MTE)
                 * would cause the existing tags to be cleared if the page
                 * was never mapped with PROT_MTE.
                 */
                if (!(vma->vm_flags & VM_MTE)) {
                        ret = -EOPNOTSUPP;
                        put_page(page);
                        break;
                }
                WARN_ON_ONCE(!page_mte_tagged(page));

                /* limit access to the end of the page */
                offset = offset_in_page(addr);
                tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);

                maddr = page_address(page);
                if (write) {
                        tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
                        set_page_dirty_lock(page);
                } else {
                        tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
                }
                put_page(page);

                /* error accessing the tracer's buffer */
                if (!tags)
                        break;

                len -= tags;
                buf += tags;
                addr += tags * MTE_GRANULE_SIZE;
        }
        mmap_read_unlock(mm);

        /* return an error if no tags copied */
        kiov->iov_len = buf - kiov->iov_base;
        if (!kiov->iov_len) {
                /* check for error accessing the tracee's address space */
                if (ret <= 0)
                        return ret;
                else
                        return -EFAULT;
        }

        return 0;
}

/*
 * Copy MTE tags in another process' address space at 'addr' to/from tracer's
 * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
 */
static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
                              struct iovec *kiov, unsigned int gup_flags)
{
        struct mm_struct *mm;
        int ret;

        mm = get_task_mm(tsk);
        if (!mm)
                return -EPERM;

        if (!tsk->ptrace || (current != tsk->parent) ||
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
             !ptracer_capable(tsk, mm->user_ns))) {
                mmput(mm);
                return -EPERM;
        }

        ret = __access_remote_tags(mm, addr, kiov, gup_flags);
        mmput(mm);

        return ret;
}

int mte_ptrace_copy_tags(struct task_struct *child, long request,
                         unsigned long addr, unsigned long data)
{
        int ret;
        struct iovec kiov;
        struct iovec __user *uiov = (void __user *)data;
        unsigned int gup_flags = FOLL_FORCE;

        if (!system_supports_mte())
                return -EIO;

        if (get_user(kiov.iov_base, &uiov->iov_base) ||
            get_user(kiov.iov_len, &uiov->iov_len))
                return -EFAULT;

        if (request == PTRACE_POKEMTETAGS)
                gup_flags |= FOLL_WRITE;

        /* align addr to the MTE tag granule */
        addr &= MTE_GRANULE_MASK;

        ret = access_remote_tags(child, addr, &kiov, gup_flags);
        if (!ret)
                ret = put_user(kiov.iov_len, &uiov->iov_len);

        return ret;
}
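
/*
 * Illustrative tracer-side usage (not kernel code), mirroring the ABI
 * described in Documentation/arm64/memory-tagging-extension.rst:
 *
 *      struct iovec iov = { tag_buf, sizeof(tag_buf) };
 *      ptrace(PTRACE_PEEKMTETAGS, pid, addr, &iov);
 *
 * On return, iov.iov_len holds the number of tags actually copied, one
 * byte-sized tag per MTE_GRANULE_SIZE (16-byte) granule.
 */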

static ssize_t mte_tcf_preferred_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        switch (per_cpu(mte_tcf_preferred, dev->id)) {
        case MTE_CTRL_TCF_ASYNC:
                return sysfs_emit(buf, "async\n");
        case MTE_CTRL_TCF_SYNC:
                return sysfs_emit(buf, "sync\n");
        case MTE_CTRL_TCF_ASYMM:
                return sysfs_emit(buf, "asymm\n");
        default:
                return sysfs_emit(buf, "???\n");
        }
}

static ssize_t mte_tcf_preferred_store(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t count)
{
        u64 tcf;

        if (sysfs_streq(buf, "async"))
                tcf = MTE_CTRL_TCF_ASYNC;
        else if (sysfs_streq(buf, "sync"))
                tcf = MTE_CTRL_TCF_SYNC;
        else if (cpus_have_cap(ARM64_MTE_ASYMM) && sysfs_streq(buf, "asymm"))
                tcf = MTE_CTRL_TCF_ASYMM;
        else
                return -EINVAL;

        device_lock(dev);
        per_cpu(mte_tcf_preferred, dev->id) = tcf;
        device_unlock(dev);

        return count;
}
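
/* Exposed as /sys/devices/system/cpu/cpu<N>/mte_tcf_preferred. */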
static DEVICE_ATTR_RW(mte_tcf_preferred);

static int register_mte_tcf_preferred_sysctl(void)
{
        unsigned int cpu;

        if (!system_supports_mte())
                return 0;

        for_each_possible_cpu(cpu) {
                per_cpu(mte_tcf_preferred, cpu) = MTE_CTRL_TCF_ASYNC;
                device_create_file(get_cpu_device(cpu),
                                   &dev_attr_mte_tcf_preferred);
        }

        return 0;
}
subsys_initcall(register_mte_tcf_preferred_sysctl);

/*
 * Return 0 on success, the number of bytes not probed otherwise.
 */
size_t mte_probe_user_range(const char __user *uaddr, size_t size)
{
        const char __user *end = uaddr + size;
        int err = 0;
        char val;

        __raw_get_user(val, uaddr, err);
        if (err)
                return size;

        uaddr = PTR_ALIGN(uaddr, MTE_GRANULE_SIZE);
        while (uaddr < end) {
                /*
                 * A read is sufficient for mte, the caller should have probed
                 * for the pte write permission if required.
                 */
                __raw_get_user(val, uaddr, err);
                if (err)
                        return end - uaddr;
                uaddr += MTE_GRANULE_SIZE;
        }

        return 0;
}