// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 ARM Ltd.
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/uio.h>

#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
	pte_t old_pte = READ_ONCE(*ptep);

	if (check_swap && is_swap_pte(old_pte)) {
		swp_entry_t entry = pte_to_swp_entry(old_pte);

		if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
			return;
	}

	mte_clear_page_tags(page_address(page));
}
void mte_sync_tags(pte_t *ptep, pte_t pte)
{
	struct page *page = pte_page(pte);
	long i, nr_pages = compound_nr(page);
	bool check_swap = nr_pages == 1;

	/* if PG_mte_tagged is set, tags have already been initialised */
	for (i = 0; i < nr_pages; i++, page++) {
		if (!test_and_set_bit(PG_mte_tagged, &page->flags))
			mte_sync_page_tags(page, ptep, check_swap);
	}

	/* ensure the tags are visible before the PTE is set */
	smp_wmb();
}
int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = page_address(page1);
	addr2 = page_address(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);

	if (!system_supports_mte() || ret)
		return ret;

	/*
	 * If the page content is identical but at least one of the pages is
	 * tagged, return non-zero to avoid KSM merging. If only one of the
	 * pages is tagged, set_pte_at() may zero or change the tags of the
	 * other page via mte_sync_tags().
	 */
	if (test_bit(PG_mte_tagged, &page1->flags) ||
	    test_bit(PG_mte_tagged, &page2->flags))
		return addr1 != addr2;

	return ret;
}
static void update_sctlr_el1_tcf0(u64 tcf0)
{
	/* ISB required for the kernel uaccess routines */
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
	isb();
}
static void set_sctlr_el1_tcf0(u64 tcf0)
{
	/*
	 * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
	 * optimisation. Disable preemption so that it does not see
	 * the variable update before the SCTLR_EL1.TCF0 one.
	 */
	preempt_disable();
	current->thread.sctlr_tcf0 = tcf0;
	update_sctlr_el1_tcf0(tcf0);
	preempt_enable();
}
static void update_gcr_el1_excl(u64 incl)
{
	u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;

	/*
	 * Note that 'incl' is an include mask (controlled by the user via
	 * prctl()) while GCR_EL1 accepts an exclude mask.
	 * No need for ISB since this only affects EL0 currently, implicit
	 * with ERET.
	 */
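	/*
	 * Worked example (illustrative, not from the original source): an
	 * include mask of 0x3 permits tags 0 and 1 to be generated by IRG,
	 * so the programmed exclude mask becomes
	 * ~0x3 & SYS_GCR_EL1_EXCL_MASK = 0xfffc, i.e. tags 2-15 are excluded.
	 */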
	sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
}
static void set_gcr_el1_excl(u64 incl)
{
	current->thread.gcr_user_incl = incl;
	update_gcr_el1_excl(incl);
}
void flush_mte_state(void)
{
	if (!system_supports_mte())
		return;

	/* clear any pending asynchronous tag fault */
	dsb(ish);
	write_sysreg_s(0, SYS_TFSRE0_EL1);
	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
	/* disable tag checking */
	set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
	/* reset tag generation mask */
	set_gcr_el1_excl(0);
}
void mte_thread_switch(struct task_struct *next)
{
	if (!system_supports_mte())
		return;

	/* avoid expensive SCTLR_EL1 accesses if no change */
	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
	update_gcr_el1_excl(next->thread.gcr_user_incl);
}
void mte_suspend_exit(void)
{
	if (!system_supports_mte())
		return;

	update_gcr_el1_excl(current->thread.gcr_user_incl);
}
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
	u64 tcf0;
	u64 gcr_incl = (arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT;

	if (!system_supports_mte())
		return 0;

	switch (arg & PR_MTE_TCF_MASK) {
	case PR_MTE_TCF_NONE:
		tcf0 = SCTLR_EL1_TCF0_NONE;
		break;
	case PR_MTE_TCF_SYNC:
		tcf0 = SCTLR_EL1_TCF0_SYNC;
		break;
	case PR_MTE_TCF_ASYNC:
		tcf0 = SCTLR_EL1_TCF0_ASYNC;
		break;
	default:
		return -EINVAL;
	}

	if (task != current) {
		task->thread.sctlr_tcf0 = tcf0;
		task->thread.gcr_user_incl = gcr_incl;
	} else {
		set_sctlr_el1_tcf0(tcf0);
		set_gcr_el1_excl(gcr_incl);
	}

	return 0;
}
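/*
 * Illustrative userspace sketch (not part of this file): set_mte_ctrl() above
 * is normally reached via the tagged address ABI prctl(), e.g.
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL,
 *	      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *	      (0xfffe << PR_MTE_TAG_SHIFT),
 *	      0, 0, 0);
 *
 * which selects synchronous tag check faults and allows all non-zero tags in
 * the randomly generated set.
 */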
long get_mte_ctrl(struct task_struct *task)
{
	unsigned long ret;

	if (!system_supports_mte())
		return 0;

	ret = task->thread.gcr_user_incl << PR_MTE_TAG_SHIFT;

	switch (task->thread.sctlr_tcf0) {
	case SCTLR_EL1_TCF0_NONE:
		ret |= PR_MTE_TCF_NONE;
		break;
	case SCTLR_EL1_TCF0_SYNC:
		ret |= PR_MTE_TCF_SYNC;
		break;
	case SCTLR_EL1_TCF0_ASYNC:
		ret |= PR_MTE_TCF_ASYNC;
		break;
	}

	return ret;
}
/*
 * Access MTE tags in another process' address space as given in mm. Update
 * the number of tags copied. Return 0 if any tags copied, error otherwise.
 * Inspired by __access_remote_vm().
 */
static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
				struct iovec *kiov, unsigned int gup_flags)
{
	struct vm_area_struct *vma;
	void __user *buf = kiov->iov_base;
	size_t len = kiov->iov_len;
	int ret;
	int write = gup_flags & FOLL_WRITE;

	if (!access_ok(buf, len))
		return -EFAULT;

	if (mmap_read_lock_killable(mm))
		return -EIO;
	while (len) {
		unsigned long tags, offset;
		void *maddr;
		struct page *page = NULL;

		ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
					    &vma, NULL);
		if (ret <= 0)
			break;
		/*
		 * Only copy tags if the page has been mapped as PROT_MTE
		 * (PG_mte_tagged set). Otherwise the tags are not valid and
		 * not accessible to user. Moreover, an mprotect(PROT_MTE)
		 * would cause the existing tags to be cleared if the page
		 * was never mapped with PROT_MTE.
		 */
		if (!(vma->vm_flags & VM_MTE)) {
			ret = -EOPNOTSUPP;
			put_page(page);
			break;
		}
		WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));
		/* limit access to the end of the page */
		offset = offset_in_page(addr);
		tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);
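		/*
		 * Illustrative: with 4K pages and a 16-byte MTE granule, at
		 * most 256 tags are transferred per loop iteration.
		 */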
		maddr = page_address(page);
		if (write) {
			tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
			set_page_dirty_lock(page);
		} else {
			tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
		}
		put_page(page);

		/* error accessing the tracer's buffer */
		if (!tags)
			break;

		len -= tags;
		buf += tags;
		addr += tags * MTE_GRANULE_SIZE;
	}
	mmap_read_unlock(mm);

	/* return an error if no tags copied */
	kiov->iov_len = buf - kiov->iov_base;
	if (!kiov->iov_len) {
		/* check for error accessing the tracee's address space */
		if (ret <= 0)
			return ret < 0 ? ret : -EIO;
		/* otherwise the copy to/from the tracer's buffer faulted */
		return -EFAULT;
	}

	return 0;
}
/*
 * Copy MTE tags in another process' address space at 'addr' to/from tracer's
 * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
 */
static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
			      struct iovec *kiov, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return -EPERM;

	if (!tsk->ptrace || (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return -EPERM;
	}

	ret = __access_remote_tags(mm, addr, kiov, gup_flags);
	mmput(mm);

	return ret;
}
int mte_ptrace_copy_tags(struct task_struct *child, long request,
			 unsigned long addr, unsigned long data)
{
	int ret;
	struct iovec kiov;
	struct iovec __user *uiov = (void __user *)data;
	unsigned int gup_flags = FOLL_FORCE;

	if (!system_supports_mte())
		return -EIO;

	if (get_user(kiov.iov_base, &uiov->iov_base) ||
	    get_user(kiov.iov_len, &uiov->iov_len))
		return -EFAULT;

	if (request == PTRACE_POKEMTETAGS)
		gup_flags |= FOLL_WRITE;

	/* align addr to the MTE tag granule */
	addr &= MTE_GRANULE_MASK;

	ret = access_remote_tags(child, addr, &kiov, gup_flags);
	if (!ret)
		ret = put_user(kiov.iov_len, &uiov->iov_len);

	return ret;
}
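/*
 * Illustrative tracer-side sketch (not part of this file): the tags of a
 * tracee mapping are read with PTRACE_PEEKMTETAGS, one tag per byte of
 * iov_base, e.g.
 *
 *	uint8_t tags[256];
 *	struct iovec iov = { .iov_base = tags, .iov_len = sizeof(tags) };
 *
 *	ptrace(PTRACE_PEEKMTETAGS, pid, addr, &iov);
 *
 * On return iov.iov_len holds the number of tags actually copied;
 * PTRACE_POKEMTETAGS works the same way in the opposite direction.
 */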