/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 ARM Ltd.
 */
#ifndef __ASM_MTE_KASAN_H
#define __ASM_MTE_KASAN_H

#include <asm/compiler.h>
#include <asm/cputype.h>
#include <asm/mte-def.h>

#ifndef __ASSEMBLY__

#include <linux/types.h>

#ifdef CONFIG_ARM64_MTE

/*
 * These functions are meant to be only used from KASAN runtime through
 * the arch_*() interface defined in asm/memory.h.
 * These functions don't include system_supports_mte() checks,
 * as KASAN only calls them when MTE is supported and enabled.
 */

static inline u8 mte_get_ptr_tag(void *ptr)
{
        /* Note: The format of KASAN tags is 0xF<x> */
        u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);

        return tag;
}
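
/*
 * Worked example (illustrative, assuming MTE_TAG_SHIFT == 56 from
 * asm/mte-def.h): a pointer such as 0x0700000012345678 carries 0x07 in its
 * top byte, so mte_get_ptr_tag() returns 0xF0 | 0x07 == 0xF7.
 */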

/* Get allocation tag for the address. */
static inline u8 mte_get_mem_tag(void *addr)
{
        /* LDG loads the allocation tag of addr into addr's tag bits. */
        asm(__MTE_PREAMBLE "ldg %0, [%0]"
            : "+r" (addr));

        return mte_get_ptr_tag(addr);
}

/* Generate a random tag. */
static inline u8 mte_get_random_tag(void)
{
        void *addr;

        /* IRG inserts a randomly generated tag into the address register. */
        asm(__MTE_PREAMBLE "irg %0, %0"
            : "=r" (addr));

        return mte_get_ptr_tag(addr);
}
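
/*
 * Building blocks for mte_set_mem_tag_range(): STG/STZG tag (and, for STZG,
 * zero) a single MTE_GRANULE_SIZE granule and post-index the pointer by 16
 * bytes, while DC GVA/GZVA tag (and zero) a whole DCZID_EL0-sized block at
 * once.
 */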

static inline u64 __stg_post(u64 p)
{
        asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16"
                     : "+r"(p) : : "memory");
        return p;
}

static inline u64 __stzg_post(u64 p)
{
        asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16"
                     : "+r"(p) : : "memory");
        return p;
}

static inline void __dc_gva(u64 p)
{
        asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r"(p) : "memory");
}

static inline void __dc_gzva(u64 p)
{
        asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r"(p) : "memory");
}

/*
 * Assign allocation tags for a region of memory based on the pointer tag.
 * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
 * size must be MTE_GRANULE_SIZE aligned.
 */
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
                                         bool init)
{
        u64 curr, mask, dczid, dczid_bs, dczid_dzp, end1, end2, end3;

        /* Read DC G(Z)VA block size from the system register. */
        dczid = read_cpuid(DCZID_EL0);
        dczid_bs = 4ul << (dczid & 0xf);
        dczid_dzp = (dczid >> 4) & 1;
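
        /*
         * Example (illustrative): a common DCZID_EL0 value of 0x4 encodes
         * BS == 4 and DZP == 0, i.e. dczid_bs == 4 << 4 == 64 bytes per
         * DC G(Z)VA block, with the block operations permitted.
         */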

        curr = (u64)__tag_set(addr, tag);
        mask = dczid_bs - 1;
        /* STG/STZG up to the end of the first block. */
        end1 = curr | mask;
        end3 = curr + size;
        /* DC GVA / GZVA in [end1, end2) */
        end2 = end3 & ~mask;
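
        /*
         * Worked example (illustrative, assuming dczid_bs == 64 and
         * dczid_dzp == 0): for curr == 0x1010 and size == 0x100, mask == 0x3f,
         * so end1 == 0x103f, end3 == 0x1110 and end2 == 0x1100. STG then
         * covers [0x1010, 0x1040), DC GVA covers [0x1040, 0x1100) and a
         * final STG covers [0x1100, 0x1110).
         */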

        /*
         * The following code uses STG on the first DC GVA block even if the
         * start address is aligned - it appears to be faster than an alignment
         * check + conditional branch. Also, if the range size is at least 2 DC
         * GVA blocks, the first two loops can use post-condition to save one
         * branch each.
         */
#define SET_MEMTAG_RANGE(stg_post, dc_gva)                      \
        do {                                                    \
                if (!dczid_dzp && size >= 2 * dczid_bs) {       \
                        do {                                    \
                                curr = stg_post(curr);          \
                        } while (curr < end1);                  \
                                                                \
                        do {                                    \
                                dc_gva(curr);                   \
                                curr += dczid_bs;               \
                        } while (curr < end2);                  \
                }                                               \
                                                                \
                while (curr < end3)                             \
                        curr = stg_post(curr);                  \
        } while (0)

        if (init)
                SET_MEMTAG_RANGE(__stzg_post, __dc_gzva);
        else
                SET_MEMTAG_RANGE(__stg_post, __dc_gva);
#undef SET_MEMTAG_RANGE
}
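
/*
 * Illustrative sketch (not part of the original header): how a KASAN-style
 * caller could combine the helpers above when tagging an allocation. The
 * function name is hypothetical; real callers live in the KASAN runtime
 * behind the arch_*() wrappers in asm/memory.h.
 */
static inline void *mte_tag_region_sketch(void *addr, size_t size)
{
        /* Pick a random tag, returned in the KASAN 0xF<x> format. */
        u8 tag = mte_get_random_tag();

        /* Tag and zero-initialise every granule in [addr, addr + size). */
        mte_set_mem_tag_range(addr, size, tag, true);

        /* Return a pointer whose top-byte tag matches the memory tags. */
        return (void *)__tag_set(addr, tag);
}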

/*
 * Enable MTE tag checking for the kernel: synchronous, asynchronous, or
 * asymmetric (synchronous reads, asynchronous writes) fault reporting.
 */
void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
void mte_enable_kernel_asymm(void);

#else /* CONFIG_ARM64_MTE */

/*
 * Stubs for !CONFIG_ARM64_MTE: tag queries report 0xFF and the tagging and
 * enable operations are no-ops.
 */
static inline u8 mte_get_ptr_tag(void *ptr)
{
        return 0xFF;
}

static inline u8 mte_get_mem_tag(void *addr)
{
        return 0xFF;
}

static inline u8 mte_get_random_tag(void)
{
        return 0xFF;
}

static inline void mte_set_mem_tag_range(void *addr, size_t size,
                                         u8 tag, bool init)
{
}

static inline void mte_enable_kernel_sync(void)
{
}

static inline void mte_enable_kernel_async(void)
{
}

static inline void mte_enable_kernel_asymm(void)
{
}

#endif /* CONFIG_ARM64_MTE */

#endif /* __ASSEMBLY__ */

#endif /* __ASM_MTE_KASAN_H */