2 * arch/xtensa/include/asm/initialize_mmu.h
6 * For the new V3 MMU we remap the TLB from virtual == physical
7 * to the standard Linux mapping used in earlier MMUs.
9 * For the MMU we also support a new configuration register that
10 * specifies how the S32C1I instruction operates with the cache
13 * This file is subject to the terms and conditions of the GNU General
14 * Public License. See the file "COPYING" in the main directory of
15 * this archive for more details.
17 * Copyright (C) 2008 - 2012 Tensilica, Inc.
19 * Marc Gauthier <marc@tensilica.com>
20 * Pete Delaney <piet@tensilica.com>
23 #ifndef _XTENSA_INITIALIZE_MMU_H
24 #define _XTENSA_INITIALIZE_MMU_H
26 #include <asm/pgtable.h>
27 #include <asm/vectors.h>
/* Page-attribute encodings written into TLB entries below: a cache mode
 * combined with the hardware write/execute permission bits (full PTP MMU).
 */
29 #if XCHAL_HAVE_PTP_MMU
30 #define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
31 #define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
/* NOTE(review): duplicate CA_WRITEBACK definition -- this line looks like
 * the body of a lost #else (non-PTP-MMU) branch; the #else/#endif lines
 * appear to be missing from this excerpt. Verify against the upstream
 * header before building.
 */
33 #define CA_WRITEBACK (0x4)
/* Hardware release number for RC-2009.0; cores at least this new are
 * tested below for the ATOMCTL register.
 */
38 #define XTENSA_HWVERSION_RC_2009_0 230000
42 #if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
/*
44 * We Have Atomic Operation Control (ATOMCTL) Register; Initialize it.
45 * For details see Documentation/xtensa/atomctl.rst
 */
47 #if XCHAL_DCACHE_IS_COHERENT
48 movi a3, 0x25 /* For SMP/MX -- internal for writeback,
                  * (tail of this comment lost in this excerpt) */
/* NOTE(review): an #else branch presumably preceded the next movi, and the
 * wsr that actually writes a3 into ATOMCTL is not visible here -- the
 * intervening lines appear to be missing from this excerpt; verify against
 * the upstream header.
 */
52 movi a3, 0x29 /* non-MX -- Most cores use Std Memory
53 * Controllers which usually can't use RCW */
57 #endif /* XCHAL_HAVE_S32C1I &&
58 * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) */
/* MMU v3 initialization: remap from the reset-time identity mapping
 * (virtual == physical) to the standard Linux KSEG/KIO layout, in the
 * numbered steps commented below.
 * NOTE(review): many instruction lines (TLB writes, loop bodies, jumps)
 * appear to be missing from this excerpt; the code below is kept
 * byte-identical and only annotated.
 */
61 #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
66 #if !XCHAL_HAVE_VECBASE
67 # error "MMU v3 requires reloc vectors"
75 1: movi a2, 0x10000000
/* Pick a temporary mapping window that does not overlap the kernel load
 * address. NOTE(review): the #else between these two #define lines appears
 * to be missing from this excerpt.
 */
77 #if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
78 #define TEMP_MAPPING_VADDR 0x40000000
80 #define TEMP_MAPPING_VADDR 0x00000000
83 /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
/* a2 = temp-window vaddr tagged with the spanning TLB way index. */
85 movi a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY
90 /* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
91 * and jump to the new mapping.
 */
96 addi a3, a3, CA_BYPASS
97 addi a7, a2, 5 - XCHAL_SPANNING_WAY
104 addi a5, a2, -XCHAL_SPANNING_WAY
108 /* Step 3: unmap everything other than current area.
109 * Start at 0x60000000, wrap around, and end with 0x20000000
 */
111 2: movi a4, 0x20000000
118 /* Step 4: Setup MMU with the requested static mappings. */
/* KSEG cached: a5 = vaddr|way, a4 = paddr|attributes. */
125 movi a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
126 movi a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
/* KSEG bypass (uncached alias of the same physical range). */
130 movi a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
131 movi a4, XCHAL_KSEG_PADDR + CA_BYPASS
135 #ifdef CONFIG_XTENSA_KSEG_512M
/* Second 256MB half of a 512MB KSEG: cached and bypass aliases. */
136 movi a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
137 movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
141 movi a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
142 movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
/* KIO (I/O region): cached and bypass mappings of the default I/O paddr. */
147 movi a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY
148 movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
152 movi a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY
153 movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
159 /* Jump to self, using final mappings. */
164 /* Step 5: remove temporary mapping. */
173 #endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
174 XCHAL_HAVE_SPANNING_WAY */
/* initialize_cacheattr: on noMMU configurations, program cache attributes
 * taken from CONFIG_MEMMAP_CACHEATTR (presumably one nibble per memory
 * region -- confirm against the full source) via either the MPU or the
 * spanning TLB way.
 * NOTE(review): the macro body is truncated in this excerpt -- no .endm is
 * visible and several lines are missing; code kept byte-identical.
 */
178 .macro initialize_cacheattr
180 #if !defined(CONFIG_MMU) && (XCHAL_HAVE_TLBS || XCHAL_HAVE_MPU)
181 #if CONFIG_MEMMAP_CACHEATTR == 0x22222222 && XCHAL_HAVE_PTP_MMU
182 #error Default MEMMAP_CACHEATTR of 0x22222222 does not work with full MMU.
/* Attribute translation table (16 word entries). The .Lattribute_table
 * label referenced below is not visible in this excerpt -- it presumably
 * labels this data; verify against the full source.
 */
189 .long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00
190 .long 0x006600, 0x000000, 0x000000, 0x000000
191 .long 0x000000, 0x000000, 0x000000, 0x000000
192 .long 0x000000, 0x000000, 0x000000, 0x000000
/* Apparent MPU variant: a3 = table base, a4 = requested attribute nibbles,
 * a6 = number of MPU entries to program.
 */
195 movi a3, .Lattribute_table
196 movi a4, CONFIG_MEMMAP_CACHEATTR
198 movi a6, XCHAL_MPU_ENTRIES
/* Apparent TLB variant: a5 = spanning way, a6 = mask clearing attribute
 * bits, a4 = requested attribute nibbles.
 */
216 movi a5, XCHAL_SPANNING_WAY
217 movi a6, ~_PAGE_ATTRIB_MASK
218 movi a4, CONFIG_MEMMAP_CACHEATTR
242 #endif /*__ASSEMBLY__*/
244 #endif /* _XTENSA_INITIALIZE_MMU_H */