2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1994 Waldorf GMBH
7 * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle
8 * Copyright (C) 1996 Paul M. Antoine
9 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
11 #ifndef _ASM_PROCESSOR_H
12 #define _ASM_PROCESSOR_H
14 #include <linux/atomic.h>
15 #include <linux/cpumask.h>
16 #include <linux/sizes.h>
17 #include <linux/threads.h>
19 #include <asm/cachectl.h>
21 #include <asm/cpu-info.h>
22 #include <asm/dsemul.h>
23 #include <asm/mipsregs.h>
24 #include <asm/prefetch.h>
 * Return current instruction pointer ("program counter").
/*
 * Yield the address of the instruction at the point of expansion, using
 * GCC's local-label and label-as-value ("&&") extensions.  Same contract
 * as before: evaluates to a void * pointing at this code location.
 */
#define current_text_addr()						\
({									\
	__label__ __here;						\
__here:									\
	&&__here;							\
})
32 * System setup and hardware flags..
/* Counts of virtual coherency exceptions (data / instruction) taken. */
extern unsigned int vced_count, vcei_count;
38 * MIPS does have an arch_pick_mmap_layout()
40 #define HAVE_ARCH_PICK_MMAP_LAYOUT 1
43 #ifdef CONFIG_KVM_GUEST
44 /* User space process size is limited to 1GB in KVM Guest Mode */
45 #define TASK_SIZE 0x3fff8000UL
48 * User space process size: 2GB. This is hardcoded into a few places,
49 * so don't change it unless you know what you are doing.
51 #define TASK_SIZE 0x80000000UL
54 #define STACK_TOP_MAX TASK_SIZE
56 #define TASK_IS_32BIT_ADDR 1
62 * User space process size: 1TB. This is hardcoded into a few places,
63 * so don't change it unless you know what you are doing. TASK_SIZE
64 * is limited to 1TB by the R4000 architecture; R10000 and better can
65 * support 16TB; the architectural reserve for future expansion is
/* 32-bit (compat) tasks: just under 2 GB of user address space. */
#define TASK_SIZE32 0x7fff8000UL
#ifdef CONFIG_MIPS_VA_BITS_48
/* Limited by the CPU's reported VA bits, capped at 48. */
#define TASK_SIZE64 (0x1UL << ((cpu_data[0].vmbits>48)?48:cpu_data[0].vmbits))
/* Fixed 1 TB limit otherwise (see R4000 note above). */
#define TASK_SIZE64 0x10000000000UL
/* Effective limit is selected per-thread from its 32-bit address flag. */
#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
#define STACK_TOP_MAX TASK_SIZE64
/*
 * Like TASK_SIZE, but for an arbitrary task: tests the *target* task's
 * TIF_32BIT_ADDR flag rather than the current thread's.
 */
#define TASK_SIZE_OF(tsk) \
	(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
/* True when the current thread runs with a 32-bit address space. */
#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)

/* Span within which the VDSO base address may be randomized. */
#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)

extern unsigned long mips_stack_top(void);
/* Top of the user stack, computed at runtime by mips_stack_top(). */
#define STACK_TOP mips_stack_top()
90 * This decides where the kernel will search for a free chunk of vm
91 * space during mmap's.
/* mmap() search base: one third of the way into the task's address space. */
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
96 #define NUM_FPU_REGS 32
98 #ifdef CONFIG_CPU_HAS_MSA
99 # define FPU_REG_WIDTH 128
101 # define FPU_REG_WIDTH 64
105 __u32 val32[FPU_REG_WIDTH / 32];
106 __u64 val64[FPU_REG_WIDTH / 64];
109 #ifdef CONFIG_CPU_LITTLE_ENDIAN
110 # define FPR_IDX(width, idx) (idx)
112 # define FPR_IDX(width, idx) ((idx) ^ ((64 / (width)) - 1))
115 #define BUILD_FPR_ACCESS(width) \
116 static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx) \
118 return fpr->val##width[FPR_IDX(width, idx)]; \
121 static inline void set_fpr##width(union fpureg *fpr, unsigned idx, \
124 fpr->val##width[FPR_IDX(width, idx)] = val; \
131 * It would be nice to add some more fields for emulator statistics,
132 * the additional information is private to the FPU emulator for now.
133 * See arch/mips/include/asm/fpu_emulator.h.
136 struct mips_fpu_struct {
137 union fpureg fpr[NUM_FPU_REGS];
142 #define NUM_DSP_REGS 6
144 typedef unsigned long dspreg_t;
146 struct mips_dsp_state {
147 dspreg_t dspr[NUM_DSP_REGS];
148 unsigned int dspcontrol;
151 #define INIT_CPUMASK { \
155 struct mips3264_watch_reg_state {
156 /* The width of watchlo is 32 in a 32 bit kernel and 64 in a
157 64 bit kernel. We use unsigned long as it has the same
159 unsigned long watchlo[NUM_WATCH_REGS];
160 /* Only the mask and IRW bits from watchhi. */
161 u16 watchhi[NUM_WATCH_REGS];
164 union mips_watch_reg_state {
165 struct mips3264_watch_reg_state mips3264;
168 #if defined(CONFIG_CPU_CAVIUM_OCTEON)
170 struct octeon_cop2_state {
171 /* DMFC2 rt, 0x0201 */
172 unsigned long cop2_crc_iv;
173 /* DMFC2 rt, 0x0202 (Set with DMTC2 rt, 0x1202) */
174 unsigned long cop2_crc_length;
175 /* DMFC2 rt, 0x0200 (set with DMTC2 rt, 0x4200) */
176 unsigned long cop2_crc_poly;
177 /* DMFC2 rt, 0x0402; DMFC2 rt, 0x040A */
178 unsigned long cop2_llm_dat[2];
179 /* DMFC2 rt, 0x0084 */
180 unsigned long cop2_3des_iv;
181 /* DMFC2 rt, 0x0080; DMFC2 rt, 0x0081; DMFC2 rt, 0x0082 */
182 unsigned long cop2_3des_key[3];
183 /* DMFC2 rt, 0x0088 (Set with DMTC2 rt, 0x0098) */
184 unsigned long cop2_3des_result;
185 /* DMFC2 rt, 0x0111 (FIXME: Read Pass1 Errata) */
186 unsigned long cop2_aes_inp0;
187 /* DMFC2 rt, 0x0102; DMFC2 rt, 0x0103 */
188 unsigned long cop2_aes_iv[2];
189 /* DMFC2 rt, 0x0104; DMFC2 rt, 0x0105; DMFC2 rt, 0x0106; DMFC2
191 unsigned long cop2_aes_key[4];
192 /* DMFC2 rt, 0x0110 */
193 unsigned long cop2_aes_keylen;
194 /* DMFC2 rt, 0x0100; DMFC2 rt, 0x0101 */
195 unsigned long cop2_aes_result[2];
196 /* DMFC2 rt, 0x0240; DMFC2 rt, 0x0241; DMFC2 rt, 0x0242; DMFC2
197 * rt, 0x0243; DMFC2 rt, 0x0244; DMFC2 rt, 0x0245; DMFC2 rt,
198 * 0x0246; DMFC2 rt, 0x0247; DMFC2 rt, 0x0248; DMFC2 rt,
199 * 0x0249; DMFC2 rt, 0x024A; DMFC2 rt, 0x024B; DMFC2 rt,
200 * 0x024C; DMFC2 rt, 0x024D; DMFC2 rt, 0x024E - Pass2 */
201 unsigned long cop2_hsh_datw[15];
202 /* DMFC2 rt, 0x0250; DMFC2 rt, 0x0251; DMFC2 rt, 0x0252; DMFC2
203 * rt, 0x0253; DMFC2 rt, 0x0254; DMFC2 rt, 0x0255; DMFC2 rt,
204 * 0x0256; DMFC2 rt, 0x0257 - Pass2 */
205 unsigned long cop2_hsh_ivw[8];
206 /* DMFC2 rt, 0x0258; DMFC2 rt, 0x0259 - Pass2 */
207 unsigned long cop2_gfm_mult[2];
208 /* DMFC2 rt, 0x025E - Pass2 */
209 unsigned long cop2_gfm_poly;
210 /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
211 unsigned long cop2_gfm_result[2];
212 /* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */
213 unsigned long cop2_sha3[2];
218 struct octeon_cvmseg_state {
219 unsigned long cvmseg[CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE]
220 [cpu_dcache_line_size() / sizeof(unsigned long)];
223 #elif defined(CONFIG_CPU_XLP)
224 struct nlm_cop2_state {
232 .cp2 = {{0}, {0}, 0, 0},
241 #ifdef CONFIG_CPU_HAS_MSA
242 # define ARCH_MIN_TASKALIGN 16
243 # define FPU_ALIGN __aligned(16)
245 # define ARCH_MIN_TASKALIGN 8
252 * If you change thread_struct remember to change the #defines below too!
254 struct thread_struct {
255 /* Saved main processor registers. */
257 unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23;
258 unsigned long reg29, reg30, reg31;
260 /* Saved cp0 stuff. */
261 unsigned long cp0_status;
263 /* Saved fpu/fpu emulator stuff. */
264 struct mips_fpu_struct fpu FPU_ALIGN;
265 /* Assigned branch delay slot 'emulation' frame */
266 atomic_t bd_emu_frame;
267 /* PC of the branch from a branch delay slot 'emulation' */
268 unsigned long bd_emu_branch_pc;
269 /* PC to continue from following a branch delay slot 'emulation' */
270 unsigned long bd_emu_cont_pc;
271 #ifdef CONFIG_MIPS_MT_FPAFF
272 /* Emulated instruction count */
273 unsigned long emulated_fp;
274 /* Saved per-thread scheduler affinity mask */
275 cpumask_t user_cpus_allowed;
276 #endif /* CONFIG_MIPS_MT_FPAFF */
278 /* Saved state of the DSP ASE, if available. */
279 struct mips_dsp_state dsp;
281 /* Saved watch register state, if available. */
282 union mips_watch_reg_state watch;
284 /* Other stuff associated with the thread. */
285 unsigned long cp0_badvaddr; /* Last user fault */
286 unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */
287 unsigned long error_code;
288 unsigned long trap_nr;
289 #ifdef CONFIG_CPU_CAVIUM_OCTEON
290 struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128)));
291 struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128)));
293 #ifdef CONFIG_CPU_XLP
294 struct nlm_cop2_state cp2;
296 struct mips_abi *abi;
299 #ifdef CONFIG_MIPS_MT_FPAFF
302 .user_cpus_allowed = INIT_CPUMASK,
305 #endif /* CONFIG_MIPS_MT_FPAFF */
307 #define INIT_THREAD { \
309 * Saved main processor registers \
327 * Saved FPU/FPU emulator stuff \
335 * FPU affinity state (null if not FPAFF) \
338 /* Delay slot emulation */ \
339 .bd_emu_frame = ATOMIC_INIT(BD_EMUFRAME_NONE), \
340 .bd_emu_branch_pc = 0, \
341 .bd_emu_cont_pc = 0, \
350 * saved watch register stuff \
352 .watch = {{{0,},},}, \
354 * Other stuff associated with the process \
361 * Platform specific cop2 registers(null if no COP2) \
/* Free all resources held by a thread. */
/*
 * No-op on MIPS: a dying thread holds no extra arch resources to free.
 * Style fix: kernel coding style requires "while (0)", not "while(0)".
 */
#define release_thread(thread) do { } while (0)
372 * Do necessary setup to start up a newly executed thread.
374 extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp);
376 static inline void flush_thread(void)
380 unsigned long get_wchan(struct task_struct *p);
/*
 * Top of the saved user register frame on a task's kernel stack:
 * stack base + THREAD_SIZE, minus a 32-byte gap, minus the pt_regs frame.
 */
#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
			 THREAD_SIZE - 32 - sizeof(struct pt_regs))
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
/* Saved user PC, user stack pointer ($29) and CP0 Status for a task. */
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
389 #ifdef CONFIG_CPU_LOONGSON3
391 * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
392 * tight read loop is executed, because reads take priority over writes & the
393 * hardware (incorrectly) doesn't ensure that writes will eventually occur.
395 * Since spin loops of any kind should have a cpu_relax() in them, force an SFB
396 * flush from cpu_relax() such that any pending writes will become visible as
/* Loongson-3: full memory barrier to force buffered SFB writes out. */
#define cpu_relax() smp_mb()
/* All other CPUs: a plain compiler barrier suffices. */
#define cpu_relax() barrier()
405 * Return_address is a replacement for __builtin_return_address(count)
406 * which on certain architectures cannot reasonably be implemented in GCC
407 * (MIPS, Alpha) or is unusable with -fomit-frame-pointer (i386).
408 * Note that __builtin_return_address(x>=1) is forbidden because GCC
409 * aborts compilation on some CPUs. It's simply not possible to unwind
410 * some CPU's stackframes.
412 * __builtin_return_address works only for non-leaf functions. We avoid the
413 * overhead of a function call by forcing the compiler to save the return
414 * address register on the stack.
/*
 * Clobbering $31 (ra) forces the compiler to save the return address
 * register to the stack, so __builtin_return_address(0) works here too.
 */
#define return_address() ({__asm__ __volatile__("":::"$31");__builtin_return_address(0);})
#ifdef CONFIG_CPU_HAS_PREFETCH

/* Prefetch for read (second argument 0), low temporal locality (1). */
#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch((x), 0, 1)

/* Prefetch for write (second argument 1), low temporal locality (1). */
#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)
429 * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options
430 * to the prctl syscall.
432 extern int mips_get_process_fp_mode(struct task_struct *task);
433 extern int mips_set_process_fp_mode(struct task_struct *task,
/* Wrappers used by the prctl(PR_GET_FP_MODE / PR_SET_FP_MODE) code. */
#define GET_FP_MODE(task) mips_get_process_fp_mode(task)
#define SET_FP_MODE(task,value) mips_set_process_fp_mode(task, value)
439 #endif /* _ASM_PROCESSOR_H */