// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2005,2006,2007,2008,2009,2010,2011 Imagination Technologies
 *
 * This file contains the architecture-dependent parts of process handling.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/reboot.h>
#include <linux/elfcore.h>
#include <linux/fs.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/pm.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <asm/core_reg.h>
#include <asm/user_gateway.h>
#include <asm/tcm.h>
#include <asm/traps.h>
#include <asm/switch_to.h>

/*
 * Wait for the next interrupt and enable local interrupts
 */
void arch_cpu_idle(void)
{
	int tmp;

	/*
	 * Quickly jump straight into the interrupt entry point without actually
	 * triggering an interrupt. When TXSTATI gets read the processor will
	 * block until an interrupt is triggered.
	 */
	asm volatile (/* Switch into ISTAT mode */
		      "RTH\n\t"
		      /* Enable local interrupts */
		      "MOV	TXMASKI, %1\n\t"
		      /*
		       * We can't directly "SWAP PC, PCX", so we swap via a
		       * temporary. Essentially we do:
		       *  PCX_new = 1f (the place to continue execution)
		       *  PC = PCX_old
		       */
		      "ADD	%0, CPC0, #(1f-.)\n\t"
		      "SWAP	PCX, %0\n\t"
		      "MOV	PC, %0\n"
		      /* Continue execution here with interrupts enabled */
		      "1:"
		      : "=a" (tmp)
		      : "r" (get_trigger_mask()));
}

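/*
 * With CPU hotplug enabled, the generic idle loop calls this hook once a
 * CPU has been taken offline; cpu_die() hands the CPU over to platform
 * code and does not return.
 */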
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
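
/*
 * SoC-specific restart/halt hooks. Platform code may fill these in; the
 * machine_*() handlers below call them when they are non-NULL.
 */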
void (*soc_restart)(char *cmd);
void (*soc_halt)(void);

void machine_restart(char *cmd)
{
	if (soc_restart)
		soc_restart(cmd);
	hard_processor_halt(HALT_OK);
}

void machine_halt(void)
{
	if (soc_halt)
		soc_halt();
	smp_send_stop();
	hard_processor_halt(HALT_OK);
}

void machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
	smp_send_stop();
	hard_processor_halt(HALT_OK);
}

#define FLAG_Z	0x8
#define FLAG_N	0x4
#define FLAG_O	0x2
#define FLAG_C	0x1

void show_regs(struct pt_regs *regs)
{
	int i;
	const char *AX0_names[] = {"A0StP", "A0FrP"};
	const char *AX1_names[] = {"A1GbP", "A1LbP"};

	const char *DX0_names[] = {
		"D0Re0", "D0Ar6", "D0Ar4", "D0Ar2",
		"D0FrT", "D0.5 ", "D0.6 ", "D0.7 "
	};

	const char *DX1_names[] = {
		"D1Re0", "D1Ar5", "D1Ar3", "D1Ar1",
		"D1RtP", "D1.5 ", "D1.6 ", "D1.7 "
	};

	show_regs_print_info(KERN_INFO);

	pr_info(" pt_regs @ %p\n", regs);
	pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask);
	pr_info(" Flags    = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags,
		regs->ctx.Flags & FLAG_Z ? 'Z' : 'z',
		regs->ctx.Flags & FLAG_N ? 'N' : 'n',
		regs->ctx.Flags & FLAG_O ? 'O' : 'o',
		regs->ctx.Flags & FLAG_C ? 'C' : 'c');
	pr_info(" TXRPT    = 0x%08x\n", regs->ctx.CurrRPT);
	pr_info(" PC       = 0x%08x\n", regs->ctx.CurrPC);
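
	/*
	 * Registers are saved as U0/U1 pairs: .U0 is the D0/A0-unit half
	 * and .U1 the D1/A1-unit half of each pair.
	 */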
	/* AX regs */
	for (i = 0; i < 2; i++) {
		pr_info(" %s = 0x%08x ",
			AX0_names[i],
			regs->ctx.AX[i].U0);
		printk(" %s = 0x%08x\n",
		       AX1_names[i],
		       regs->ctx.AX[i].U1);
	}

	if (regs->ctx.SaveMask & TBICTX_XEXT_BIT)
		pr_warn(" Extended state present - AX2.[01] will be WRONG\n");

	/* Special place with AXx.2 */
	pr_info(" A0.2 = 0x%08x ",
		regs->ctx.Ext.AX2.U0);
	printk(" A1.2 = 0x%08x\n",
	       regs->ctx.Ext.AX2.U1);

	/* 'extended' AX regs (nominally, just AXx.3) */
	for (i = 0; i < (TBICTX_AX_REGS - 3); i++) {
		pr_info(" A0.%d = 0x%08x ", i + 3, regs->ctx.AX3[i].U0);
		printk(" A1.%d = 0x%08x\n", i + 3, regs->ctx.AX3[i].U1);
	}

	for (i = 0; i < 8; i++) {
		pr_info(" %s = 0x%08x ", DX0_names[i], regs->ctx.DX[i].U0);
		printk(" %s = 0x%08x\n", DX1_names[i], regs->ctx.DX[i].U1);
	}
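
	/*
	 * AX[0].U0 is A0StP, the stack pointer; the Meta stack grows
	 * upwards, so it bounds the region scanned for the backtrace.
	 */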
	show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs);
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *tsk)
{
	struct pt_regs *childregs = task_pt_regs(tsk);
	void *kernel_context = ((void *) childregs +
				sizeof(struct pt_regs));
	unsigned long global_base;
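
	/*
	 * Both save areas must be 8-byte aligned, presumably because
	 * registers are saved and restored as 64-bit U0/U1 pairs.
	 */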
	BUG_ON(((unsigned long)childregs) & 0x7);
	BUG_ON(((unsigned long)kernel_context) & 0x7);

	memset(&tsk->thread.kernel_context, 0,
	       sizeof(tsk->thread.kernel_context));
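
	/*
	 * Seed the child's kernel context so that the first switch to this
	 * task begins executing in ret_from_fork.
	 */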
	tsk->thread.kernel_context = __TBISwitchInit(kernel_context,
						     ret_from_fork,
						     0, 0);

	if (unlikely(tsk->flags & PF_KTHREAD)) {
		/*
		 * Make sure we don't leak any kernel data to child's regs
		 * if kernel thread becomes a userspace thread in the future
		 */
		memset(childregs, 0, sizeof(struct pt_regs));

		global_base = __core_reg_get(A1GbP);
		childregs->ctx.AX[0].U1 = (unsigned long) global_base;
		childregs->ctx.AX[0].U0 = (unsigned long) kernel_context;
		/* Set D1Ar1=kthread_arg and D1RtP=usp (fn) */
		childregs->ctx.DX[4].U1 = usp;
		childregs->ctx.DX[3].U1 = kthread_arg;
		tsk->thread.int_depth = 2;
		return 0;
	}
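
	/*
	 * For a user thread, usp (when non-zero) is the new user stack
	 * pointer; in the kernel-thread path above it carried the thread
	 * function instead (D1RtP, per the Meta calling convention).
	 */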
	/*
	 * Get a pointer to where the new child's register block should have
	 * been pushed.
	 * The Meta's stack grows upwards, and the context is the first
	 * thing to be pushed by TBX (phew)
	 */
	*childregs = *current_pt_regs();
	/* Set the correct stack for the clone mode */
	if (usp)
		childregs->ctx.AX[0].U0 = ALIGN(usp, 8);
	tsk->thread.int_depth = 1;

	/* set return value for child process */
	childregs->ctx.DX[0].U0 = 0;

	/* The TLS pointer is passed as an argument to sys_clone. */
	if (clone_flags & CLONE_SETTLS)
		tsk->thread.tls_ptr =
				(__force void __user *)childregs->ctx.DX[1].U1;

#ifdef CONFIG_METAG_FPU
	if (tsk->thread.fpu_context) {
		struct meta_fpu_context *ctx;

		ctx = kmemdup(tsk->thread.fpu_context,
			      sizeof(struct meta_fpu_context), GFP_ATOMIC);
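
		/*
		 * If kmemdup() fails the child simply starts without a
		 * saved FPU context; kfree(NULL) in clear_fpu() is a no-op.
		 */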
		tsk->thread.fpu_context = ctx;
	}
#endif

#ifdef CONFIG_METAG_DSP
	if (tsk->thread.dsp_context) {
		struct meta_ext_context *ctx;
		int i;

		ctx = kmemdup(tsk->thread.dsp_context,
			      sizeof(struct meta_ext_context), GFP_ATOMIC);
		/* guard against kmemdup() failure before touching ctx->ram */
		if (ctx)
			for (i = 0; i < 2; i++)
				ctx->ram[i] = kmemdup(ctx->ram[i],
						      ctx->ram_sz[i],
						      GFP_ATOMIC);
		tsk->thread.dsp_context = ctx;
	}
#endif

	return 0;
}
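
/*
 * Lazy FPU/DSP context management: the extended-context buffers are
 * allocated on demand and released again on exec/exit via the clear_*()
 * helpers below.
 */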
#ifdef CONFIG_METAG_FPU
static void alloc_fpu_context(struct thread_struct *thread)
{
	thread->fpu_context = kzalloc(sizeof(struct meta_fpu_context),
				      GFP_ATOMIC);
}

static void clear_fpu(struct thread_struct *thread)
{
	thread->user_flags &= ~TBICTX_FPAC_BIT;
	kfree(thread->fpu_context);
	thread->fpu_context = NULL;
}
#else
static void clear_fpu(struct thread_struct *thread)
{
}
#endif

#ifdef CONFIG_METAG_DSP
static void clear_dsp(struct thread_struct *thread)
{
	if (thread->dsp_context) {
		kfree(thread->dsp_context->ram[0]);
		kfree(thread->dsp_context->ram[1]);

		kfree(thread->dsp_context);

		thread->dsp_context = NULL;
	}

	__core_reg_set(D0.8, 0);
}
#else
static void clear_dsp(struct thread_struct *thread)
{
}
#endif

struct task_struct *__sched __switch_to(struct task_struct *prev,
					struct task_struct *next)
{
	TBIRES to, from;

	to.Switch.pCtx = next->thread.kernel_context;
	to.Switch.pPara = prev;

#ifdef CONFIG_METAG_FPU
	if (prev->thread.user_flags & TBICTX_FPAC_BIT) {
		struct pt_regs *regs = task_pt_regs(prev);
		TBIRES state;

		state.Sig.SaveMask = prev->thread.user_flags;
		state.Sig.pCtx = &regs->ctx;

		if (!prev->thread.fpu_context)
			alloc_fpu_context(&prev->thread);
		if (prev->thread.fpu_context)
			__TBICtxFPUSave(state, prev->thread.fpu_context);
	}
	/*
	 * Force a restore of the FPU context next time this process is
	 * scheduled.
	 */
	if (prev->thread.fpu_context)
		prev->thread.fpu_context->needs_restore = true;
#endif
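
	/*
	 * __TBISwitch() saves the outgoing kernel context into
	 * prev->thread.kernel_context and resumes the one in 'to'; the
	 * switched-to thread gets 'prev' back in Switch.pPara.
	 */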
	from = __TBISwitch(to, &prev->thread.kernel_context);

	/* Restore TLS pointer for this process. */
	set_gateway_tls(current->thread.tls_ptr);

	return (struct task_struct *) from.Switch.pPara;
}

void flush_thread(void)
{
	clear_fpu(&current->thread);
	clear_dsp(&current->thread);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
	clear_fpu(&tsk->thread);
	clear_dsp(&tsk->thread);
}

/* TODO: figure out how to unwind the kernel stack here to figure out
 * where we went to sleep. */
unsigned long get_wchan(struct task_struct *p)
{
	return 0;
}

int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	/* Returning 0 indicates that the FPU state was not stored (as it was
	 * not in use) */
	return 0;
}

#ifdef CONFIG_METAG_USER_TCM

#define ELF_MIN_ALIGN PAGE_SIZE

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
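
/*
 * Map an ELF segment for a user process, diverting segments whose load
 * address falls inside a TCM (tightly coupled memory) region into a TCM
 * allocation that is populated from the file mapping below.
 */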
unsigned long __metag_elf_map(struct file *filep, unsigned long addr,
			      struct elf_phdr *eppnt, int prot, int type,
			      unsigned long total_size)
{
	unsigned long map_addr, size;
	unsigned long page_off = ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long raw_size = eppnt->p_filesz + page_off;
	unsigned long off = eppnt->p_offset - page_off;
	unsigned int tcm_tag;

	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(raw_size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	tcm_tag = tcm_lookup_tag(addr);

	if (tcm_tag != TCM_INVALID_TAG)
		type &= ~MAP_FIXED;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	if (!BAD_ADDR(map_addr) && tcm_tag != TCM_INVALID_TAG) {
		struct tcm_allocation *tcm;
		unsigned long tcm_addr;

		tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
		if (!tcm)
			return -ENOMEM;

		tcm_addr = tcm_alloc(tcm_tag, raw_size);
		if (tcm_addr != addr) {
			kfree(tcm);
			return -ENOMEM;
		}

		tcm->tag = tcm_tag;
		tcm->addr = tcm_addr;
		tcm->size = raw_size;

		list_add(&tcm->list, &current->mm->context.tcm);
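
		/*
		 * Point the program header at the file mapping, then copy
		 * the segment contents from there into the TCM at the
		 * address the segment was linked for.
		 */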
		eppnt->p_vaddr = map_addr;
		if (copy_from_user((void *) addr, (void __user *) map_addr,
				   raw_size))
			return -EFAULT;
	}

	return map_addr;
}
#endif