/*
 * This file handles the architecture dependent parts of process handling.
 *
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Hartmut Penner <hp@de.ibm.com>,
 */

#include <linux/elf-randomize.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/init_task.h>
#include <asm/processor.h>
#include <asm/vtimer.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>

asmlinkage void ret_from_fork(void) asm ("ret_from_fork");

/*
 * Return saved PC of a blocked thread. Used in kernel/sched.
 * resume in entry.S does not create a new stack frame, it
 * just stores the registers %r6-%r15 to the frame given by
 * schedule. We want to return the address of the caller of
 * schedule, so we have to walk the backchain one time to
 * find the frame schedule() stored its return address in.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct stack_frame *sf, *low, *high;

	if (!tsk || !task_stack_page(tsk))
		return 0;
	low = task_stack_page(tsk);
	high = (struct stack_frame *) task_pt_regs(tsk);
	sf = (struct stack_frame *) tsk->thread.ksp;
	if (sf <= low || sf > high)
		return 0;
	sf = (struct stack_frame *) sf->back_chain;
	if (sf <= low || sf > high)
		return 0;
	return sf->gprs[8];
}
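
/*
 * kernel_thread_starter is an assembly stub: copy_thread() below starts
 * new kernel threads with the PSW pointing at it, the thread function in
 * gpr 9, its argument in gpr 10 and do_exit in gpr 11.
 */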
extern void kernel_thread_starter(void);

/*
 * Free current thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
}

void flush_thread(void)
{
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
	runtime_instr_release(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save the floating-point or vector register state of the current
	 * task and set the CIF_FPU flag to lazy restore the FPU register
	 * state when returning to user space.
	 */
	save_fpu_regs();

	memcpy(dst, src, arch_task_struct_size);
	dst->thread.fpu.regs = dst->thread.fpu.fprs;
	return 0;
}

int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
		unsigned long arg, struct task_struct *p)
{
	struct thread_info *ti;
	struct fake_frame
	{
		struct stack_frame sf;
		struct pt_regs childregs;
	} *frame;
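
	/*
	 * task_pt_regs(p) lives at the top of the new kernel stack; the fake
	 * stack frame placed directly below it is what the first resume() of
	 * the child restores, so the child wakes up in ret_from_fork with
	 * gpr 15 pointing at this frame.
	 */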
	frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
	p->thread.ksp = (unsigned long) frame;
	/* Save access registers to new thread structure. */
	save_access_regs(&p->thread.acrs[0]);
	/* Start new process with ar4 pointing to the correct address space */
	p->thread.mm_segment = get_fs();
	/* Don't copy debug registers */
	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
	p->thread.per_flags = 0;
	/* Initialize per thread user and system timer values */
	ti = task_thread_info(p);
	ti->user_timer = 0;
	ti->system_timer = 0;

	frame->sf.back_chain = 0;
	/* new return point is ret_from_fork */
	frame->sf.gprs[8] = (unsigned long) ret_from_fork;
	/* fake return stack for resume(), don't go back to schedule */
	frame->sf.gprs[9] = (unsigned long) frame;

	/* Set up the child's pt_regs on the kernel stack of the new process. */
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(&frame->childregs, 0, sizeof(struct pt_regs));
		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
				PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
		frame->childregs.psw.addr =
				(unsigned long) kernel_thread_starter;
		frame->childregs.gprs[9] = new_stackp; /* function */
		frame->childregs.gprs[10] = arg;
		frame->childregs.gprs[11] = (unsigned long) do_exit;
		frame->childregs.orig_gpr2 = -1;

		return 0;
	}
	frame->childregs = *current_pt_regs();
	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
	frame->childregs.flags = 0;
	if (new_stackp)
		frame->childregs.gprs[15] = new_stackp;

	/* Don't copy runtime instrumentation info */
	p->thread.ri_cb = NULL;
	frame->childregs.psw.mask &= ~PSW_MASK_RI;

	/* Set a new TLS? */
	if (clone_flags & CLONE_SETTLS) {
		unsigned long tls = frame->childregs.gprs[6];
		if (is_compat_task()) {
			p->thread.acrs[0] = (unsigned int)tls;
		} else {
			p->thread.acrs[0] = (unsigned int)(tls >> 32);
			p->thread.acrs[1] = (unsigned int)tls;
		}
	}
	return 0;
}
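
/*
 * Give a program that has just been exec'ed a clean floating-point
 * control word: clear the saved fpc and reset the hardware FPC via sfpc.
 */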
asmlinkage void execve_tail(void)
{
	current->thread.fpu.fpc = 0;
	asm volatile("sfpc %0" : : "d" (0));
}

/*
 * Fill in the FPU structure for a core dump.
 */
int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
{
	save_fpu_regs();
	fpregs->fpc = current->thread.fpu.fpc;
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *)&fpregs->fprs,
				 current->thread.fpu.vxrs);
	else
		memcpy(&fpregs->fprs, current->thread.fpu.fprs,
		       sizeof(fpregs->fprs));
	return 1;
}
EXPORT_SYMBOL(dump_fpu);
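
/*
 * get_wchan() - find where a sleeping task is blocked in the kernel.
 * Walk the task's stack back chain (at most 16 frames) and return the
 * first saved return address that is not within scheduler code.
 */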
unsigned long get_wchan(struct task_struct *p)
{
	struct stack_frame *sf, *low, *high;
	unsigned long return_address;
	int count;

	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
		return 0;
	low = task_stack_page(p);
	high = (struct stack_frame *) task_pt_regs(p);
	sf = (struct stack_frame *) p->thread.ksp;
	if (sf <= low || sf > high)
		return 0;
	for (count = 0; count < 16; count++) {
		sf = (struct stack_frame *) sf->back_chain;
		if (sf <= low || sf > high)
			return 0;
		return_address = sf->gprs[8];
		if (!in_sched_functions(return_address))
			return return_address;
	}
	return 0;
}
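
/*
 * Randomize the initial user stack pointer by up to one page, unless the
 * task has disabled address space randomization.
 */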
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}
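
/*
 * Randomize the heap start: at least mm->brk, raised by a page-aligned
 * random offset of up to BRK_RND_MASK pages.
 */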
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret;

	ret = PAGE_ALIGN(mm->brk + brk_rnd());
	return (ret > mm->brk) ? ret : mm->brk;
}