2 * Derived from "arch/i386/kernel/process.c"
3 * Copyright (C) 1995 Linus Torvalds
5 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
6 * Paul Mackerras (paulus@cs.anu.edu.au)
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
21 #include <linux/smp.h>
22 #include <linux/stddef.h>
23 #include <linux/unistd.h>
24 #include <linux/ptrace.h>
25 #include <linux/slab.h>
26 #include <linux/user.h>
27 #include <linux/elf.h>
28 #include <linux/prctl.h>
29 #include <linux/init_task.h>
30 #include <linux/export.h>
31 #include <linux/kallsyms.h>
32 #include <linux/mqueue.h>
33 #include <linux/hardirq.h>
34 #include <linux/utsname.h>
35 #include <linux/ftrace.h>
36 #include <linux/kernel_stat.h>
37 #include <linux/personality.h>
38 #include <linux/random.h>
39 #include <linux/hw_breakpoint.h>
40 #include <linux/uaccess.h>
41 #include <linux/elf-randomize.h>
43 #include <asm/pgtable.h>
45 #include <asm/processor.h>
48 #include <asm/machdep.h>
50 #include <asm/runlatch.h>
51 #include <asm/syscalls.h>
52 #include <asm/switch_to.h>
54 #include <asm/debug.h>
56 #include <asm/firmware.h>
58 #include <asm/code-patching.h>
60 #include <asm/livepatch.h>
61 #include <asm/cpu_has_feature.h>
62 #include <asm/asm-prototypes.h>
64 #include <linux/kprobes.h>
65 #include <linux/kdebug.h>
67 /* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif
74 extern unsigned long _get_SP(void);
76 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
77 static void check_if_tm_restore_required(struct task_struct *tsk)
80 * If we are saving the current thread's registers, and the
81 * thread is in a transactional state, set the TIF_RESTORE_TM
82 * bit so that we know to restore the registers before
83 * returning to userspace.
85 if (tsk == current && tsk->thread.regs &&
86 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
87 !test_thread_flag(TIF_RESTORE_TM)) {
88 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
89 set_thread_flag(TIF_RESTORE_TM);
93 static inline bool msr_tm_active(unsigned long msr)
95 return MSR_TM_ACTIVE(msr);
98 static inline bool msr_tm_active(unsigned long msr) { return false; }
99 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
100 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
102 bool strict_msr_control;
103 EXPORT_SYMBOL(strict_msr_control);
105 static int __init enable_strict_msr_control(char *str)
107 strict_msr_control = true;
108 pr_info("Enabling strict facility control\n");
112 early_param("ppc_strict_facility_enable", enable_strict_msr_control);
114 unsigned long msr_check_and_set(unsigned long bits)
116 unsigned long oldmsr = mfmsr();
117 unsigned long newmsr;
119 newmsr = oldmsr | bits;
122 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
126 if (oldmsr != newmsr)
132 void __msr_check_and_clear(unsigned long bits)
134 unsigned long oldmsr = mfmsr();
135 unsigned long newmsr;
137 newmsr = oldmsr & ~bits;
140 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
144 if (oldmsr != newmsr)
147 EXPORT_SYMBOL(__msr_check_and_clear);
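/*
 * Illustrative sketch, not part of the original file: a typical caller
 * brackets a facility-using region with msr_check_and_set() and
 * msr_check_and_clear().  msr_bracket_example() is a hypothetical name;
 * enable_kernel_fp() below is the real in-tree pattern.
 */
#if 0
static void msr_bracket_example(void)
{
	preempt_disable();		/* the MSR is per-CPU state */
	msr_check_and_set(MSR_FP);	/* turn the facility on */
	/* ... code that uses the facility goes here ... */
	msr_check_and_clear(MSR_FP);	/* turn it back off */
	preempt_enable();
}
#endif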
149 #ifdef CONFIG_PPC_FPU
150 void __giveup_fpu(struct task_struct *tsk)
155 msr = tsk->thread.regs->msr;
156 msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
158 if (cpu_has_feature(CPU_FTR_VSX))
161 tsk->thread.regs->msr = msr;
164 void giveup_fpu(struct task_struct *tsk)
166 check_if_tm_restore_required(tsk);
168 msr_check_and_set(MSR_FP);
170 msr_check_and_clear(MSR_FP);
172 EXPORT_SYMBOL(giveup_fpu);
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
178 void flush_fp_to_thread(struct task_struct *tsk)
180 if (tsk->thread.regs) {
182 * We need to disable preemption here because if we didn't,
183 * another process could get scheduled after the regs->msr
184 * test but before we have finished saving the FP registers
185 * to the thread_struct. That process could take over the
186 * FPU, and then when we get scheduled again we would store
187 * bogus values for the remaining FP registers.
190 if (tsk->thread.regs->msr & MSR_FP) {
192 * This should only ever be called for current or
193 * for a stopped child process. Since we save away
194 * the FP register state on context switch,
195 * there is something wrong if a stopped child appears
196 * to still have its FP state in the CPU registers.
198 BUG_ON(tsk != current);
204 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
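/*
 * Illustrative sketch, not part of the original file: a reader of the
 * saved FP state (ptrace, coredump or signal code, say) flushes first so
 * that thread.fp_state is current.  fp_copy_example() is a hypothetical
 * name.
 */
#if 0
static void fp_copy_example(struct task_struct *child,
			    struct thread_fp_state *out)
{
	flush_fp_to_thread(child);	/* push live FP regs to the thread_struct */
	*out = child->thread.fp_state;	/* now safe to copy */
}
#endif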
206 void enable_kernel_fp(void)
208 unsigned long cpumsr;
210 WARN_ON(preemptible());
212 cpumsr = msr_check_and_set(MSR_FP);
214 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
215 check_if_tm_restore_required(current);
217 * If a thread has already been reclaimed then the
218 * checkpointed registers are on the CPU but have definitely
219 * been saved by the reclaim code. Don't need to and *cannot*
220 * giveup as this would save to the 'live' structure not the
221 * checkpointed structure.
		if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
225 __giveup_fpu(current);
228 EXPORT_SYMBOL(enable_kernel_fp);
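/*
 * Illustrative sketch, not part of the original file: using FP inside
 * the kernel.  disable_kernel_fp() is assumed to be the asm/switch_to.h
 * helper that pairs with enable_kernel_fp(); preemption must stay off
 * for the whole region.
 */
#if 0
static void kernel_fp_region_example(void)
{
	preempt_disable();
	enable_kernel_fp();
	/* ... FPR-using instructions go here ... */
	disable_kernel_fp();
	preempt_enable();
}
#endif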
static int restore_fp(struct task_struct *tsk)
{
	if (tsk->thread.load_fp || msr_tm_active(tsk->thread.regs->msr)) {
		load_fp_state(&current->thread.fp_state);
233 current->thread.load_fp++;
239 static int restore_fp(struct task_struct *tsk) { return 0; }
240 #endif /* CONFIG_PPC_FPU */
242 #ifdef CONFIG_ALTIVEC
243 #define loadvec(thr) ((thr).load_vec)
245 static void __giveup_altivec(struct task_struct *tsk)
250 msr = tsk->thread.regs->msr;
253 if (cpu_has_feature(CPU_FTR_VSX))
256 tsk->thread.regs->msr = msr;
259 void giveup_altivec(struct task_struct *tsk)
261 check_if_tm_restore_required(tsk);
263 msr_check_and_set(MSR_VEC);
264 __giveup_altivec(tsk);
265 msr_check_and_clear(MSR_VEC);
267 EXPORT_SYMBOL(giveup_altivec);
269 void enable_kernel_altivec(void)
271 unsigned long cpumsr;
273 WARN_ON(preemptible());
275 cpumsr = msr_check_and_set(MSR_VEC);
277 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
278 check_if_tm_restore_required(current);
280 * If a thread has already been reclaimed then the
281 * checkpointed registers are on the CPU but have definitely
282 * been saved by the reclaim code. Don't need to and *cannot*
283 * giveup as this would save to the 'live' structure not the
284 * checkpointed structure.
		if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
288 __giveup_altivec(current);
291 EXPORT_SYMBOL(enable_kernel_altivec);
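/*
 * In-tree users of this follow the same shape as the FP sketch above:
 * e.g. the RAID6 and crypto code wrap their vector loops in
 * enable_kernel_altivec()/disable_kernel_altivec() with preemption
 * disabled.
 */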
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
297 void flush_altivec_to_thread(struct task_struct *tsk)
299 if (tsk->thread.regs) {
301 if (tsk->thread.regs->msr & MSR_VEC) {
302 BUG_ON(tsk != current);
308 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
310 static int restore_altivec(struct task_struct *tsk)
312 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
313 (tsk->thread.load_vec || msr_tm_active(tsk->thread.regs->msr))) {
314 load_vr_state(&tsk->thread.vr_state);
315 tsk->thread.used_vr = 1;
316 tsk->thread.load_vec++;
323 #define loadvec(thr) 0
324 static inline int restore_altivec(struct task_struct *tsk) { return 0; }
325 #endif /* CONFIG_ALTIVEC */
328 static void __giveup_vsx(struct task_struct *tsk)
330 if (tsk->thread.regs->msr & MSR_FP)
332 if (tsk->thread.regs->msr & MSR_VEC)
333 __giveup_altivec(tsk);
334 tsk->thread.regs->msr &= ~MSR_VSX;
337 static void giveup_vsx(struct task_struct *tsk)
339 check_if_tm_restore_required(tsk);
341 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
343 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
346 static void save_vsx(struct task_struct *tsk)
348 if (tsk->thread.regs->msr & MSR_FP)
350 if (tsk->thread.regs->msr & MSR_VEC)
354 void enable_kernel_vsx(void)
356 unsigned long cpumsr;
358 WARN_ON(preemptible());
360 cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
362 if (current->thread.regs &&
363 (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
364 check_if_tm_restore_required(current);
366 * If a thread has already been reclaimed then the
367 * checkpointed registers are on the CPU but have definitely
368 * been saved by the reclaim code. Don't need to and *cannot*
369 * giveup as this would save to the 'live' structure not the
370 * checkpointed structure.
		if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
374 if (current->thread.regs->msr & MSR_FP)
375 __giveup_fpu(current);
376 if (current->thread.regs->msr & MSR_VEC)
377 __giveup_altivec(current);
378 __giveup_vsx(current);
381 EXPORT_SYMBOL(enable_kernel_vsx);
383 void flush_vsx_to_thread(struct task_struct *tsk)
385 if (tsk->thread.regs) {
387 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
388 BUG_ON(tsk != current);
394 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
396 static int restore_vsx(struct task_struct *tsk)
398 if (cpu_has_feature(CPU_FTR_VSX)) {
399 tsk->thread.used_vsr = 1;
406 static inline int restore_vsx(struct task_struct *tsk) { return 0; }
407 static inline void save_vsx(struct task_struct *tsk) { }
408 #endif /* CONFIG_VSX */
411 void giveup_spe(struct task_struct *tsk)
413 check_if_tm_restore_required(tsk);
415 msr_check_and_set(MSR_SPE);
417 msr_check_and_clear(MSR_SPE);
419 EXPORT_SYMBOL(giveup_spe);
421 void enable_kernel_spe(void)
423 WARN_ON(preemptible());
425 msr_check_and_set(MSR_SPE);
427 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
428 check_if_tm_restore_required(current);
429 __giveup_spe(current);
432 EXPORT_SYMBOL(enable_kernel_spe);
434 void flush_spe_to_thread(struct task_struct *tsk)
436 if (tsk->thread.regs) {
438 if (tsk->thread.regs->msr & MSR_SPE) {
439 BUG_ON(tsk != current);
440 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
446 #endif /* CONFIG_SPE */
448 static unsigned long msr_all_available;
450 static int __init init_msr_all_available(void)
452 #ifdef CONFIG_PPC_FPU
453 msr_all_available |= MSR_FP;
455 #ifdef CONFIG_ALTIVEC
456 if (cpu_has_feature(CPU_FTR_ALTIVEC))
457 msr_all_available |= MSR_VEC;
460 if (cpu_has_feature(CPU_FTR_VSX))
461 msr_all_available |= MSR_VSX;
464 if (cpu_has_feature(CPU_FTR_SPE))
465 msr_all_available |= MSR_SPE;
470 early_initcall(init_msr_all_available);
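/*
 * For example (an illustration, not from the original file): on a
 * VSX-capable 64-bit server CPU the checks above typically leave
 * msr_all_available == MSR_FP | MSR_VEC | MSR_VSX, so giveup_all() and
 * save_all() below can test a single mask instead of each facility.
 */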
472 void giveup_all(struct task_struct *tsk)
474 unsigned long usermsr;
476 if (!tsk->thread.regs)
479 check_if_tm_restore_required(tsk);
481 usermsr = tsk->thread.regs->msr;
483 if ((usermsr & msr_all_available) == 0)
486 msr_check_and_set(msr_all_available);
488 #ifdef CONFIG_PPC_FPU
489 if (usermsr & MSR_FP)
492 #ifdef CONFIG_ALTIVEC
493 if (usermsr & MSR_VEC)
494 __giveup_altivec(tsk);
497 if (usermsr & MSR_VSX)
501 if (usermsr & MSR_SPE)
505 msr_check_and_clear(msr_all_available);
507 EXPORT_SYMBOL(giveup_all);
509 void restore_math(struct pt_regs *regs)
513 if (!msr_tm_active(regs->msr) &&
514 !current->thread.load_fp && !loadvec(current->thread))
518 msr_check_and_set(msr_all_available);
	 * Only reload if the bit is not set in the user MSR; the bit being set
	 * indicates that the registers are hot.
524 if ((!(msr & MSR_FP)) && restore_fp(current))
525 msr |= MSR_FP | current->thread.fpexc_mode;
527 if ((!(msr & MSR_VEC)) && restore_altivec(current))
530 if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
531 restore_vsx(current)) {
535 msr_check_and_clear(msr_all_available);
540 void save_all(struct task_struct *tsk)
542 unsigned long usermsr;
544 if (!tsk->thread.regs)
547 usermsr = tsk->thread.regs->msr;
549 if ((usermsr & msr_all_available) == 0)
552 msr_check_and_set(msr_all_available);
	 * Given the way the register space is laid out in hardware, save_vsx
	 * boils down to a save_fpu() plus a save_altivec().
558 if (usermsr & MSR_VSX) {
561 if (usermsr & MSR_FP)
564 if (usermsr & MSR_VEC)
568 if (usermsr & MSR_SPE)
571 msr_check_and_clear(msr_all_available);
574 void flush_all_to_thread(struct task_struct *tsk)
576 if (tsk->thread.regs) {
578 BUG_ON(tsk != current);
580 if (tsk->thread.regs->msr & MSR_SPE)
581 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
588 EXPORT_SYMBOL(flush_all_to_thread);
590 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
591 void do_send_trap(struct pt_regs *regs, unsigned long address,
592 unsigned long error_code, int signal_code, int breakpt)
596 current->thread.trap_nr = signal_code;
597 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
598 11, SIGSEGV) == NOTIFY_STOP)
601 /* Deliver the signal to userspace */
602 info.si_signo = SIGTRAP;
603 info.si_errno = breakpt; /* breakpoint or watchpoint id */
604 info.si_code = signal_code;
605 info.si_addr = (void __user *)address;
606 force_sig_info(SIGTRAP, &info, current);
608 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
610 unsigned long error_code)
614 current->thread.trap_nr = TRAP_HWBKPT;
615 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
616 11, SIGSEGV) == NOTIFY_STOP)
619 if (debugger_break_match(regs))
622 /* Clear the breakpoint */
623 hw_breakpoint_disable();
625 /* Deliver the signal to userspace */
626 info.si_signo = SIGTRAP;
628 info.si_code = TRAP_HWBKPT;
629 info.si_addr = (void __user *)address;
630 force_sig_info(SIGTRAP, &info, current);
632 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
634 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
636 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
638 * Set the debug registers back to their default "safe" values.
640 static void set_debug_reg_defaults(struct thread_struct *thread)
642 thread->debug.iac1 = thread->debug.iac2 = 0;
643 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
644 thread->debug.iac3 = thread->debug.iac4 = 0;
646 thread->debug.dac1 = thread->debug.dac2 = 0;
647 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
648 thread->debug.dvc1 = thread->debug.dvc2 = 0;
650 thread->debug.dbcr0 = 0;
 * Force User/Supervisor bits to 0b11 (user-only, MSR[PR]=1)
655 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
656 DBCR1_IAC3US | DBCR1_IAC4US;
 * Force Data Address Compare User/Supervisor bits to be User-only
 * (0b11, MSR[PR]=1) and set all other bits in the DBCR2 register to 0.
661 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
663 thread->debug.dbcr1 = 0;
667 static void prime_debug_regs(struct debug_reg *debug)
670 * We could have inherited MSR_DE from userspace, since
671 * it doesn't get cleared on exception entry. Make sure
672 * MSR_DE is clear before we enable any debug events.
674 mtmsr(mfmsr() & ~MSR_DE);
676 mtspr(SPRN_IAC1, debug->iac1);
677 mtspr(SPRN_IAC2, debug->iac2);
678 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
679 mtspr(SPRN_IAC3, debug->iac3);
680 mtspr(SPRN_IAC4, debug->iac4);
682 mtspr(SPRN_DAC1, debug->dac1);
683 mtspr(SPRN_DAC2, debug->dac2);
684 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
685 mtspr(SPRN_DVC1, debug->dvc1);
686 mtspr(SPRN_DVC2, debug->dvc2);
688 mtspr(SPRN_DBCR0, debug->dbcr0);
689 mtspr(SPRN_DBCR1, debug->dbcr1);
691 mtspr(SPRN_DBCR2, debug->dbcr2);
 * If either the old or the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
699 void switch_booke_debug_regs(struct debug_reg *new_debug)
701 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
702 || (new_debug->dbcr0 & DBCR0_IDM))
703 prime_debug_regs(new_debug);
705 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
706 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
707 #ifndef CONFIG_HAVE_HW_BREAKPOINT
708 static void set_debug_reg_defaults(struct thread_struct *thread)
710 thread->hw_brk.address = 0;
711 thread->hw_brk.type = 0;
712 set_breakpoint(&thread->hw_brk);
714 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
715 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
717 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
718 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
720 mtspr(SPRN_DAC1, dabr);
721 #ifdef CONFIG_PPC_47x
726 #elif defined(CONFIG_PPC_BOOK3S)
727 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
729 mtspr(SPRN_DABR, dabr);
730 if (cpu_has_feature(CPU_FTR_DABRX))
731 mtspr(SPRN_DABRX, dabrx);
735 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
741 static inline int set_dabr(struct arch_hw_breakpoint *brk)
743 unsigned long dabr, dabrx;
745 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
746 dabrx = ((brk->type >> 3) & 0x7);
749 return ppc_md.set_dabr(dabr, dabrx);
751 return __set_dabr(dabr, dabrx);
754 static inline int set_dawr(struct arch_hw_breakpoint *brk)
756 unsigned long dawr, dawrx, mrd;
	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
			   << (63 - 58); /* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
			   << (63 - 59); /* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
			   >> 3; /* PRIV bits */
	/*
	 * dawr length is stored in field MDR bits 48:53.  Matches range in
	 * doublewords (64 bits) biased by -1, e.g. 0b000000=1DW and
	 * 0b111111=64DW.
	 * brk->len is in bytes.
	 * This aligns up to double word size, shifts and does the bias.
	 */
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);
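	/*
	 * Worked example (illustration only): brk->len = 1..8 bytes gives
	 * mrd = 0 (one doubleword), len = 9..16 gives mrd = 1, and the
	 * 6-bit field tops out at mrd = 63, i.e. a 512-byte (64 DW) range.
	 */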
776 return ppc_md.set_dawr(dawr, dawrx);
777 mtspr(SPRN_DAWR, dawr);
778 mtspr(SPRN_DAWRX, dawrx);
782 void __set_breakpoint(struct arch_hw_breakpoint *brk)
	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
786 if (cpu_has_feature(CPU_FTR_DAWR))
792 void set_breakpoint(struct arch_hw_breakpoint *brk)
795 __set_breakpoint(brk);
800 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
803 static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
804 struct arch_hw_breakpoint *b)
806 if (a->address != b->address)
808 if (a->type != b->type)
810 if (a->len != b->len)
815 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
817 static inline bool tm_enabled(struct task_struct *tsk)
819 return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
822 static void tm_reclaim_thread(struct thread_struct *thr,
823 struct thread_info *ti, uint8_t cause)
826 * Use the current MSR TM suspended bit to track if we have
827 * checkpointed state outstanding.
828 * On signal delivery, we'd normally reclaim the checkpointed
829 * state to obtain stack pointer (see:get_tm_stackpointer()).
830 * This will then directly return to userspace without going
831 * through __switch_to(). However, if the stack frame is bad,
832 * we need to exit this thread which calls __switch_to() which
833 * will again attempt to reclaim the already saved tm state.
	 * Hence we need to check that we've not already reclaimed
	 * this state.
	 * We do this using the current MSR, rather than tracking it in
837 * some specific thread_struct bit, as it has the additional
838 * benefit of checking for a potential TM bad thing exception.
840 if (!MSR_TM_SUSPENDED(mfmsr()))
844 * If we are in a transaction and FP is off then we can't have
845 * used FP inside that transaction. Hence the checkpointed
846 * state is the same as the live state. We need to copy the
847 * live state to the checkpointed state so that when the
848 * transaction is restored, the checkpointed state is correct
849 * and the aborted transaction sees the correct state. We use
850 * ckpt_regs.msr here as that's what tm_reclaim will use to
851 * determine if it's going to write the checkpointed state or
852 * not. So either this will write the checkpointed registers,
853 * or reclaim will. Similarly for VMX.
855 if ((thr->ckpt_regs.msr & MSR_FP) == 0)
856 memcpy(&thr->ckfp_state, &thr->fp_state,
857 sizeof(struct thread_fp_state));
858 if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
859 memcpy(&thr->ckvr_state, &thr->vr_state,
860 sizeof(struct thread_vr_state));
862 giveup_all(container_of(thr, struct task_struct, thread));
864 tm_reclaim(thr, thr->ckpt_regs.msr, cause);
867 void tm_reclaim_current(uint8_t cause)
	tm_reclaim_thread(&current->thread, current_thread_info(), cause);
873 static inline void tm_reclaim_task(struct task_struct *tsk)
875 /* We have to work out if we're switching from/to a task that's in the
876 * middle of a transaction.
878 * In switching we need to maintain a 2nd register state as
879 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
880 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
883 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
885 struct thread_struct *thr = &tsk->thread;
890 if (!MSR_TM_ACTIVE(thr->regs->msr))
891 goto out_and_saveregs;
893 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
894 "ccr=%lx, msr=%lx, trap=%lx)\n",
895 tsk->pid, thr->regs->nip,
896 thr->regs->ccr, thr->regs->msr,
899 tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);
901 TM_DEBUG("--- tm_reclaim on pid %d complete\n",
905 /* Always save the regs here, even if a transaction's not active.
906 * This context-switches a thread's TM info SPRs. We do it here to
907 * be consistent with the restore path (in recheckpoint) which
908 * cannot happen later in _switch().
913 extern void __tm_recheckpoint(struct thread_struct *thread,
914 unsigned long orig_msr);
916 void tm_recheckpoint(struct thread_struct *thread,
917 unsigned long orig_msr)
921 if (!(thread->regs->msr & MSR_TM))
924 /* We really can't be interrupted here as the TEXASR registers can't
925 * change and later in the trecheckpoint code, we have a userspace R1.
926 * So let's hard disable over this region.
928 local_irq_save(flags);
931 /* The TM SPRs are restored here, so that TEXASR.FS can be set
932 * before the trecheckpoint and no explosion occurs.
934 tm_restore_sprs(thread);
936 __tm_recheckpoint(thread, orig_msr);
938 local_irq_restore(flags);
941 static inline void tm_recheckpoint_new_task(struct task_struct *new)
945 if (!cpu_has_feature(CPU_FTR_TM))
948 /* Recheckpoint the registers of the thread we're about to switch to.
950 * If the task was using FP, we non-lazily reload both the original and
951 * the speculative FP register states. This is because the kernel
952 * doesn't see if/when a TM rollback occurs, so if we take an FP
953 * unavailable later, we are unable to determine which set of FP regs
954 * need to be restored.
956 if (!tm_enabled(new))
	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
960 tm_restore_sprs(&new->thread);
963 msr = new->thread.ckpt_regs.msr;
964 /* Recheckpoint to restore original checkpointed register state. */
965 TM_DEBUG("*** tm_recheckpoint of pid %d "
966 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
967 new->pid, new->thread.regs->msr, msr);
969 tm_recheckpoint(&new->thread, msr);
	 * The checkpointed state has been restored but the live state has
	 * not; ensure all the math functionality is turned off to trigger
	 * restore_math() to reload it.
976 new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
978 TM_DEBUG("*** tm_recheckpoint of pid %d complete "
979 "(kernel msr 0x%lx)\n",
983 static inline void __switch_to_tm(struct task_struct *prev,
984 struct task_struct *new)
986 if (cpu_has_feature(CPU_FTR_TM)) {
987 if (tm_enabled(prev) || tm_enabled(new))
990 if (tm_enabled(prev)) {
991 prev->thread.load_tm++;
992 tm_reclaim_task(prev);
993 if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
994 prev->thread.regs->msr &= ~MSR_TM;
997 tm_recheckpoint_new_task(new);
1002 * This is called if we are on the way out to userspace and the
1003 * TIF_RESTORE_TM flag is set. It checks if we need to reload
1004 * FP and/or vector state and does so if necessary.
1005 * If userspace is inside a transaction (whether active or
1006 * suspended) and FP/VMX/VSX instructions have ever been enabled
1007 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded for as long as the transaction
 * continues. The reason is that if we didn't, and subsequently
 * got an FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
 */
1015 void restore_tm_state(struct pt_regs *regs)
1017 unsigned long msr_diff;
	 * This is the only moment we should clear TIF_RESTORE_TM as
	 * it is here that ckpt_regs.msr and pt_regs.msr become the same
	 * again; anything else could lead to an incorrect ckpt_msr being
	 * saved and therefore incorrect signal contexts.
1025 clear_thread_flag(TIF_RESTORE_TM);
1026 if (!MSR_TM_ACTIVE(regs->msr))
1029 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
1030 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
1032 /* Ensure that restore_math() will restore */
1033 if (msr_diff & MSR_FP)
1034 current->thread.load_fp = 1;
1035 #ifdef CONFIG_ALTIVEC
1036 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1037 current->thread.load_vec = 1;
1041 regs->msr |= msr_diff;
1045 #define tm_recheckpoint_new_task(new)
1046 #define __switch_to_tm(prev, new)
1047 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1049 static inline void save_sprs(struct thread_struct *t)
1051 #ifdef CONFIG_ALTIVEC
1052 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1053 t->vrsave = mfspr(SPRN_VRSAVE);
1055 #ifdef CONFIG_PPC_BOOK3S_64
1056 if (cpu_has_feature(CPU_FTR_DSCR))
1057 t->dscr = mfspr(SPRN_DSCR);
1059 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1060 t->bescr = mfspr(SPRN_BESCR);
1061 t->ebbhr = mfspr(SPRN_EBBHR);
1062 t->ebbrr = mfspr(SPRN_EBBRR);
1064 t->fscr = mfspr(SPRN_FSCR);
1067 * Note that the TAR is not available for use in the kernel.
1068 * (To provide this, the TAR should be backed up/restored on
1069 * exception entry/exit instead, and be in pt_regs. FIXME,
1070 * this should be in pt_regs anyway (for debug).)
1072 t->tar = mfspr(SPRN_TAR);
1075 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1076 /* Conditionally save Load Monitor registers, if enabled */
1077 if (t->fscr & FSCR_LM) {
1078 t->lmrr = mfspr(SPRN_LMRR);
1079 t->lmser = mfspr(SPRN_LMSER);
1085 static inline void restore_sprs(struct thread_struct *old_thread,
1086 struct thread_struct *new_thread)
1088 #ifdef CONFIG_ALTIVEC
1089 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1090 old_thread->vrsave != new_thread->vrsave)
1091 mtspr(SPRN_VRSAVE, new_thread->vrsave);
1093 #ifdef CONFIG_PPC_BOOK3S_64
1094 if (cpu_has_feature(CPU_FTR_DSCR)) {
1095 u64 dscr = get_paca()->dscr_default;
1096 if (new_thread->dscr_inherit)
1097 dscr = new_thread->dscr;
1099 if (old_thread->dscr != dscr)
1100 mtspr(SPRN_DSCR, dscr);
1103 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1104 if (old_thread->bescr != new_thread->bescr)
1105 mtspr(SPRN_BESCR, new_thread->bescr);
1106 if (old_thread->ebbhr != new_thread->ebbhr)
1107 mtspr(SPRN_EBBHR, new_thread->ebbhr);
1108 if (old_thread->ebbrr != new_thread->ebbrr)
1109 mtspr(SPRN_EBBRR, new_thread->ebbrr);
1111 if (old_thread->fscr != new_thread->fscr)
1112 mtspr(SPRN_FSCR, new_thread->fscr);
1114 if (old_thread->tar != new_thread->tar)
1115 mtspr(SPRN_TAR, new_thread->tar);
1118 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1119 /* Conditionally restore Load Monitor registers, if enabled */
1120 if (new_thread->fscr & FSCR_LM) {
1121 if (old_thread->lmrr != new_thread->lmrr)
1122 mtspr(SPRN_LMRR, new_thread->lmrr);
1123 if (old_thread->lmser != new_thread->lmser)
1124 mtspr(SPRN_LMSER, new_thread->lmser);
1130 struct task_struct *__switch_to(struct task_struct *prev,
1131 struct task_struct *new)
1133 struct thread_struct *new_thread, *old_thread;
1134 struct task_struct *last;
1135 #ifdef CONFIG_PPC_BOOK3S_64
1136 struct ppc64_tlb_batch *batch;
1139 new_thread = &new->thread;
	old_thread = &current->thread;
1142 WARN_ON(!irqs_disabled());
1146 * Collect processor utilization data per process
1148 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
1149 struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
		unsigned long start_tb, current_tb;
1151 start_tb = old_thread->start_tb;
1152 cu->current_tb = current_tb = mfspr(SPRN_PURR);
1153 old_thread->accum_tb += (current_tb - start_tb);
1154 new_thread->start_tb = current_tb;
1156 #endif /* CONFIG_PPC64 */
1158 #ifdef CONFIG_PPC_STD_MMU_64
1159 batch = this_cpu_ptr(&ppc64_tlb_batch);
1160 if (batch->active) {
1161 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1163 __flush_tlb_pending(batch);
1166 #endif /* CONFIG_PPC_STD_MMU_64 */
1168 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1169 switch_booke_debug_regs(&new->thread.debug);
1172 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
1175 #ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
1177 __set_breakpoint(&new->thread.hw_brk);
1178 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1182 * We need to save SPRs before treclaim/trecheckpoint as these will
1183 * change a number of them.
1185 save_sprs(&prev->thread);
1187 /* Save FPU, Altivec, VSX and SPE state */
1190 __switch_to_tm(prev, new);
1193 * We can't take a PMU exception inside _switch() since there is a
1194 * window where the kernel stack SLB and the kernel stack are out
1195 * of sync. Hard disable here.
1200 * Call restore_sprs() before calling _switch(). If we move it after
1201 * _switch() then we miss out on calling it for new tasks. The reason
1202 * for this is we manually create a stack frame for new tasks that
1203 * directly returns through ret_from_fork() or
1204 * ret_from_kernel_thread(). See copy_thread() for details.
1206 restore_sprs(old_thread, new_thread);
1208 last = _switch(old_thread, new_thread);
1210 #ifdef CONFIG_PPC_STD_MMU_64
1211 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1212 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1213 batch = this_cpu_ptr(&ppc64_tlb_batch);
1217 if (current_thread_info()->task->thread.regs)
1218 restore_math(current_thread_info()->task->thread.regs);
1219 #endif /* CONFIG_PPC_STD_MMU_64 */
1224 static int instructions_to_print = 16;
1226 static void show_instructions(struct pt_regs *regs)
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));
1232 printk("Instruction dump:");
1234 for (i = 0; i < instructions_to_print; i++) {
1240 #if !defined(CONFIG_BOOKE)
1241 /* If executing with the IMMU off, adjust pc rather
1242 * than print XXXXXXXX.
1244 if (!(regs->msr & MSR_IR))
1245 pc = (unsigned long)phys_to_virt(pc);
1248 if (!__kernel_text_address(pc) ||
1249 probe_kernel_address((unsigned int __user *)pc, instr)) {
1250 pr_cont("XXXXXXXX ");
1252 if (regs->nip == pc)
1253 pr_cont("<%08x> ", instr);
1255 pr_cont("%08x ", instr);
1269 static struct regbit msr_bits[] = {
1270 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1292 #ifndef CONFIG_BOOKE
1299 static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1303 for (; bits->bit; ++bits)
1304 if (val & bits->bit) {
1305 pr_cont("%s%s", s, bits->name);
1310 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1311 static struct regbit msr_tm_bits[] = {
1318 static void print_tm_bits(unsigned long val)
	 * This only prints something if at least one of the TM bits is set.
1322 * Inside the TM[], the output means:
1323 * E: Enabled (bit 32)
1324 * S: Suspended (bit 33)
1325 * T: Transactional (bit 34)
1327 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1329 print_bits(val, msr_tm_bits, "");
1334 static void print_tm_bits(unsigned long val) {}
1337 static void print_msr_bits(unsigned long val)
1340 print_bits(val, msr_bits, ",");
1346 #define REG "%016lx"
1347 #define REGS_PER_LINE 4
1348 #define LAST_VOLATILE 13
1351 #define REGS_PER_LINE 8
1352 #define LAST_VOLATILE 12
void show_regs(struct pt_regs *regs)
1359 show_regs_print_info(KERN_DEFAULT);
1361 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1362 regs->nip, regs->link, regs->ctr);
1363 printk("REGS: %p TRAP: %04lx %s (%s)\n",
1364 regs, regs->trap, print_tainted(), init_utsname()->release);
1365 printk("MSR: "REG" ", regs->msr);
1366 print_msr_bits(regs->msr);
1367 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
1369 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
1370 pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1371 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
1372 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1373 pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
1375 pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1378 pr_cont("SOFTE: %ld ", regs->softe);
1380 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1381 if (MSR_TM_ACTIVE(regs->msr))
1382 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1385 for (i = 0; i < 32; i++) {
1386 if ((i % REGS_PER_LINE) == 0)
1387 pr_cont("\nGPR%02d: ", i);
1388 pr_cont(REG " ", regs->gpr[i]);
1389 if (i == LAST_VOLATILE && !FULL_REGS(regs))
1393 #ifdef CONFIG_KALLSYMS
	 * Look up the NIP late so we have the best chance of getting the
	 * above info out without failing.
1398 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1399 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1401 show_stack(current, (unsigned long *) regs->gpr[1]);
1402 if (!user_mode(regs))
1403 show_instructions(regs);
1406 void flush_thread(void)
1408 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1409 flush_ptrace_hw_breakpoint(current);
1410 #else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
1412 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1416 release_thread(struct task_struct *t)
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
1424 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1426 flush_all_to_thread(src);
1428 * Flush TM state out so we can copy it. __switch_to_tm() does this
1429 * flush but it removes the checkpointed state from the current CPU and
1430 * transitions the CPU out of TM mode. Hence we need to call
1431 * tm_recheckpoint_new_task() (on the same task) to restore the
1432 * checkpointed state back and the TM mode.
1434 * Can't pass dst because it isn't ready. Doesn't matter, passing
1435 * dst is only important for __switch_to()
1437 __switch_to_tm(src, src);
1441 clear_task_ebb(dst);
1446 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1448 #ifdef CONFIG_PPC_STD_MMU_64
1449 unsigned long sp_vsid;
1450 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1452 if (radix_enabled())
1455 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1456 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1457 << SLB_VSID_SHIFT_1T;
1459 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1461 sp_vsid |= SLB_VSID_KERNEL | llp;
1462 p->thread.ksp_vsid = sp_vsid;
1471 * Copy architecture-specific thread state
1473 int copy_thread(unsigned long clone_flags, unsigned long usp,
1474 unsigned long kthread_arg, struct task_struct *p)
1476 struct pt_regs *childregs, *kregs;
1477 extern void ret_from_fork(void);
1478 extern void ret_from_kernel_thread(void);
1480 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1481 struct thread_info *ti = task_thread_info(p);
1483 klp_init_thread_info(ti);
1485 /* Copy registers */
1486 sp -= sizeof(struct pt_regs);
1487 childregs = (struct pt_regs *) sp;
1488 if (unlikely(p->flags & PF_KTHREAD)) {
1490 memset(childregs, 0, sizeof(struct pt_regs));
1491 childregs->gpr[1] = sp + sizeof(struct pt_regs);
1494 childregs->gpr[14] = ppc_function_entry((void *)usp);
1496 clear_tsk_thread_flag(p, TIF_32BIT);
1497 childregs->softe = 1;
1499 childregs->gpr[15] = kthread_arg;
1500 p->thread.regs = NULL; /* no user register state */
1501 ti->flags |= _TIF_RESTOREALL;
1502 f = ret_from_kernel_thread;
1505 struct pt_regs *regs = current_pt_regs();
1506 CHECK_FULL_REGS(regs);
1509 childregs->gpr[1] = usp;
1510 p->thread.regs = childregs;
1511 childregs->gpr[3] = 0; /* Result from fork() */
1512 if (clone_flags & CLONE_SETTLS) {
1514 if (!is_32bit_task())
1515 childregs->gpr[13] = childregs->gpr[6];
1518 childregs->gpr[2] = childregs->gpr[6];
1523 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1524 sp -= STACK_FRAME_OVERHEAD;
1527 * The way this works is that at some point in the future
1528 * some task will call _switch to switch to the new task.
1529 * That will pop off the stack frame created below and start
1530 * the new task running at ret_from_fork. The new task will
	 * do some housekeeping and then return from the fork or clone
1532 * system call, using the stack frame created above.
1534 ((unsigned long *)sp)[0] = 0;
1535 sp -= sizeof(struct pt_regs);
1536 kregs = (struct pt_regs *) sp;
1537 sp -= STACK_FRAME_OVERHEAD;
1540 p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
1541 _ALIGN_UP(sizeof(struct thread_info), 16);
1543 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1544 p->thread.ptrace_bps[0] = NULL;
1547 p->thread.fp_save_area = NULL;
1548 #ifdef CONFIG_ALTIVEC
1549 p->thread.vr_save_area = NULL;
1552 setup_ksp_vsid(p, sp);
1555 if (cpu_has_feature(CPU_FTR_DSCR)) {
1556 p->thread.dscr_inherit = current->thread.dscr_inherit;
1557 p->thread.dscr = mfspr(SPRN_DSCR);
1559 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1560 p->thread.ppr = INIT_PPR;
1562 kregs->nip = ppc_function_entry(f);
1567 * Set up a thread for executing a new program
1569 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1572 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
1576 * If we exec out of a kernel thread then thread.regs will not be
1579 if (!current->thread.regs) {
1580 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1581 current->thread.regs = regs - 1;
1584 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	 * Clear any transactional state; we're exec()ing. The cause is
	 * not important as there will never be a recheckpoint so it's not
	 * user visible.
	 */
1590 if (MSR_TM_SUSPENDED(mfmsr()))
1591 tm_reclaim_current(0);
1594 memset(regs->gpr, 0, sizeof(regs->gpr));
1602 * We have just cleared all the nonvolatile GPRs, so make
1603 * FULL_REGS(regs) return true. This is necessary to allow
1604 * ptrace to examine the thread immediately after exec.
1611 regs->msr = MSR_USER;
1613 if (!is_32bit_task()) {
1614 unsigned long entry;
1616 if (is_elf2_task()) {
1617 /* Look ma, no function descriptors! */
1622 * The latest iteration of the ABI requires that when
1623 * calling a function (at its global entry point),
1624 * the caller must ensure r12 holds the entry point
1625 * address (so that the function can quickly
1626 * establish addressability).
1628 regs->gpr[12] = start;
1629 /* Make sure that's restored on entry to userspace. */
1630 set_thread_flag(TIF_RESTOREALL);
1634 /* start is a relocated pointer to the function
1635 * descriptor for the elf _start routine. The first
1636 * entry in the function descriptor is the entry
1637 * address of _start and the second entry is the TOC
1638 * value we need to use.
1640 __get_user(entry, (unsigned long __user *)start);
1641 __get_user(toc, (unsigned long __user *)start+1);
1643 /* Check whether the e_entry function descriptor entries
1644 * need to be relocated before we can use them.
1646 if (load_addr != 0) {
1653 regs->msr = MSR_USER64;
1657 regs->msr = MSR_USER32;
1661 current->thread.used_vsr = 0;
1663 current->thread.load_fp = 0;
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1665 current->thread.fp_save_area = NULL;
1666 #ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1668 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1669 current->thread.vr_save_area = NULL;
1670 current->thread.vrsave = 0;
1671 current->thread.used_vr = 0;
1672 current->thread.load_vec = 0;
1673 #endif /* CONFIG_ALTIVEC */
1675 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1676 current->thread.acc = 0;
1677 current->thread.spefscr = 0;
1678 current->thread.used_spe = 0;
1679 #endif /* CONFIG_SPE */
1680 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1681 current->thread.tm_tfhar = 0;
1682 current->thread.tm_texasr = 0;
1683 current->thread.tm_tfiar = 0;
1684 current->thread.load_tm = 0;
1685 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1687 EXPORT_SYMBOL(start_thread);
1689 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1690 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1692 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1694 struct pt_regs *regs = tsk->thread.regs;
	/* This is a bit hairy. If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode. fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
1700 if (val & PR_FP_EXC_SW_ENABLE) {
1702 if (cpu_has_feature(CPU_FTR_SPE)) {
1704 * When the sticky exception bits are set
1705 * directly by userspace, it must call prctl
1706 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1707 * in the existing prctl settings) or
1708 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1709 * the bits being set). <fenv.h> functions
1710 * saving and restoring the whole
1711 * floating-point environment need to do so
1712 * anyway to restore the prctl settings from
1713 * the saved environment.
1715 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1716 tsk->thread.fpexc_mode = val &
1717 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
	/* On a CONFIG_SPE kernel this does not hurt us. The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything. */
1732 if (val > PR_FP_EXC_PRECISE)
1734 tsk->thread.fpexc_mode = __pack_fe01(val);
1735 if (regs != NULL && (regs->msr & MSR_FP) != 0)
1736 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1737 | tsk->thread.fpexc_mode;
1741 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1745 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1747 if (cpu_has_feature(CPU_FTR_SPE)) {
1749 * When the sticky exception bits are set
1750 * directly by userspace, it must call prctl
1751 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1752 * in the existing prctl settings) or
1753 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1754 * the bits being set). <fenv.h> functions
1755 * saving and restoring the whole
1756 * floating-point environment need to do so
1757 * anyway to restore the prctl settings from
1758 * the saved environment.
1760 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1761 val = tsk->thread.fpexc_mode;
1768 val = __unpack_fe01(tsk->thread.fpexc_mode);
1769 return put_user(val, (unsigned int __user *) adr);
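/*
 * Illustrative sketch, not part of the original file: the userspace side
 * of the two routines above, with error handling omitted.
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);   // precise FP exceptions
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);  // back to the default
 */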
1772 int set_endian(struct task_struct *tsk, unsigned int val)
1774 struct pt_regs *regs = tsk->thread.regs;
1776 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1777 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1783 if (val == PR_ENDIAN_BIG)
1784 regs->msr &= ~MSR_LE;
1785 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1786 regs->msr |= MSR_LE;
1793 int get_endian(struct task_struct *tsk, unsigned long adr)
1795 struct pt_regs *regs = tsk->thread.regs;
1798 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1799 !cpu_has_feature(CPU_FTR_REAL_LE))
1805 if (regs->msr & MSR_LE) {
1806 if (cpu_has_feature(CPU_FTR_REAL_LE))
1807 val = PR_ENDIAN_LITTLE;
1809 val = PR_ENDIAN_PPC_LITTLE;
1811 val = PR_ENDIAN_BIG;
1813 return put_user(val, (unsigned int __user *)adr);
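/*
 * Illustrative sketch, not part of the original file: the matching
 * userspace calls are prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE) and
 * prctl(PR_GET_ENDIAN, (unsigned long)&val).
 */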
1816 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1818 tsk->thread.align_ctl = val;
1822 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1824 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1827 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1828 unsigned long nbytes)
1830 unsigned long stack_page;
1831 unsigned long cpu = task_cpu(p);
1834 * Avoid crashing if the stack has overflowed and corrupted
1835 * task_cpu(p), which is in the thread_info struct.
1837 if (cpu < NR_CPUS && cpu_possible(cpu)) {
1838 stack_page = (unsigned long) hardirq_ctx[cpu];
1839 if (sp >= stack_page + sizeof(struct thread_struct)
1840 && sp <= stack_page + THREAD_SIZE - nbytes)
1843 stack_page = (unsigned long) softirq_ctx[cpu];
1844 if (sp >= stack_page + sizeof(struct thread_struct)
1845 && sp <= stack_page + THREAD_SIZE - nbytes)
1851 int validate_sp(unsigned long sp, struct task_struct *p,
1852 unsigned long nbytes)
1854 unsigned long stack_page = (unsigned long)task_stack_page(p);
1856 if (sp >= stack_page + sizeof(struct thread_struct)
1857 && sp <= stack_page + THREAD_SIZE - nbytes)
1860 return valid_irq_stack(sp, p, nbytes);
1863 EXPORT_SYMBOL(validate_sp);
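/*
 * get_wchan() below is a typical caller: each candidate stack pointer is
 * run through validate_sp() before it is dereferenced.
 */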
1865 unsigned long get_wchan(struct task_struct *p)
1867 unsigned long ip, sp;
1870 if (!p || p == current || p->state == TASK_RUNNING)
1874 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1878 sp = *(unsigned long *)sp;
1879 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1882 ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
1883 if (!in_sched_functions(ip))
1886 } while (count++ < 16);
1890 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
1892 void show_stack(struct task_struct *tsk, unsigned long *stack)
1894 unsigned long sp, ip, lr, newsp;
1897 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1898 int curr_frame = current->curr_ret_stack;
1899 extern void return_to_handler(void);
1900 unsigned long rth = (unsigned long)return_to_handler;
1903 sp = (unsigned long) stack;
1908 sp = current_stack_pointer();
1910 sp = tsk->thread.ksp;
1914 printk("Call Trace:\n");
1916 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
1919 stack = (unsigned long *) sp;
1921 ip = stack[STACK_FRAME_LR_SAVE];
1922 if (!firstframe || ip != lr) {
1923 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
1924 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1925 if ((ip == rth) && curr_frame >= 0) {
1927 (void *)current->ret_stack[curr_frame].ret);
1932 pr_cont(" (unreliable)");
1938 * See if this is an exception frame.
1939 * We look for the "regshere" marker in the current frame.
1941 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
1942 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
1943 struct pt_regs *regs = (struct pt_regs *)
1944 (sp + STACK_FRAME_OVERHEAD);
1946 printk("--- interrupt: %lx at %pS\n LR = %pS\n",
1947 regs->trap, (void *)regs->nip, (void *)lr);
1952 } while (count++ < kstack_depth_to_print);
1956 /* Called with hard IRQs off */
1957 void notrace __ppc64_runlatch_on(void)
1959 struct thread_info *ti = current_thread_info();
1962 ctrl = mfspr(SPRN_CTRLF);
1963 ctrl |= CTRL_RUNLATCH;
1964 mtspr(SPRN_CTRLT, ctrl);
1966 ti->local_flags |= _TLF_RUNLATCH;
1969 /* Called with hard IRQs off */
1970 void notrace __ppc64_runlatch_off(void)
1972 struct thread_info *ti = current_thread_info();
1975 ti->local_flags &= ~_TLF_RUNLATCH;
1977 ctrl = mfspr(SPRN_CTRLF);
1978 ctrl &= ~CTRL_RUNLATCH;
1979 mtspr(SPRN_CTRLT, ctrl);
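/*
 * Callers normally use the ppc64_runlatch_on()/ppc64_runlatch_off()
 * wrappers in asm/runlatch.h, which test _TLF_RUNLATCH (and the CTRL
 * feature) first so the SPR is only touched on a real state change,
 * e.g. around the idle loop.
 */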
1981 #endif /* CONFIG_PPC64 */
1983 unsigned long arch_align_stack(unsigned long sp)
1985 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1986 sp -= get_random_int() & ~PAGE_MASK;
1990 static inline unsigned long brk_rnd(void)
1992 unsigned long rnd = 0;
1994 /* 8MB for 32bit, 1GB for 64bit */
1995 if (is_32bit_task())
1996 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
1998 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
2000 return rnd << PAGE_SHIFT;
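/*
 * Worked example (illustration only): with 64K pages (PAGE_SHIFT = 16)
 * a 64-bit task gets rnd in [0, 2^14) pages, i.e. up to 1GB of brk
 * randomization, while a 32-bit task gets up to 8MB.
 */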
2003 unsigned long arch_randomize_brk(struct mm_struct *mm)
2005 unsigned long base = mm->brk;
2008 #ifdef CONFIG_PPC_STD_MMU_64
2010 * If we are using 1TB segments and we are allowed to randomise
2011 * the heap, we can put it above 1TB so it is backed by a 1TB
2012 * segment. Otherwise the heap will be in the bottom 1TB
2013 * which always uses 256MB segments and this may result in a
2014 * performance penalty. We don't need to worry about radix. For
2015 * radix, mmu_highuser_ssize remains unchanged from 256MB.
2017 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2018 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2021 ret = PAGE_ALIGN(base + brk_rnd());