1 #ifndef _ASM_X86_PTRACE_H
2 #define _ASM_X86_PTRACE_H
4 #include <asm/segment.h>
5 #include <asm/page_types.h>
6 #include <uapi/asm/ptrace.h>
/*
 * NOTE(review): this region is a fragmentary view of the i386 and x86_64
 * "struct pt_regs" layouts; the struct openings and most fields are not
 * visible in this excerpt.
 */
	/* i386: original ax on entry (syscall# / error code / IRQ number). */
	unsigned long orig_ax;
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
 * unless syscall needs a complete, fully filled "struct pt_regs".
 */
	/* These regs are callee-clobbered. Always saved on kernel entry. */
	/*
	 * On syscall entry, this is syscall#. On CPU exception, this is error code.
	 * On hw interrupt, it's IRQ number:
	 */
	unsigned long orig_ax;
	/* Return frame for iretq */
	/* top of stack page */
#endif /* !__i386__ */
70 #ifdef CONFIG_PARAVIRT
71 #include <asm/paravirt_types.h>
/* Resolve the PC to report to the profiler for the given trap frame. */
extern unsigned long profile_pc(struct pt_regs *regs);
#define profile_pc profile_pc

/*
 * Translate @regs->ip (possibly segment-relative) into a linear address
 * for @child.  NOTE(review): the return-type line is not visible here.
 */
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
/* Deliver SIGTRAP to @tsk with the given trap error/si codes. */
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code, int si_code);

/*
 * Split syscall-entry tracing: phase1's result is threaded into phase2,
 * so cheap checks can avoid the full slow path.
 */
extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch);
extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch,
				       unsigned long phase1_result);

/* Combined (single-call) syscall-entry tracing hook. */
extern long syscall_trace_enter(struct pt_regs *);
/*
 * Extract the call's return value from @regs (presumably regs->ax on x86 —
 * body not visible in this view; TODO confirm).
 */
static inline unsigned long regs_return_value(struct pt_regs *regs)
/*
 * user_mode(regs) determines whether a register set came from user
 * mode. On x86_32, this is true if V8086 mode was enabled OR if the
 * register set was from protected mode with RPL-3 CS value. This
 * tricky test checks that with one comparison.
 *
 * On x86_64, vm86 mode is mercifully nonexistent, and we don't need
 * [continuation not visible in this view]
 */
static inline int user_mode(struct pt_regs *regs)
	/* x86_32 branch: one compare covers both the V86 and RPL==3 cases. */
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
	/* x86_64 branch: a non-zero RPL in CS means user mode. */
	return !!(regs->cs & 3);
/* True iff @regs came from virtual-8086 mode (only possible on x86_32). */
static inline int v8086_mode(struct pt_regs *regs)
	/* x86_32 branch: test the VM bit in the saved flags. */
	return (regs->flags & X86_VM_MASK);
	/* x86_64 branch: */
	return 0; /* No V86 mode support in long mode */
/* True iff @regs came from 64-bit (long mode) user code, judged by CS. */
static inline bool user_64bit_mode(struct pt_regs *regs)
#ifndef CONFIG_PARAVIRT
	/*
	 * On non-paravirt systems, this is the only long mode CPL 3
	 * selector. We do not allow long mode selectors in the LDT.
	 */
	return regs->cs == __USER_CS;
	/* Headers are too twisted for this to go in paravirt.h. */
	return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#else /* !CONFIG_X86_64 */
/* Both the native and the compat user stack pointer live in pt_regs->sp. */
#define current_user_stack_pointer() current_pt_regs()->sp
#define compat_user_stack_pointer() current_pt_regs()->sp
/*
 * Kernel-entry stack pointer for @regs.  On one config this needs an
 * out-of-line helper (extern); on the other it is a trivial inline —
 * the #ifdef arms separating the two are not visible in this view.
 */
extern unsigned long kernel_stack_pointer(struct pt_regs *regs);

static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)

/* Generic-ptrace accessors: instruction, frame and user stack pointers. */
#define GET_IP(regs) ((regs)->ip)
#define GET_FP(regs) ((regs)->bp)
#define GET_USP(regs) ((regs)->sp)
160 #include <asm-generic/ptrace.h>
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
/* ss is the last member of struct pt_regs, so this bounds valid offsets. */
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss))
/**
 * regs_get_register() - get register value from its offset
 * @regs:	pt_regs from which register value is gotten.
 * @offset:	offset number of the register.
 *
 * regs_get_register returns the value of a register. The @offset is the
 * offset of the register in struct pt_regs address which specified by @regs.
 * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
	/* Reject out-of-range offsets instead of reading past pt_regs. */
	if (unlikely(offset > MAX_REG_OFFSET))
	/*
	 * Traps from the kernel do not save sp and ss.
	 * Use the helper function to retrieve sp.
	 */
	if (offset == offsetof(struct pt_regs, sp) &&
	    regs->cs == __KERNEL_CS)
		return kernel_stack_pointer(regs);

	/* Common case: the register is stored at @offset inside *regs. */
	return *(unsigned long *)((unsigned long)regs + offset);
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static inline int regs_within_kernel_stack(struct pt_regs *regs,
	/* Same THREAD_SIZE-aligned region as the kernel stack pointer? */
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
/**
 * regs_get_kernel_stack_nth_addr() - get the address of the Nth entry on stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the address of the @n th entry of the
 * kernel stack which is specified by @regs. If the @n th entry is NOT in
 * the kernel stack, this returns NULL.
 */
static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n)
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	/*
	 * NOTE(review): the line advancing @addr by @n entries, and the NULL
	 * fallthrough, are not visible in this view.
	 */
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
/* To avoid include hell, we can't include uaccess.h */
/* Fault-tolerant copy of @size bytes from kernel address @src into @dst. */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack
 * [continuation not visible in this view]
 */
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
	/* Resolve the slot's address, then copy its value out fault-safely. */
	addr = regs_get_kernel_stack_nth_addr(regs, n);
	ret = probe_kernel_read(&val, addr, sizeof(val));
/* Hardware single-step is always available on x86. */
#define arch_has_single_step() (1)
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step() (1)
/*
 * Alternative definition without DEBUGCTLMSR: runtime CPU-family check.
 * NOTE(review): the #else/#endif separating the two arms are not visible here.
 */
#define arch_has_block_step() (boot_cpu_data.x86 >= 6)

/* x86 supplies extra siginfo for single-step traps. */
#define ARCH_HAS_USER_SINGLE_STEP_INFO
/*
 * When hitting ptrace_stop(), we cannot return using SYSRET because
 * that does not restore the full CPU state, only a minimal set. The
 * ptracer can change arbitrary register values, which is usually okay
 * because the usual ptrace stops run off the signal delivery path which
 * forces IRET; however, ptrace_event() stops happen in arbitrary places
 * in the kernel and don't force IRET path.
 *
 * So force IRET path after a ptrace stop.
 */
#define arch_ptrace_stop_needed(code, info) \
/*
 * Per-task GDT entry accessors backing get/set_thread_area — presumably the
 * TLS segment descriptors (struct user_desc); verify against their callers.
 */
extern int do_get_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);
287 #endif /* !__ASSEMBLY__ */
288 #endif /* _ASM_X86_PTRACE_H */