2 * entry.S -- non-mmu 68000 interrupt and exception entry points
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file README.legal in the main directory of this archive
10 * Linux/m68k support by Hamish Macdonald
13 #include <linux/linkage.h>
14 #include <asm/thread_info.h>
15 #include <asm/unistd.h>
16 #include <asm/errno.h>
17 #include <asm/setup.h>
18 #include <asm/traps.h>
19 #include <asm/asm-offsets.h>
20 #include <asm/entry.h>
/*
 * System-call entry and ptrace-trace path (excerpt — the ENTRY() labels
 * and several intervening instructions are outside this view).
 * The saved register frame lives on the kernel stack; PT_OFF_* are
 * asm-offsets into that struct pt_regs frame.
 */
26 .globl ret_from_exception
/* Unknown/illegal syscall number: preload the saved d0 slot with -ENOSYS */
/* so the user sees ENOSYS when the frame is restored. */
38 movel #-ENOSYS,%sp@(PT_OFF_D0)
39 jra ret_from_exception
/* Traced-syscall entry: default the return slot to -ENOSYS before the */
/* tracer runs, so strace shows a sane value if the call is suppressed. */
42 movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
45 jbsr syscall_trace_enter /* notify ptrace; may rewrite the syscall */
49 jeq ret_from_exception /* NOTE(review): condition set by elided lines — tracer cancelled the call */
/* Reload the (possibly tracer-modified) syscall number from orig_d0. */
50 movel %sp@(PT_OFF_ORIG_D0),%d1
55 lea sys_call_table, %a0 /* dispatch table base; index applied in elided lines */
58 1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
/* syscall_trace_leave is called like a C function but jumped into with */
/* a hand-built frame: fake a return-address slot on the stack first. */
59 subql #4,%sp /* dummy return address */
61 jbsr syscall_trace_leave /* notify ptrace of syscall exit */
64 jra ret_from_exception
/*
 * Main syscall dispatch (excerpt — ENTRY(system_call), SAVE_ALL, and the
 * range check on the syscall number are in elided lines).
 */
69 /* save top of frame*/
74 movel %sp@(PT_OFF_ORIG_D0),%d0 /* d0 = syscall number from saved frame */
/* Locate thread_info: it sits at the base of the kernel stack, so */
/* rounding the stack pointer down to THREAD_SIZE finds it. */
76 movel %sp,%d1 /* get thread_info pointer */
77 andl #-THREAD_SIZE,%d1
/* Test the TIF_SYSCALL_TRACE bit in the 32-bit big-endian flags word: */
/* (31-TIF)/8 picks the byte holding the bit, TIF%8 the bit within it */
/* (btst on a memory operand tests a single byte). a2 = thread_info, */
/* loaded from d1 in an elided line. */
79 btst #(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
84 lea sys_call_table,%a0 /* untraced fast path: dispatch via table */
87 movel %d0,%sp@(PT_OFF_D0) /* save the return value*/
/*
 * Exception/interrupt return path (excerpt — labels such as
 * ret_from_exception/Luser_return/Lwork_to_do and the branches that
 * consume the flag tests below are in elided lines).
 */
/* Bit 5 of the saved SR is the 68000 supervisor bit: set means we */
/* trapped from kernel mode, so skip the user-return work. */
90 btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/
91 jeq Luser_return /* if so, skip resched, signals*/
97 /* only allow interrupts when we are really the last one on the*/
98 /* kernel stack, otherwise stack overflow can occur during*/
99 /* heavy interrupt load*/
/* Recompute thread_info from the kernel stack base (same mask trick */
/* as the syscall path). */
102 movel %sp,%d1 /* get thread_info pointer */
103 andl #-THREAD_SIZE,%d1
106 move %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
/* Re-read flags after possible work (loop back point is elided). */
111 movel %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
112 btst #TIF_NEED_RESCHED,%d1 /* reschedule requested? branch is elided */
/* Signal/notify delivery: build a C call frame by hand — fake return */
/* address, then pass a pointer to the switch-stack-saved registers. */
116 subql #4,%sp /* dummy return address*/
118 pea %sp@(SWITCH_STACK_SIZE) /* arg: struct pt_regs * above the switch stack */
119 bsrw do_notify_resume /* deliver pending signals / notifications */
126 * This is the main interrupt handler, responsible for calling process_int()
/*
 * Auto-vector interrupt handlers 1..7 plus the generic handler (excerpt —
 * each ENTRY(inthandlerN), the SAVE_ALL_INT frame save, and the push of
 * the pt_regs pointer are in elided lines; hence the addql #8 below pops
 * TWO longwords: the vector number pushed here plus one elided argument).
 * Each handler reads the exception frame's format/vector word, pushes a
 * fixed vector number (64 + level), and calls process_int().
 */
130 movew %sp@(PT_OFF_FORMATVEC), %d0 /* frame format/vector word */
134 movel #65,%sp@- /* put vector # on stack*/
135 jbsr process_int /* process the IRQ*/
136 3: addql #8,%sp /* pop parameters off stack*/
137 bra ret_from_exception
/* inthandler2: autovector level 2 -> vector 66 */
141 movew %sp@(PT_OFF_FORMATVEC), %d0
145 movel #66,%sp@- /* put vector # on stack*/
146 jbsr process_int /* process the IRQ*/
147 3: addql #8,%sp /* pop parameters off stack*/
148 bra ret_from_exception
/* inthandler3: autovector level 3 -> vector 67 */
152 movew %sp@(PT_OFF_FORMATVEC), %d0
156 movel #67,%sp@- /* put vector # on stack*/
157 jbsr process_int /* process the IRQ*/
158 3: addql #8,%sp /* pop parameters off stack*/
159 bra ret_from_exception
/* inthandler4: autovector level 4 -> vector 68 */
163 movew %sp@(PT_OFF_FORMATVEC), %d0
167 movel #68,%sp@- /* put vector # on stack*/
168 jbsr process_int /* process the IRQ*/
169 3: addql #8,%sp /* pop parameters off stack*/
170 bra ret_from_exception
/* inthandler5: autovector level 5 -> vector 69 */
174 movew %sp@(PT_OFF_FORMATVEC), %d0
178 movel #69,%sp@- /* put vector # on stack*/
179 jbsr process_int /* process the IRQ*/
180 3: addql #8,%sp /* pop parameters off stack*/
181 bra ret_from_exception
/* inthandler6: autovector level 6 -> vector 70 */
185 movew %sp@(PT_OFF_FORMATVEC), %d0
189 movel #70,%sp@- /* put vector # on stack*/
190 jbsr process_int /* process the IRQ*/
191 3: addql #8,%sp /* pop parameters off stack*/
192 bra ret_from_exception
/* inthandler7: autovector level 7 (NMI) -> vector 71 */
196 movew %sp@(PT_OFF_FORMATVEC), %d0
200 movel #71,%sp@- /* put vector # on stack*/
201 jbsr process_int /* process the IRQ*/
202 3: addql #8,%sp /* pop parameters off stack*/
203 bra ret_from_exception
/* Generic inthandler: vector number comes from the frame itself (d0), */
/* masked/adjusted in elided lines, rather than a hard-coded constant. */
207 movew %sp@(PT_OFF_FORMATVEC), %d0
211 movel %d0,%sp@- /* put vector # on stack*/
212 jbsr process_int /* process the IRQ*/
213 3: addql #8,%sp /* pop parameters off stack*/
214 bra ret_from_exception
217 * Handler for uninitialized and spurious interrupts.
/* Just count the stray interrupt; the surrounding label and the return */
/* are in elided lines. */
220 addql #1,irq_err_count
/*
 * resume() — task context switch (excerpt; ENTRY(resume) and the
 * register save/restore of the switch stack are in elided lines).
 * Saves prev's SR, kernel SP and user SP into its thread_struct,
 * then loads the same three from next's thread_struct.
 */
224 * Beware - when entering resume, prev (the current task) is
225 * in a0, next (the new task) is in a1, so don't change these
226 * registers until their contents are no longer needed.
229 movel %a0,%d1 /* save prev thread in d1 */
230 movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */
232 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
/* USP is a separate register on 68000; it must be staged through an */
/* address register (a3) — it cannot be moved to memory directly. */
233 movel %usp,%a3 /* save usp */
234 movel %a3,%a0@(TASK_THREAD+THREAD_USP)
236 movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
/* From here on we run on next's kernel stack. */
238 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
240 movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */