 * entry.S -- non-mmu 68000 interrupt and exception entry points
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file README.legal in the main directory of this archive
 *
 * Linux/m68k support by Hamish Macdonald
/* Kernel-internal headers: linkage/entry macros, thread_info layout,
 * syscall numbers, errno values, trap numbers and asm-offsets constants
 * (PT_OFF_*, TINFO_*, TASK_THREAD, THREAD_*) used below. */
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
/* Entry points referenced from exception/signal paths elsewhere in the
 * kernel — must remain globally visible. */
.globl ret_from_exception
.globl ret_from_signal
/*
 * Fragment (its label is not visible in this excerpt): invalid system
 * call number.  Store -ENOSYS in the saved d0 slot of the pt_regs frame
 * so userspace sees the error, then take the common exception-return
 * path.
 */
	movel	#-ENOSYS,%sp@(PT_OFF_D0)	/* return -ENOSYS to caller */
	jra	ret_from_exception
/*
 * Traced-system-call path, shown with intervening lines elided (the
 * switch-stack save/restore macros and the dispatch instructions between
 * these lines are not visible in this excerpt).
 */
	movel	#-ENOSYS,%sp@(PT_OFF_D0)	/* needed for strace: default
						 * result if tracer aborts */
	jbsr	syscall_trace_enter		/* notify ptrace/audit on entry */
	jeq	ret_from_exception		/* tracer cancelled the syscall */
	movel	%sp@(PT_OFF_ORIG_D0),%d1	/* reload (possibly rewritten)
						 * syscall number */
	lea	sys_call_table, %a0		/* base of syscall jump table */
	/* (dispatch through the table happens on elided lines) */
1:	movel	%d0,%sp@(PT_OFF_D0)		/* save the return value */
	subql	#4,%sp				/* dummy return address */
	jbsr	syscall_trace_leave		/* notify ptrace/audit on exit */
	jra	ret_from_exception
/* save top of frame */
/*
 * system_call fast path, with intervening lines elided (frame save,
 * range check of the syscall number, and the indirect call through the
 * table are not visible in this excerpt).
 */
	movel	%sp@(PT_OFF_ORIG_D0),%d0	/* d0 = syscall number from frame */
	movel	%sp,%d1				/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1		/* thread_info sits at the base of
						 * the kernel stack */
	/* Test TIF_SYSCALL_TRACE as a single bit of the flags word, using
	 * byte addressing: (31-TIF)/8 picks the byte (big-endian layout),
	 * TIF%8 the bit within it.  %a2 holds thread_info (set on an
	 * elided line). */
	btst	#(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
	lea	sys_call_table,%a0		/* base of syscall jump table */
	movel	%d0,%sp@(PT_OFF_D0)		/* save the return value */
/*
 * Exception-return path, shown with intervening labels elided
 * (Luser_return / the work-pending label sit between these lines in the
 * full file).
 */
	btst	#5,%sp@(PT_OFF_SR)	/* check if returning to kernel:
					 * bit 5 of SR is the supervisor bit */
	jeq	Luser_return		/* bit clear -> returning to user */

	/* only allow interrupts when we are really the last one on the
	 * kernel stack, otherwise stack overflow can occur during
	 * heavy interrupt load */
	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1	/* thread_info at base of kernel stack */
	move	%a2@(TINFO_FLAGS),%d1	/* thread_info->flags: any work
					 * pending before returning to user? */
	movel	%a2@(TINFO_FLAGS),%d1	/* thread_info->flags (reloaded on the
					 * work-to-do path) */
	btst	#TIF_NEED_RESCHED,%d1	/* reschedule needed? */
	subql	#4,%sp			/* dummy return address */
	pea	%sp@(SWITCH_STACK_SIZE)	/* arg: pt_regs pointer above the
					 * switch stack */
	bsrw	do_notify_resume	/* deliver signals / resume notifiers */
/*
 * This is the main interrupt handler, responsible for calling process_int()
 * Auto-vector level 1 fragment (label and register-save prologue elided;
 * an elided push accounts for the second longword popped below).
 */
	movew	%sp@(PT_OFF_FORMATVEC), %d0	/* format/vector word of frame */
	movel	#65,%sp@-			/* put vector # on stack (64 + level 1) */
	jbsr	process_int			/* process the IRQ */
3:	addql	#8,%sp				/* pop parameters off stack */
	bra	ret_from_exception
/*
 * Auto-vector level 2 interrupt fragment (label and register-save
 * prologue elided; an elided push accounts for the second longword
 * popped below).
 */
	movew	%sp@(PT_OFF_FORMATVEC), %d0	/* format/vector word of frame */
	movel	#66,%sp@-			/* put vector # on stack (64 + level 2) */
	jbsr	process_int			/* process the IRQ */
3:	addql	#8,%sp				/* pop parameters off stack */
	bra	ret_from_exception
/*
 * Auto-vector level 3 interrupt fragment (label and register-save
 * prologue elided; an elided push accounts for the second longword
 * popped below).
 */
	movew	%sp@(PT_OFF_FORMATVEC), %d0	/* format/vector word of frame */
	movel	#67,%sp@-			/* put vector # on stack (64 + level 3) */
	jbsr	process_int			/* process the IRQ */
3:	addql	#8,%sp				/* pop parameters off stack */
	bra	ret_from_exception
/*
 * Auto-vector level 4 interrupt fragment (label and register-save
 * prologue elided; an elided push accounts for the second longword
 * popped below).
 */
	movew	%sp@(PT_OFF_FORMATVEC), %d0	/* format/vector word of frame */
	movel	#68,%sp@-			/* put vector # on stack (64 + level 4) */
	jbsr	process_int			/* process the IRQ */
3:	addql	#8,%sp				/* pop parameters off stack */
	bra	ret_from_exception
/*
 * Auto-vector level 5 interrupt fragment (label and register-save
 * prologue elided; an elided push accounts for the second longword
 * popped below).
 */
	movew	%sp@(PT_OFF_FORMATVEC), %d0	/* format/vector word of frame */
	movel	#69,%sp@-			/* put vector # on stack (64 + level 5) */
	jbsr	process_int			/* process the IRQ */
3:	addql	#8,%sp				/* pop parameters off stack */
	bra	ret_from_exception
/*
 * Auto-vector level 6 interrupt fragment (label and register-save
 * prologue elided; an elided push accounts for the second longword
 * popped below).
 */
	movew	%sp@(PT_OFF_FORMATVEC), %d0	/* format/vector word of frame */
	movel	#70,%sp@-			/* put vector # on stack (64 + level 6) */
	jbsr	process_int			/* process the IRQ */
3:	addql	#8,%sp				/* pop parameters off stack */
	bra	ret_from_exception
/*
 * Auto-vector level 7 (NMI) interrupt fragment (label and register-save
 * prologue elided; an elided push accounts for the second longword
 * popped below).
 */
	movew	%sp@(PT_OFF_FORMATVEC), %d0	/* format/vector word of frame */
	movel	#71,%sp@-			/* put vector # on stack (64 + level 7) */
	jbsr	process_int			/* process the IRQ */
3:	addql	#8,%sp				/* pop parameters off stack */
	bra	ret_from_exception
/*
 * Generic interrupt fragment: unlike the fixed-level handlers above, the
 * vector number is taken from the exception frame itself.  (Label,
 * register-save prologue and the masking of %d0 down to the vector bits
 * are elided; an elided push accounts for the second longword popped
 * below.)
 */
	movew	%sp@(PT_OFF_FORMATVEC), %d0	/* format/vector word of frame */
	movel	%d0,%sp@-			/* put vector # on stack */
	jbsr	process_int			/* process the IRQ */
3:	addql	#8,%sp				/* pop parameters off stack */
	bra	ret_from_exception
/*
 * Handler for uninitialized and spurious interrupts.
 * Only bookkeeping is visible here: bump the global error counter so
 * the condition shows up in /proc interrupt statistics.
 */
	addql	#1,irq_err_count
/*
 * Context switch (resume) fragment.
 *
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 *
 * Some lines are elided in this excerpt — notably the instruction that
 * loads the restored value back into %usp, and the final return.
 */
	movel	%a0,%d1				/* save prev thread in d1 */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)	/* save sr */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
	movel	%usp,%a3			/* save usp */
	movel	%a3,%a0@(TASK_THREAD+THREAD_USP)
	movel	%a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr	/* restore thread status reg */