1 /* SPDX-License-Identifier: GPL-2.0-or-later
3 * entry.S -- non-mmu 68000 interrupt and exception entry points
5 * Copyright (C) 1991, 1992 Linus Torvalds
7 * Linux/m68k support by Hamish Macdonald
10 #include <linux/linkage.h>
11 #include <asm/thread_info.h>
12 #include <asm/unistd.h>
13 #include <asm/errno.h>
14 #include <asm/setup.h>
15 #include <asm/traps.h>
16 #include <asm/asm-offsets.h>
17 #include <asm/entry.h>
23 .globl ret_from_exception
/*
 * System-call entry fragments (the entry label and the frame save code
 * are on lines not visible in this excerpt).
 *
 * Unknown syscall number: preload the saved d0 slot of struct pt_regs
 * with -ENOSYS so the caller sees a failure, then leave via the common
 * exception-return path.
 */
35 movel #-ENOSYS,%sp@(PT_OFF_D0)
36 jra ret_from_exception
/*
 * Traced-syscall path: also preset -ENOSYS in the saved d0 slot so a
 * tracer (strace) reads a sane value at syscall-entry stop, before the
 * real return value overwrites it below.
 */
39 movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
42 jbsr syscall_trace_enter
/*
 * NOTE(review): the condition codes tested by this jeq are set by an
 * instruction not visible in this excerpt (presumably a test of the
 * syscall_trace_enter result) -- confirm against the full file.
 */
46 jeq ret_from_exception
47 movel %sp@(PT_OFF_ORIG_D0),%d1 /* d1 = original syscall number */
52 lea sys_call_table, %a0 /* a0 = base of the syscall dispatch table */
55 1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
56 subql #4,%sp /* dummy return address */
58 jbsr syscall_trace_leave /* report syscall exit to the tracer */
61 jra ret_from_exception
66 /* save top of frame*/
/*
 * Non-traced system-call dispatch fragment: fetch the syscall number,
 * locate thread_info at the base of the kernel stack, and check for
 * syscall tracing before indexing sys_call_table.
 */
71 movel %sp@(PT_OFF_ORIG_D0),%d0 /* d0 = syscall number from pt_regs */
73 movel %sp,%d1 /* get thread_info pointer */
74 andl #-THREAD_SIZE,%d1 /* thread_info lives at the THREAD_SIZE-aligned stack base */
/*
 * Test the TIF_SYSCALL_TRACE bit directly in memory: byte offset and
 * bit number are derived from the flag number (big-endian byte order).
 * NOTE(review): a2 is presumably loaded from d1 on a line not visible
 * here -- confirm against the full file.
 */
76 btst #(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
81 lea sys_call_table,%a0 /* a0 = dispatch table; indexing/call not shown here */
84 movel %d0,%sp@(PT_OFF_D0) /* save the return value*/
/*
 * Common exception-return fragment. Bit 5 of the saved SR's high byte
 * is the 68000 supervisor (S) bit: set means we are returning to
 * kernel context and user-mode work (resched/signals) is skipped.
 */
87 btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/
88 jeq Luser_return /* if so, skip resched, signals*/
94 /* only allow interrupts when we are really the last one on the*/
95 /* kernel stack, otherwise stack overflow can occur during*/
96 /* heavy interrupt load*/
/*
 * User-return path: re-derive the thread_info pointer from the kernel
 * stack pointer and inspect thread_info->flags for pending work.
 */
99 movel %sp,%d1 /* get thread_info pointer */
100 andl #-THREAD_SIZE,%d1 /* thread_info at THREAD_SIZE-aligned stack base */
103 move %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
/*
 * Work loop fragment: reload flags and test for a pending reschedule.
 * The branches taken on these tests are on lines not visible here.
 */
108 movel %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
109 btst #TIF_NEED_RESCHED,%d1
/*
 * Signal/notify path: push a dummy return address plus a pointer past
 * the switch-stack area (the saved-register block below pt_regs) as
 * the argument to do_notify_resume().
 */
113 subql #4,%sp /* dummy return address*/
115 pea %sp@(SWITCH_STACK_SIZE)
116 bsrw do_notify_resume
123 * This is the main interrupt handler, responsible for calling process_int()
/*
 * Autovector interrupt 1 entry (fragment -- the entry label and the
 * register-save code are on lines not visible here). Reads the
 * exception frame's format/vector word, pushes vector number 65
 * (VEC_INT1) for process_int(), then pops both longword arguments
 * (the second argument is pushed on a line not shown -- presumably
 * the pt_regs pointer; addql #8 pops two longs) and exits through
 * the common return path.
 */
127 movew %sp@(PT_OFF_FORMATVEC), %d0
131 movel #65,%sp@- /* put vector # on stack*/
132 jbsr process_int /* process the IRQ*/
133 3: addql #8,%sp /* pop parameters off stack*/
134 bra ret_from_exception
/*
 * Autovector interrupt 2 entry (fragment): push vector number 66 for
 * process_int(), then pop the two longword arguments (the second is
 * pushed on a line not visible here) and leave via ret_from_exception.
 */
138 movew %sp@(PT_OFF_FORMATVEC), %d0
142 movel #66,%sp@- /* put vector # on stack*/
143 jbsr process_int /* process the IRQ*/
144 3: addql #8,%sp /* pop parameters off stack*/
145 bra ret_from_exception
/*
 * Autovector interrupt 3 entry (fragment): push vector number 67 for
 * process_int(), then pop the two longword arguments (the second is
 * pushed on a line not visible here) and leave via ret_from_exception.
 */
149 movew %sp@(PT_OFF_FORMATVEC), %d0
153 movel #67,%sp@- /* put vector # on stack*/
154 jbsr process_int /* process the IRQ*/
155 3: addql #8,%sp /* pop parameters off stack*/
156 bra ret_from_exception
/*
 * Autovector interrupt 4 entry (fragment): push vector number 68 for
 * process_int(), then pop the two longword arguments (the second is
 * pushed on a line not visible here) and leave via ret_from_exception.
 */
160 movew %sp@(PT_OFF_FORMATVEC), %d0
164 movel #68,%sp@- /* put vector # on stack*/
165 jbsr process_int /* process the IRQ*/
166 3: addql #8,%sp /* pop parameters off stack*/
167 bra ret_from_exception
/*
 * Autovector interrupt 5 entry (fragment): push vector number 69 for
 * process_int(), then pop the two longword arguments (the second is
 * pushed on a line not visible here) and leave via ret_from_exception.
 */
171 movew %sp@(PT_OFF_FORMATVEC), %d0
175 movel #69,%sp@- /* put vector # on stack*/
176 jbsr process_int /* process the IRQ*/
177 3: addql #8,%sp /* pop parameters off stack*/
178 bra ret_from_exception
/*
 * Autovector interrupt 6 entry (fragment): push vector number 70 for
 * process_int(), then pop the two longword arguments (the second is
 * pushed on a line not visible here) and leave via ret_from_exception.
 */
182 movew %sp@(PT_OFF_FORMATVEC), %d0
186 movel #70,%sp@- /* put vector # on stack*/
187 jbsr process_int /* process the IRQ*/
188 3: addql #8,%sp /* pop parameters off stack*/
189 bra ret_from_exception
/*
 * Autovector interrupt 7 (NMI level) entry (fragment): push vector
 * number 71 for process_int(), then pop the two longword arguments
 * (the second is pushed on a line not visible here) and leave via
 * ret_from_exception.
 */
193 movew %sp@(PT_OFF_FORMATVEC), %d0
197 movel #71,%sp@- /* put vector # on stack*/
198 jbsr process_int /* process the IRQ*/
199 3: addql #8,%sp /* pop parameters off stack*/
200 bra ret_from_exception
/*
 * Generic interrupt entry (fragment): unlike the fixed-vector handlers
 * above, the vector number is taken from the exception frame itself --
 * the format/vector word is read into d0 (the extraction/masking of
 * the vector field is on lines not visible here) and d0 is pushed as
 * the argument to process_int().
 */
204 movew %sp@(PT_OFF_FORMATVEC), %d0
208 movel %d0,%sp@- /* put vector # on stack*/
209 jbsr process_int /* process the IRQ*/
210 3: addql #8,%sp /* pop parameters off stack*/
211 bra ret_from_exception
214 * Handler for uninitialized and spurious interrupts.
/*
 * Just count the stray interrupt; irq_err_count is the kernel's global
 * spurious-interrupt counter. The surrounding label and return
 * instruction are on lines not visible here.
 */
217 addql #1,irq_err_count
221 * Beware - when entering resume, prev (the current task) is
222 * in a0, next (the new task) is in a1, so don't change these
223 * registers until their contents are no longer needed.
/*
 * Context switch (fragment): save the outgoing task's SR, kernel stack
 * pointer and user stack pointer into its thread_struct, then install
 * the incoming task's USP, KSP and SR. The surrounding entry/exit code
 * (including any a2/d-register preservation and the final return) is
 * on lines not visible here.
 */
226 movel %a0,%d1 /* save prev thread in d1 */
227 movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */
229 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
230 movel %usp,%a3 /* save usp */
231 movel %a3,%a0@(TASK_THREAD+THREAD_USP)
/*
 * From here on, prev's state is fully saved; switch to next. Loading
 * the new KSP retargets all subsequent stack references, and the final
 * SR write restores the new task's interrupt mask/status last.
 */
233 movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
235 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
237 movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */