1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
5 * kernel entry points (interruptions, system call wrappers)
6 * Copyright (C) 1999,2000 Philipp Rumpf
7 * Copyright (C) 1999 SuSE GmbH Nuernberg
8 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
9 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
12 #include <asm/asm-offsets.h>
14 /* we have the following possibilities to act on an interruption:
15 * - handle in assembly and use shadowed registers only
16 * - save registers to kernel stack and handle in assembly or C */
20 #include <asm/cache.h> /* for L1_CACHE_SHIFT */
21 #include <asm/assembly.h> /* for LDREG/STREG defines */
22 #include <asm/signal.h>
23 #include <asm/unistd.h>
25 #include <asm/traps.h>
26 #include <asm/thread_info.h>
27 #include <asm/alternative.h>
29 #include <linux/linkage.h>
30 #include <linux/pgtable.h>
38 /* Get aligned page_table_lock address for this mm from cr28/tr4 */
43 /* space_to_prot macro creates a prot id from a space id */
45 #if (SPACEID_SHIFT) == 0
46 .macro space_to_prot spc prot
47 depd,z \spc,62,31,\prot
50 .macro space_to_prot spc prot
51 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
55 * The "get_stack" macros are responsible for determining the
59 * Already using a kernel stack, so call the
60 * get_stack_use_r30 macro to push a pt_regs structure
61 * on the stack, and store registers there.
63 * Need to set up a kernel stack, so call the
64 * get_stack_use_cr30 macro to set up a pointer
65 * to the pt_regs structure contained within the
66 * task pointer pointed to by cr30. Load the stack
67 * pointer from the task structure.
69 * Note that we use shadowed registers for temps until
70 * we can save %r26 and %r29. %r26 is used to preserve
71 * %r8 (a shadowed register) which temporarily contained
72 * either the fault type ("code") or the eirr. We need
73 * to use a non-shadowed register to carry the value over
74 * the rfir in virt_map. We use %r26 since this value winds
75 * up being passed as the argument to either do_cpu_irq_mask
76 * or handle_interruption. %r29 is used to hold a pointer to
77 * the register save area, and once again, it needs
78 * be a non-shadowed register so that it survives the rfir.
81 .macro get_stack_use_cr30
83 /* we save the registers in the task struct */
87 tophys %r1,%r9 /* task_struct */
88 LDREG TASK_STACK(%r9),%r30
89 ldo PT_SZ_ALGN(%r30),%r30
90 mtsp %r0,%sr7 /* clear sr7 after kernel stack was set! */
92 ldo TASK_REGS(%r9),%r9
93 STREG %r17,PT_GR30(%r9)
94 STREG %r29,PT_GR29(%r9)
95 STREG %r26,PT_GR26(%r9)
96 STREG %r16,PT_SR7(%r9)
100 .macro get_stack_use_r30
102 /* we put a struct pt_regs on the stack and save the registers there */
106 ldo PT_SZ_ALGN(%r30),%r30
107 STREG %r1,PT_GR30(%r9)
108 STREG %r29,PT_GR29(%r9)
109 STREG %r26,PT_GR26(%r9)
110 STREG %r16,PT_SR7(%r9)
115 LDREG PT_GR1(%r29), %r1
116 LDREG PT_GR30(%r29),%r30
117 LDREG PT_GR29(%r29),%r29
120 /* default interruption handler
121 * (calls traps.c:handle_interruption) */
128 /* Interrupt interruption handler
129 * (calls irq.c:do_cpu_irq_mask) */
136 .import os_hpmc, code
140 nop /* must be a NOP, will be patched later */
141 load32 PA(os_hpmc), %r3
144 .word 0 /* checksum (will be patched) */
145 .word 0 /* address of handler */
146 .word 0 /* length of handler */
150 * Performance Note: Instructions will be moved up into
151 * this part of the code later on, once we are sure
152 * that the tlb miss handlers are close to final form.
155 /* Register definitions for tlb miss handler macros */
157 va = r8 /* virtual address for which the trap occurred */
158 spc = r24 /* space for which the trap occurred */
163 * itlb miss interruption handler (parisc 1.1 - 32 bit)
177 * itlb miss interruption handler (parisc 2.0)
194 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
197 .macro naitlb_11 code
208 * naitlb miss interruption handler (parisc 2.0)
211 .macro naitlb_20 code
226 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
240 * dtlb miss interruption handler (parisc 2.0)
257 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
259 .macro nadtlb_11 code
269 /* nadtlb miss interruption handler (parisc 2.0) */
271 .macro nadtlb_20 code
286 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
300 * dirty bit trap interruption handler (parisc 2.0)
316 /* In LP64, the space contains part of the upper 32 bits of the
317 * fault. We have to extract this and place it in the va,
318 * zeroing the corresponding bits in the space register */
319 .macro space_adjust spc,va,tmp
321 extrd,u \spc,63,SPACEID_SHIFT,\tmp
322 depd %r0,63,SPACEID_SHIFT,\spc
323 depd \tmp,31,SPACEID_SHIFT,\va
327 .import swapper_pg_dir,code
329 /* Get the pgd. For faults on space zero (kernel space), this
330 * is simply swapper_pg_dir. For user space faults, the
331 * pgd is stored in %cr25 */
332 .macro get_pgd spc,reg
333 ldil L%PA(swapper_pg_dir),\reg
334 ldo R%PA(swapper_pg_dir)(\reg),\reg
335 or,COND(=) %r0,\spc,%r0
340 space_check(spc,tmp,fault)
342 spc - The space we saw the fault with.
343 tmp - The place to store the current space.
344 fault - Function to call on failure.
346 Only allow faults on different spaces from the
347 currently active one if we're the kernel
350 .macro space_check spc,tmp,fault
352 /* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
353 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
354 * as kernel, so defeat the space
357 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
358 cmpb,COND(<>),n \tmp,\spc,\fault
361 /* Look up a PTE in a 2-Level scheme (faulting at each
362 * level if the entry isn't present
364 * NOTE: we use ldw even for LP64, since the short pointers
365 * can address up to 1TB
367 .macro L2_ptep pmd,pte,index,va,fault
368 #if CONFIG_PGTABLE_LEVELS == 3
369 extru_safe \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
371 extru_safe \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
373 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
374 #if CONFIG_PGTABLE_LEVELS < 3
377 ldw,s \index(\pmd),\pmd
378 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
379 dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
380 SHLREG \pmd,PxD_VALUE_SHIFT,\pmd
381 extru_safe \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
382 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
383 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
386 /* Look up PTE in a 3-Level scheme. */
387 .macro L3_ptep pgd,pte,index,va,fault
388 #if CONFIG_PGTABLE_LEVELS == 3
390 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
391 ldw,s \index(\pgd),\pgd
392 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
393 shld \pgd,PxD_VALUE_SHIFT,\pgd
395 L2_ptep \pgd,\pte,\index,\va,\fault
398 /* Acquire page_table_lock and check page is present. */
399 .macro ptl_lock spc,ptp,pte,tmp,tmp1,fault
400 #ifdef CONFIG_TLB_PTLOCK
401 98: cmpib,COND(=),n 0,\spc,2f
403 1: LDCW 0(\tmp),\tmp1
404 cmpib,COND(=) 0,\tmp1,1b
407 bb,<,n \pte,_PAGE_PRESENT_BIT,3f
410 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
412 2: LDREG 0(\ptp),\pte
413 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
417 /* Release page_table_lock without reloading lock address.
418 Note that the values in the register spc are limited to
419 NR_SPACE_IDS (262144). Thus, the stw instruction always
420 stores a nonzero value even when register spc is 64 bits.
421 We use an ordered store to ensure all prior accesses are
422 performed prior to releasing the lock. */
423 .macro ptl_unlock0 spc,tmp
424 #ifdef CONFIG_TLB_PTLOCK
425 98: or,COND(=) %r0,\spc,%r0
427 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
431 /* Release page_table_lock. */
432 .macro ptl_unlock1 spc,tmp
433 #ifdef CONFIG_TLB_PTLOCK
435 ptl_unlock0 \spc,\tmp
436 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
440 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
441 * don't needlessly dirty the cache line if it was already set */
442 .macro update_accessed ptp,pte,tmp,tmp1
443 ldi _PAGE_ACCESSED,\tmp1
445 and,COND(<>) \tmp1,\pte,%r0
449 /* Set the dirty bit (and accessed bit). No need to be
450 * clever, this is only used from the dirty fault */
451 .macro update_dirty ptp,pte,tmp
452 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
457 /* We have (depending on the page size):
458 * - 38 to 52-bit Physical Page Number
459 * - 12 to 26-bit page offset
461 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
462 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
463 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
464 #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
466 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
467 .macro convert_for_tlb_insert20 pte,tmp
468 #ifdef CONFIG_HUGETLB_PAGE
470 extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
471 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
473 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
474 (63-58)+PAGE_ADD_SHIFT,\pte
475 extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0
476 depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
477 (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
478 #else /* Huge pages disabled */
479 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
480 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
481 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
482 (63-58)+PAGE_ADD_SHIFT,\pte
486 /* Convert the pte and prot to tlb insertion values. How
487 * this happens is quite subtle, read below */
488 .macro make_insert_tlb spc,pte,prot,tmp
489 space_to_prot \spc \prot /* create prot id from space */
490 /* The following is the real subtlety. This is depositing
491 * T <-> _PAGE_REFTRAP
493 * B <-> _PAGE_DMB (memory break)
495 * Then incredible subtlety: The access rights are
496 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
497 * See 3-14 of the parisc 2.0 manual
499 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
500 * trigger an access rights trap in user space if the user
501 * tries to read an unreadable page */
504 /* PAGE_USER indicates the page can be read with user privileges,
505 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
506 * contains _PAGE_READ) */
507 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
509 /* If we're a gateway page, drop PL2 back to zero for promotion
510 * to kernel privilege (so we can execute the page as kernel).
511 * Any privilege promotion page always denies read and write */
512 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
513 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
515 /* Enforce uncacheable pages.
516 * This should ONLY be used for MMIO on PA 2.0 machines.
517 * Memory/DMA is cache coherent on all PA2.0 machines we support
518 * (that means T-class is NOT supported) and the memory controllers
519 * on most of those machines only handle cache transactions.
521 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
524 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
525 convert_for_tlb_insert20 \pte \tmp
528 /* Identical macro to make_insert_tlb above, except it
529 * makes the tlb entry for the differently formatted pa11
530 * insertion instructions */
531 .macro make_insert_tlb_11 spc,pte,prot
532 zdep \spc,30,15,\prot
534 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
536 extru,= \pte,_PAGE_USER_BIT,1,%r0
537 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
538 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
539 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
541 /* Get rid of prot bits and convert to page addr for iitlba */
543 depi 0,31,ASM_PFN_PTE_SHIFT,\pte
544 SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
547 /* This is for ILP32 PA2.0 only. The TLB insertion needs
548 * to extend into I/O space if the address is 0xfXXXXXXX
549 * so we extend the f's into the top word of the pte in
551 .macro f_extend pte,tmp
552 extrd,s \pte,42,4,\tmp
554 extrd,s \pte,63,25,\pte
557 /* The alias region is comprised of a pair of 4 MB regions
558 * aligned to 8 MB. It is used to clear/copy/flush user pages
559 * using kernel virtual addresses congruent with the user
562 * To use the alias page, you set %r26 up with the to TLB
563 * entry (identifying the physical page) and %r23 up with
564 * the from tlb entry (or nothing if only a to entry---for
565 * clear_user_page_asm) */
566 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
567 cmpib,COND(<>),n 0,\spc,\fault
568 ldil L%(TMPALIAS_MAP_START),\tmp
570 depi_safe 0,31,TMPALIAS_SIZE_BITS+1,\tmp1
571 cmpb,COND(<>),n \tmp,\tmp1,\fault
572 mfctl %cr19,\tmp /* iir */
573 /* get the opcode (first six bits) into \tmp */
574 extrw,u \tmp,5,6,\tmp
576 * Only setting the T bit prevents data cache movein
577 * Setting access rights to zero prevents instruction cache movein
579 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
580 * to type field and _PAGE_READ goes to top bit of PL1
582 ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
584 * so if the opcode is one (i.e. this is a memory management
585 * instruction) nullify the next load so \prot is only T.
586 * Otherwise this is a normal data operation
588 cmpiclr,= 0x01,\tmp,%r0
589 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
591 depd,z \prot,8,7,\prot
594 depw,z \prot,8,7,\prot
596 .error "undefined PA type to do_alias"
600 * OK, it is in the temp alias region, check whether "from" or "to".
601 * Check "subtle" note in pacache.S re: r23/r26.
603 extrw,u,= \va,31-TMPALIAS_SIZE_BITS,1,%r0
604 or,COND(tr) %r23,%r0,\pte
607 /* convert phys addr in \pte (from r23 or r26) to tlb insert format */
608 SHRREG \pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte
609 depi_safe _PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte
614 * Fault_vectors are architecturally required to be aligned on a 2K
621 ENTRY(fault_vector_20)
622 /* First vector is invalid (0) */
623 .ascii "cows can fly"
632 itlb_20 PARISC_ITLB_TRAP
664 ENTRY(fault_vector_11)
665 /* First vector is invalid (0) */
666 .ascii "cows can fly"
675 itlb_11 PARISC_ITLB_TRAP
704 /* Fault vector is separately protected and *must* be on its own page */
707 .import handle_interruption,code
708 .import do_cpu_irq_mask,code
713 * copy_thread moved args into task save area.
716 ENTRY(ret_from_kernel_thread)
717 /* Call schedule_tail first though */
718 BL schedule_tail, %r2
721 mfctl %cr30,%r1 /* task_struct */
722 LDREG TASK_PT_GR25(%r1), %r26
724 LDREG TASK_PT_GR27(%r1), %r27
726 LDREG TASK_PT_GR26(%r1), %r1
729 b finish_child_return
731 END(ret_from_kernel_thread)
735 * struct task_struct *_switch_to(struct task_struct *prev,
736 * struct task_struct *next)
738 * switch kernel stacks and return prev */
739 ENTRY_CFI(_switch_to)
740 STREG %r2, -RP_OFFSET(%r30)
745 load32 _switch_to_ret, %r2
747 STREG %r2, TASK_PT_KPC(%r26)
748 LDREG TASK_PT_KPC(%r25), %r2
750 STREG %r30, TASK_PT_KSP(%r26)
751 LDREG TASK_PT_KSP(%r25), %r30
755 ENTRY(_switch_to_ret)
756 mtctl %r0, %cr0 /* Needed for single stepping */
760 LDREG -RP_OFFSET(%r30), %r2
763 ENDPROC_CFI(_switch_to)
766 * Common rfi return path for interruptions, kernel execve, and
767 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
768 * return via this path if the signal was received when the process
769 * was running; if the process was blocked on a syscall then the
770 * normal syscall_exit path is used. All syscalls for traced
771 * processes exit via intr_restore.
773 * XXX If any syscalls that change a process's space id ever exit
774 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
781 ENTRY_CFI(syscall_exit_rfi)
782 mfctl %cr30,%r16 /* task_struct */
783 ldo TASK_REGS(%r16),%r16
784 /* Force iaoq to userspace, as the user has had access to our current
785 * context via sigcontext. Also Filter the PSW for the same reason.
787 LDREG PT_IAOQ0(%r16),%r19
788 depi PRIV_USER,31,2,%r19
789 STREG %r19,PT_IAOQ0(%r16)
790 LDREG PT_IAOQ1(%r16),%r19
791 depi PRIV_USER,31,2,%r19
792 STREG %r19,PT_IAOQ1(%r16)
793 LDREG PT_PSW(%r16),%r19
794 load32 USER_PSW_MASK,%r1
796 load32 USER_PSW_HI_MASK,%r20
799 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
801 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
802 STREG %r19,PT_PSW(%r16)
805 * If we aren't being traced, we never saved space registers
806 * (we don't store them in the sigcontext), so set them
807 * to "proper" values now (otherwise we'll wind up restoring
808 * whatever was last stored in the task structure, which might
809 * be inconsistent if an interrupt occurred while on the gateway
810 * page). Note that we may be "trashing" values the user put in
811 * them, but we don't support the user changing them.
814 STREG %r0,PT_SR2(%r16)
816 STREG %r19,PT_SR0(%r16)
817 STREG %r19,PT_SR1(%r16)
818 STREG %r19,PT_SR3(%r16)
819 STREG %r19,PT_SR4(%r16)
820 STREG %r19,PT_SR5(%r16)
821 STREG %r19,PT_SR6(%r16)
822 STREG %r19,PT_SR7(%r16)
825 /* check for reschedule */
827 LDREG TASK_TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
828 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
830 .import do_notify_resume,code
834 LDREG TASK_TI_FLAGS(%r1),%r19
835 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
836 and,COND(<>) %r19, %r20, %r0
837 b,n intr_restore /* skip past if we've nothing to do */
839 /* This check is critical to having LWS
840 * working. The IASQ is zero on the gateway
841 * page and we cannot deliver any signals until
842 * we get off the gateway page.
844 * Only do signals if we are returning to user space
846 LDREG PT_IASQ0(%r16), %r20
847 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
848 LDREG PT_IASQ1(%r16), %r20
849 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
851 copy %r0, %r25 /* long in_syscall = 0 */
853 ldo -16(%r30),%r29 /* Reference param save area */
856 /* NOTE: We need to enable interrupts if we have to deliver
857 * signals. We used to do this earlier but it caused kernel
858 * stack overflows. */
861 BL do_notify_resume,%r2
862 copy %r16, %r26 /* struct pt_regs *regs */
868 ldo PT_FR31(%r29),%r1
872 /* inverse of virt_map */
874 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
877 /* Restore space id's and special cr's from PT_REGS
878 * structure pointed to by r29
882 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
883 * It also restores r1 and r30.
890 #ifndef CONFIG_PREEMPTION
891 # define intr_do_preempt intr_restore
892 #endif /* !CONFIG_PREEMPTION */
894 .import schedule,code
896 /* Only call schedule on return to userspace. If we're returning
897 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
898 * we jump back to intr_restore.
900 LDREG PT_IASQ0(%r16), %r20
901 cmpib,COND(=) 0, %r20, intr_do_preempt
903 LDREG PT_IASQ1(%r16), %r20
904 cmpib,COND(=) 0, %r20, intr_do_preempt
907 /* NOTE: We need to enable interrupts if we schedule. We used
908 * to do this earlier but it caused kernel stack overflows. */
912 ldo -16(%r30),%r29 /* Reference param save area */
915 ldil L%intr_check_sig, %r2
919 load32 schedule, %r20
922 ldo R%intr_check_sig(%r2), %r2
924 /* preempt the current task on returning to kernel
925 * mode from an interrupt, iff need_resched is set,
926 * and preempt_count is 0. otherwise, we continue on
927 * our merry way back to the current running task.
929 #ifdef CONFIG_PREEMPTION
930 .import preempt_schedule_irq,code
932 rsm PSW_SM_I, %r0 /* disable interrupts */
934 /* current_thread_info()->preempt_count */
936 ldw TI_PRE_COUNT(%r1), %r19
937 cmpib,<> 0, %r19, intr_restore /* if preempt_count > 0 */
938 nop /* prev insn branched backwards */
940 /* check if we interrupted a critical path */
941 LDREG PT_PSW(%r16), %r20
942 bb,<,n %r20, 31 - PSW_SM_I, intr_restore
945 /* ssm PSW_SM_I done later in intr_restore */
946 #ifdef CONFIG_MLONGCALLS
947 ldil L%intr_restore, %r2
948 load32 preempt_schedule_irq, %r1
950 ldo R%intr_restore(%r2), %r2
952 ldil L%intr_restore, %r1
953 BL preempt_schedule_irq, %r2
954 ldo R%intr_restore(%r1), %r2
956 #endif /* CONFIG_PREEMPTION */
959 * External interrupts.
963 cmpib,COND(=),n 0,%r16,1f
975 ldo PT_FR0(%r29), %r24
980 copy %r29, %r26 /* arg0 is pt_regs */
981 copy %r29, %r16 /* save pt_regs */
983 ldil L%intr_return, %r2
986 ldo -16(%r30),%r29 /* Reference param save area */
990 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
991 ENDPROC_CFI(syscall_exit_rfi)
994 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
996 ENTRY_CFI(intr_save) /* for os_hpmc */
998 cmpib,COND(=),n 0,%r16,1f
1010 /* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1011 cmpib,COND(=),n PARISC_ITLB_TRAP,%r26,skip_save_ior
1015 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1021 * If the interrupted code was running with W bit off (32 bit),
1022 * clear the b bits (bits 0 & 1) in the ior.
1023 * save_specials left ipsw value in r8 for us to test.
1025 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1028 /* adjust isr/ior: get high bits from isr and deposit in ior */
1029 space_adjust %r16,%r17,%r1
1031 STREG %r16, PT_ISR(%r29)
1032 STREG %r17, PT_IOR(%r29)
1034 #if 0 && defined(CONFIG_64BIT)
1035 /* Revisit when we have 64-bit code above 4Gb */
1039 /* We have a itlb miss, and when executing code above 4 Gb on ILP64, we
1040 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
1043 extrd,u,* %r8,PSW_W_BIT,1,%r1
1044 cmpib,COND(=),n 1,%r1,intr_save2
1045 LDREG PT_IASQ0(%r29), %r16
1046 LDREG PT_IAOQ0(%r29), %r17
1047 /* adjust iasq/iaoq */
1048 space_adjust %r16,%r17,%r1
1049 STREG %r16, PT_IASQ0(%r29)
1050 STREG %r17, PT_IAOQ0(%r29)
1059 ldo PT_FR0(%r29), %r25
1064 copy %r29, %r25 /* arg1 is pt_regs */
1066 ldo -16(%r30),%r29 /* Reference param save area */
1069 ldil L%intr_check_sig, %r2
1070 copy %r25, %r16 /* save pt_regs */
1072 b handle_interruption
1073 ldo R%intr_check_sig(%r2), %r2
1074 ENDPROC_CFI(intr_save)
1078 * Note for all tlb miss handlers:
1080 * cr24 contains a pointer to the kernel address space
1083 * cr25 contains a pointer to the current user address
1084 * space page directory.
1086 * sr3 will contain the space id of the user address space
1087 * of the current running thread while that thread is
1088 * running in the kernel.
1092 * register number allocations. Note that these are all
1093 * in the shadowed registers
1096 t0 = r1 /* temporary register 0 */
1097 va = r8 /* virtual address for which the trap occurred */
1098 t1 = r9 /* temporary register 1 */
1099 pte = r16 /* pte/phys page # */
1100 prot = r17 /* prot bits */
1101 spc = r24 /* space for which the trap occurred */
1102 ptp = r25 /* page directory/page table pointer */
1107 space_adjust spc,va,t0
1109 space_check spc,t0,dtlb_fault
1111 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1113 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1114 update_accessed ptp,pte,t0,t1
1116 make_insert_tlb spc,pte,prot,t1
1124 dtlb_check_alias_20w:
1125 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1133 space_adjust spc,va,t0
1135 space_check spc,t0,nadtlb_fault
1137 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
1139 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1140 update_accessed ptp,pte,t0,t1
1142 make_insert_tlb spc,pte,prot,t1
1150 nadtlb_check_alias_20w:
1151 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1163 space_check spc,t0,dtlb_fault
1165 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1167 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
1168 update_accessed ptp,pte,t0,t1
1170 make_insert_tlb_11 spc,pte,prot
1172 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1175 idtlba pte,(%sr1,va)
1176 idtlbp prot,(%sr1,va)
1178 mtsp t1, %sr1 /* Restore sr1 */
1184 dtlb_check_alias_11:
1185 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
1196 space_check spc,t0,nadtlb_fault
1198 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
1200 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
1201 update_accessed ptp,pte,t0,t1
1203 make_insert_tlb_11 spc,pte,prot
1205 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1208 idtlba pte,(%sr1,va)
1209 idtlbp prot,(%sr1,va)
1211 mtsp t1, %sr1 /* Restore sr1 */
1217 nadtlb_check_alias_11:
1218 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
1227 space_adjust spc,va,t0
1229 space_check spc,t0,dtlb_fault
1231 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1233 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
1234 update_accessed ptp,pte,t0,t1
1236 make_insert_tlb spc,pte,prot,t1
1246 dtlb_check_alias_20:
1247 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1257 space_check spc,t0,nadtlb_fault
1259 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
1261 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1262 update_accessed ptp,pte,t0,t1
1264 make_insert_tlb spc,pte,prot,t1
1274 nadtlb_check_alias_20:
1275 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1287 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
1288 * probei instructions. The kernel no longer faults doing flushes.
1289 * Use of lpa and probe instructions is rare. Given the issue
1290 * with shadow registers, we defer everything to the "slow" path.
1298 * I miss is a little different, since we allow users to fault
1299 * on the gateway page which is in the kernel address space.
1302 space_adjust spc,va,t0
1304 space_check spc,t0,itlb_fault
1306 L3_ptep ptp,pte,t0,va,itlb_fault
1308 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1309 update_accessed ptp,pte,t0,t1
1311 make_insert_tlb spc,pte,prot,t1
1322 * I miss is a little different, since we allow users to fault
1323 * on the gateway page which is in the kernel address space.
1326 space_adjust spc,va,t0
1328 space_check spc,t0,naitlb_fault
1330 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1332 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1333 update_accessed ptp,pte,t0,t1
1335 make_insert_tlb spc,pte,prot,t1
1343 naitlb_check_alias_20w:
1344 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1356 space_check spc,t0,itlb_fault
1358 L2_ptep ptp,pte,t0,va,itlb_fault
1360 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1361 update_accessed ptp,pte,t0,t1
1363 make_insert_tlb_11 spc,pte,prot
1365 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1368 iitlba pte,(%sr1,va)
1369 iitlbp prot,(%sr1,va)
1371 mtsp t1, %sr1 /* Restore sr1 */
1380 space_check spc,t0,naitlb_fault
1382 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1384 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
1385 update_accessed ptp,pte,t0,t1
1387 make_insert_tlb_11 spc,pte,prot
1389 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1392 iitlba pte,(%sr1,va)
1393 iitlbp prot,(%sr1,va)
1395 mtsp t1, %sr1 /* Restore sr1 */
1401 naitlb_check_alias_11:
1402 do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
1404 iitlba pte,(%sr0, va)
1405 iitlbp prot,(%sr0, va)
1414 space_check spc,t0,itlb_fault
1416 L2_ptep ptp,pte,t0,va,itlb_fault
1418 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1419 update_accessed ptp,pte,t0,t1
1421 make_insert_tlb spc,pte,prot,t1
1434 space_check spc,t0,naitlb_fault
1436 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1438 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
1439 update_accessed ptp,pte,t0,t1
1441 make_insert_tlb spc,pte,prot,t1
1451 naitlb_check_alias_20:
1452 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1464 space_adjust spc,va,t0
1466 space_check spc,t0,dbit_fault
1468 L3_ptep ptp,pte,t0,va,dbit_fault
1470 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1471 update_dirty ptp,pte,t1
1473 make_insert_tlb spc,pte,prot,t1
1486 space_check spc,t0,dbit_fault
1488 L2_ptep ptp,pte,t0,va,dbit_fault
1490 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1491 update_dirty ptp,pte,t1
1493 make_insert_tlb_11 spc,pte,prot
1495 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1498 idtlba pte,(%sr1,va)
1499 idtlbp prot,(%sr1,va)
1501 mtsp t1, %sr1 /* Restore sr1 */
1510 space_check spc,t0,dbit_fault
1512 L2_ptep ptp,pte,t0,va,dbit_fault
1514 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1515 update_dirty ptp,pte,t1
1517 make_insert_tlb spc,pte,prot,t1
1528 .import handle_interruption,code
1532 ldi 31,%r8 /* Use an unused code */
1540 ldi PARISC_ITLB_TRAP,%r8
1554 /* Register saving semantics for system calls:
1556 %r1 clobbered by system call macro in userspace
1557 %r2 saved in PT_REGS by gateway page
1558 %r3 - %r18 preserved by C code (saved by signal code)
1559 %r19 - %r20 saved in PT_REGS by gateway page
1560 %r21 - %r22 non-standard syscall args
1561 stored in kernel stack by gateway page
1562 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1563 %r27 - %r30 saved in PT_REGS by gateway page
1564 %r31 syscall return pointer
1567 /* Floating point registers (FIXME: what do we do with these?)
1569 %fr0 - %fr3 status/exception, not preserved
1570 %fr4 - %fr7 arguments
1571 %fr8 - %fr11 not preserved by C code
1572 %fr12 - %fr21 preserved by C code
1573 %fr22 - %fr31 not preserved by C code
1576 .macro reg_save regs
1577 STREG %r3, PT_GR3(\regs)
1578 STREG %r4, PT_GR4(\regs)
1579 STREG %r5, PT_GR5(\regs)
1580 STREG %r6, PT_GR6(\regs)
1581 STREG %r7, PT_GR7(\regs)
1582 STREG %r8, PT_GR8(\regs)
1583 STREG %r9, PT_GR9(\regs)
1584 STREG %r10,PT_GR10(\regs)
1585 STREG %r11,PT_GR11(\regs)
1586 STREG %r12,PT_GR12(\regs)
1587 STREG %r13,PT_GR13(\regs)
1588 STREG %r14,PT_GR14(\regs)
1589 STREG %r15,PT_GR15(\regs)
1590 STREG %r16,PT_GR16(\regs)
1591 STREG %r17,PT_GR17(\regs)
1592 STREG %r18,PT_GR18(\regs)
1595 .macro reg_restore regs
1596 LDREG PT_GR3(\regs), %r3
1597 LDREG PT_GR4(\regs), %r4
1598 LDREG PT_GR5(\regs), %r5
1599 LDREG PT_GR6(\regs), %r6
1600 LDREG PT_GR7(\regs), %r7
1601 LDREG PT_GR8(\regs), %r8
1602 LDREG PT_GR9(\regs), %r9
1603 LDREG PT_GR10(\regs),%r10
1604 LDREG PT_GR11(\regs),%r11
1605 LDREG PT_GR12(\regs),%r12
1606 LDREG PT_GR13(\regs),%r13
1607 LDREG PT_GR14(\regs),%r14
1608 LDREG PT_GR15(\regs),%r15
1609 LDREG PT_GR16(\regs),%r16
1610 LDREG PT_GR17(\regs),%r17
1611 LDREG PT_GR18(\regs),%r18
1614 .macro fork_like name
1615 ENTRY_CFI(sys_\name\()_wrapper)
1617 ldo TASK_REGS(%r1),%r1
1620 ldil L%sys_\name, %r31
1621 be R%sys_\name(%sr4,%r31)
1622 STREG %r28, PT_CR27(%r1)
1623 ENDPROC_CFI(sys_\name\()_wrapper)
1631 /* Set the return value for the child */
1633 BL schedule_tail, %r2
1635 finish_child_return:
1637 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1639 LDREG PT_CR27(%r1), %r3
1646 ENTRY_CFI(sys_rt_sigreturn_wrapper)
1648 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1649 /* Don't save regs, we are going to restore them from sigcontext. */
1650 STREG %r2, -RP_OFFSET(%r30)
1652 ldo FRAME_SIZE(%r30), %r30
1653 BL sys_rt_sigreturn,%r2
1654 ldo -16(%r30),%r29 /* Reference param save area */
1656 BL sys_rt_sigreturn,%r2
1657 ldo FRAME_SIZE(%r30), %r30
1660 ldo -FRAME_SIZE(%r30), %r30
1661 LDREG -RP_OFFSET(%r30), %r2
1663 /* FIXME: I think we need to restore a few more things here. */
1665 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1668 /* If the signal was received while the process was blocked on a
1669 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1670 * take us to syscall_exit_rfi and on to intr_return.
1673 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1674 ENDPROC_CFI(sys_rt_sigreturn_wrapper)
1677 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
1678 * via syscall_exit_rfi if the signal was received while the process
1682 /* save return value now */
1684 STREG %r28,TASK_PT_GR28(%r1)
1686 /* Seems to me that dp could be wrong here, if the syscall involved
1687 * calling a module, and nothing got round to restoring dp on return.
1691 syscall_check_resched:
1693 /* check for reschedule */
1695 LDREG TASK_TI_FLAGS(%r19),%r19 /* long */
1696 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1698 .import do_signal,code
1701 LDREG TASK_TI_FLAGS(%r19),%r19
1702 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
1703 and,COND(<>) %r19, %r26, %r0
1704 b,n syscall_restore /* skip past if we've nothing to do */
/* Signal-delivery path: save callee-save registers into pt_regs so the
 * signal frame (sigcontext) is complete, then call
 * do_notify_resume(regs, in_syscall=1) and loop back to re-check for
 * pending signals.  NOTE(review): the label, reg_save invocation and
 * the instruction reloading %r1 are elided in this excerpt. */
1707 /* Save callee-save registers (for sigcontext).
1708 * FIXME: After this point the process structure should be
1709 * consistent with all the relevant state of the process
1710 * before the syscall. We need to verify this.
1713 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
1717 ldo -16(%r30),%r29 /* Reference param save area */
/* Second argument set up in the branch delay slot. */
1720 BL do_notify_resume,%r2
1721 ldi 1, %r25 /* long in_syscall = 1 */
1724 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
/* Re-check: more signals may have become pending while handling one. */
1727 b,n syscall_check_sig
/* syscall_restore: fast return-to-userspace path.  Restores the user's
 * general registers, stack pointer and space registers from pt_regs and
 * branches back to user space with "be".  Tasks being single- or
 * block-stepped cannot use this path (PSW R/T bits can only be set via
 * an RFI) and are diverted to syscall_restore_rfi.
 * NOTE(review): the syscall_restore label, FP-register restore code and
 * the rsm/ssm interrupt-masking instructions referenced by the comment
 * below are elided in this excerpt. */
1732 /* Are we being ptraced? */
1733 LDREG TASK_TI_FLAGS(%r1),%r19
1734 ldi _TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2
1735 and,COND(=) %r19,%r2,%r0
1736 b,n syscall_restore_rfi /* stepping: must exit via RFI to set PSW R/T */
1738 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
1741 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
/* Restore the caller-visible general registers from the saved pt_regs. */
1744 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
1745 LDREG TASK_PT_GR19(%r1),%r19
1746 LDREG TASK_PT_GR20(%r1),%r20
1747 LDREG TASK_PT_GR21(%r1),%r21
1748 LDREG TASK_PT_GR22(%r1),%r22
1749 LDREG TASK_PT_GR23(%r1),%r23
1750 LDREG TASK_PT_GR24(%r1),%r24
1751 LDREG TASK_PT_GR25(%r1),%r25
1752 LDREG TASK_PT_GR26(%r1),%r26
1753 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
1754 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
1755 LDREG TASK_PT_GR29(%r1),%r29
1756 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
1758 /* NOTE: We use rsm/ssm pair to make this operation atomic */
1759 LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
1761 copy %r1,%r30 /* Restore user sp */
1762 mfsp %sr3,%r1 /* Get user space id */
1763 mtsp %r1,%sr7 /* Restore sr7 */
1766 /* Set sr2 to zero for userspace syscalls to work. */
/* Duplicate the user space id into the remaining user space registers. */
1768 mtsp %r1,%sr4 /* Restore sr4 */
1769 mtsp %r1,%sr5 /* Restore sr5 */
1770 mtsp %r1,%sr6 /* Restore sr6 */
1772 depi PRIV_USER,31,2,%r31 /* ensure return to user mode. */
1775 /* decide whether to reset the wide mode bit
1777 * For a syscall, the W bit is stored in the lowest bit
1778 * of sp. Extract it and reset W if it is zero */
1779 extrd,u,*<> %r30,63,1,%r1
1781 /* now reset the lowest bit of sp if it was set */
1784 be,n 0(%sr3,%r31) /* return to user space */
1786 /* We have to return via an RFI, so that PSW T and R bits can be set
1788 * This sets up pt_regs so we can return via intr_restore, which is not
1789 * the most efficient way of doing things, but it works.
1791 syscall_restore_rfi:
/* Force an immediate recovery-counter trap after the first user-space
 * instruction (used for single-stepping). */
1792 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
1793 mtctl %r2,%cr0 /* for immediate trap */
1794 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
1795 ldi 0x0b,%r20 /* Create new PSW */
1796 depi -1,13,1,%r20 /* C, Q, D, and I bits */
1798 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1799 * set in thread_info.h and converted to PA bitmap
1800 * numbers in asm-offsets.c */
1802 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
1803 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1804 depi -1,27,1,%r20 /* R bit */
1806 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1807 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1808 depi -1,7,1,%r20 /* T bit */
/* Store the PSW we just built; the RFI path will load it. */
1810 STREG %r20,TASK_PT_PSW(%r1)
1812 /* Always store space registers, since sr3 can be changed (e.g. fork) */
/* NOTE(review): the mfsp that loads %r25 with the user space id is
 * elided in this excerpt — confirm against the full source. */
1815 STREG %r25,TASK_PT_SR3(%r1)
1816 STREG %r25,TASK_PT_SR4(%r1)
1817 STREG %r25,TASK_PT_SR5(%r1)
1818 STREG %r25,TASK_PT_SR6(%r1)
1819 STREG %r25,TASK_PT_SR7(%r1)
1820 STREG %r25,TASK_PT_IASQ0(%r1)
1821 STREG %r25,TASK_PT_IASQ1(%r1)
1824 /* Now if old D bit is clear, it means we didn't save all registers
1825 * on syscall entry, so do that now. This only happens on TRACEME
1826 * calls, or if someone attached to us while we were on a syscall.
1827 * We could make this more efficient by not saving r3-r18, but
1828 * then we wouldn't be able to use the common intr_restore path.
1829 * It is only for traced processes anyway, so performance is not
1832 bb,< %r2,30,pt_regs_ok /* Branch if D set */
1833 ldo TASK_REGS(%r1),%r25
1834 reg_save %r25 /* Save r3 to r18 */
1836 /* Save the current sr */
1838 STREG %r2,TASK_PT_SR0(%r1)
1840 /* Save the scratch sr */
1842 STREG %r2,TASK_PT_SR1(%r1)
1844 /* sr2 should be set to zero for userspace syscalls */
1845 STREG %r0,TASK_PT_SR2(%r1)
/* Use the syscall return pointer (gr31) as both instruction-address
 * queue entries, forced to user privilege. */
1847 LDREG TASK_PT_GR31(%r1),%r2
1848 depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */
1849 STREG %r2,TASK_PT_IAOQ0(%r1)
1851 STREG %r2,TASK_PT_IAOQ1(%r1)
/* pt_regs_ok: full register state was already saved on entry (D bit was
 * set).  Just force the saved instruction-address queue entries to user
 * privilege before returning via the common intr_restore path.
 * NOTE(review): the pt_regs_ok label line itself is elided here. */
1856 LDREG TASK_PT_IAOQ0(%r1),%r2
1857 depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */
1858 STREG %r2,TASK_PT_IAOQ0(%r1)
1859 LDREG TASK_PT_IAOQ1(%r1),%r2
1860 depi PRIV_USER,31,2,%r2
1861 STREG %r2,TASK_PT_IAOQ1(%r1)
/* syscall_do_resched: tail-call schedule() with the return pointer set
 * to syscall_check_resched, so after rescheduling we re-run the whole
 * pending-work check from the top. */
1866 load32 syscall_check_resched,%r2 /* if resched, we start over again */
1867 load32 schedule,%r19
1868 bv %r0(%r19) /* jumps to schedule() */
1870 ldo -16(%r30),%r29 /* Reference param save area */
1877 #ifdef CONFIG_FUNCTION_TRACER
/* mcount/_mcount: minimal -pg entry stub.  Tail-branches to the common
 * C trampoline; laid out so the call and the stub share one L1 line.
 * NOTE(review): the _mcount label, ftrace_stub body and some directives
 * are elided in this excerpt. */
1879 .import ftrace_function_trampoline,code
1880 .align L1_CACHE_BYTES
1881 ENTRY_CFI(mcount, caller)
1883 .export _mcount,data
1885 * The 64bit mcount() function pointer needs 4 dwords, of which the
1886 * first two are free. We optimize it here and put 2 instructions for
1887 * calling mcount(), and 2 instructions for ftrace_stub(). That way we
1888 * have all on one L1 cacheline.
1891 b ftrace_function_trampoline
1892 copy %r3, %arg2 /* caller original %sp */
1895 .type ftrace_stub, @function
1904 .dword 0 /* code in head.S puts value of global gp here */
1908 #ifdef CONFIG_DYNAMIC_FTRACE
/* Frame size: 64-bit needs two frames, 32-bit one.  NOTE(review): the
 * #ifdef CONFIG_64BIT / #else / #endif lines around these two defines
 * are elided in this excerpt. */
1911 #define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
1913 #define FTRACE_FRAME_SIZE FRAME_SIZE
/* ftrace_caller: dynamic-ftrace entry trampoline (no pt_regs variant).
 * Saves caller-visible scratch registers on the stack, calls the C
 * trampoline, then restores everything and returns into the traced
 * function. */
1915 ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
1917 .global ftrace_caller
/* %r3 becomes the frame base pointer for the register save area. */
1919 STREG %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
1920 ldo -FTRACE_FRAME_SIZE(%sp), %r3
1921 STREG %rp, -RP_OFFSET(%r3)
1923 /* Offset 0 is already allocated for %r1 */
1924 STREG %r23, 2*REG_SZ(%r3)
1925 STREG %r24, 3*REG_SZ(%r3)
1926 STREG %r25, 4*REG_SZ(%r3)
1927 STREG %r26, 5*REG_SZ(%r3)
1928 STREG %r28, 6*REG_SZ(%r3)
1929 STREG %r29, 7*REG_SZ(%r3)
1931 STREG %r19, 8*REG_SZ(%r3)
1932 STREG %r20, 9*REG_SZ(%r3)
1933 STREG %r21, 10*REG_SZ(%r3)
1934 STREG %r22, 11*REG_SZ(%r3)
1935 STREG %r27, 12*REG_SZ(%r3)
1936 STREG %r31, 13*REG_SZ(%r3)
/* Call the common C trampoline; %r23 = NULL signals "no pt_regs". */
1943 ldi 0, %r23 /* no pt_regs */
1944 b,l ftrace_function_trampoline, %rp
/* Restore everything saved above, in the same layout. */
1947 LDREG -RP_OFFSET(%r3), %rp
1948 LDREG 2*REG_SZ(%r3), %r23
1949 LDREG 3*REG_SZ(%r3), %r24
1950 LDREG 4*REG_SZ(%r3), %r25
1951 LDREG 5*REG_SZ(%r3), %r26
1952 LDREG 6*REG_SZ(%r3), %r28
1953 LDREG 7*REG_SZ(%r3), %r29
1955 LDREG 8*REG_SZ(%r3), %r19
1956 LDREG 9*REG_SZ(%r3), %r20
1957 LDREG 10*REG_SZ(%r3), %r21
1958 LDREG 11*REG_SZ(%r3), %r22
1959 LDREG 12*REG_SZ(%r3), %r27
1960 LDREG 13*REG_SZ(%r3), %r31
1962 LDREG 1*REG_SZ(%r3), %r3
/* Pop the frame; LDREGM loads %r1 and adjusts %sp in one step. */
1964 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1
1965 /* Adjust return point to jump back to beginning of traced function */
1969 ENDPROC_CFI(ftrace_caller)
1971 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
/* ftrace_regs_caller: like ftrace_caller, but additionally captures a
 * full struct pt_regs so kprobes/live-patching handlers can inspect and
 * modify the traced function's register state.
 * NOTE(review): several lines (the save of %r1, mfctl/SAR handling, the
 * restore of %rp into the return path) are elided in this excerpt. */
1972 ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
1973 CALLS,SAVE_RP,SAVE_SP)
1975 .global ftrace_regs_caller
1977 ldo -FTRACE_FRAME_SIZE(%sp), %r1
1978 STREG %rp, -RP_OFFSET(%r1)
/* Allocate the pt_regs area on top of the ftrace frame. */
1981 ldo PT_SZ_ALGN(%sp), %sp
/* Store the complete general-register file into pt_regs (%r1 = base). */
1983 STREG %rp, PT_GR2(%r1)
1984 STREG %r3, PT_GR3(%r1)
1985 STREG %r4, PT_GR4(%r1)
1986 STREG %r5, PT_GR5(%r1)
1987 STREG %r6, PT_GR6(%r1)
1988 STREG %r7, PT_GR7(%r1)
1989 STREG %r8, PT_GR8(%r1)
1990 STREG %r9, PT_GR9(%r1)
1991 STREG %r10, PT_GR10(%r1)
1992 STREG %r11, PT_GR11(%r1)
1993 STREG %r12, PT_GR12(%r1)
1994 STREG %r13, PT_GR13(%r1)
1995 STREG %r14, PT_GR14(%r1)
1996 STREG %r15, PT_GR15(%r1)
1997 STREG %r16, PT_GR16(%r1)
1998 STREG %r17, PT_GR17(%r1)
1999 STREG %r18, PT_GR18(%r1)
2000 STREG %r19, PT_GR19(%r1)
2001 STREG %r20, PT_GR20(%r1)
2002 STREG %r21, PT_GR21(%r1)
2003 STREG %r22, PT_GR22(%r1)
2004 STREG %r23, PT_GR23(%r1)
2005 STREG %r24, PT_GR24(%r1)
2006 STREG %r25, PT_GR25(%r1)
2007 STREG %r26, PT_GR26(%r1)
2008 STREG %r27, PT_GR27(%r1)
2009 STREG %r28, PT_GR28(%r1)
2010 STREG %r29, PT_GR29(%r1)
2011 STREG %r30, PT_GR30(%r1)
2012 STREG %r31, PT_GR31(%r1)
2014 STREG %r26, PT_SAR(%r1)
/* Recover the traced function's %r25 from the stack save area. */
2017 LDREG -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
/* Call trampoline with arg2 = ftrace frame, arg3 = pt_regs pointer. */
2019 ldo -FTRACE_FRAME_SIZE(%r1), %arg2
2020 b,l ftrace_function_trampoline, %rp
2021 copy %r1, %arg3 /* struct pt_regs */
/* On return: re-derive the pt_regs base and restore all registers from
 * it, so any modifications made by the handler take effect. */
2023 ldo -PT_SZ_ALGN(%sp), %r1
2025 LDREG PT_SAR(%r1), %rp
2028 LDREG PT_GR2(%r1), %rp
2029 LDREG PT_GR3(%r1), %r3
2030 LDREG PT_GR4(%r1), %r4
2031 LDREG PT_GR5(%r1), %r5
2032 LDREG PT_GR6(%r1), %r6
2033 LDREG PT_GR7(%r1), %r7
2034 LDREG PT_GR8(%r1), %r8
2035 LDREG PT_GR9(%r1), %r9
2036 LDREG PT_GR10(%r1),%r10
2037 LDREG PT_GR11(%r1),%r11
2038 LDREG PT_GR12(%r1),%r12
2039 LDREG PT_GR13(%r1),%r13
2040 LDREG PT_GR14(%r1),%r14
2041 LDREG PT_GR15(%r1),%r15
2042 LDREG PT_GR16(%r1),%r16
2043 LDREG PT_GR17(%r1),%r17
2044 LDREG PT_GR18(%r1),%r18
2045 LDREG PT_GR19(%r1),%r19
2046 LDREG PT_GR20(%r1),%r20
2047 LDREG PT_GR21(%r1),%r21
2048 LDREG PT_GR22(%r1),%r22
2049 LDREG PT_GR23(%r1),%r23
2050 LDREG PT_GR24(%r1),%r24
2051 LDREG PT_GR25(%r1),%r25
2052 LDREG PT_GR26(%r1),%r26
2053 LDREG PT_GR27(%r1),%r27
2054 LDREG PT_GR28(%r1),%r28
2055 LDREG PT_GR29(%r1),%r29
2056 LDREG PT_GR30(%r1),%r30
2057 LDREG PT_GR31(%r1),%r31
/* Drop the pt_regs area and pop the ftrace frame. */
2059 ldo -PT_SZ_ALGN(%sp), %sp
2060 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1
2061 /* Adjust return point to jump back to beginning of traced function */
2065 ENDPROC_CFI(ftrace_regs_caller)
2070 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* return_to_handler: graph-tracer return trampoline.  Substituted as a
 * traced function's return address; asks ftrace_return_to_handler() for
 * the real return address and jumps there after restoring the original
 * return values.  NOTE(review): the saves/restores of the return-value
 * registers and the final branch are elided in this excerpt. */
2072 ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
2073 .export parisc_return_to_handler,data
2074 parisc_return_to_handler:
/* Terminate backtraces here: a zero %rp in the frame marker. */
2076 STREG %r0,-RP_OFFSET(%sp) /* store 0 as %rp */
2078 STREGM %r1,FRAME_SIZE(%sp)
2086 /* call ftrace_return_to_handler(0) */
2087 .import ftrace_return_to_handler,code
2088 load32 ftrace_return_to_handler,%ret0
2089 load32 .Lftrace_ret,%r2
2091 ldo -16(%sp),%ret1 /* Reference param save area */
2100 /* restore original return values */
2104 /* return from function */
/* Pop the frame; LDREGM restores %r3 and adjusts %sp together. */
2110 LDREGM -FRAME_SIZE(%sp),%r3
2111 ENDPROC_CFI(return_to_handler)
2113 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2115 #endif /* CONFIG_FUNCTION_TRACER */
2117 #ifdef CONFIG_IRQSTACKS
2118 /* void call_on_stack(unsigned long param1, void *func,
2119 unsigned long new_stack) */
/* Switches %sp to the supplied stack (e.g. the per-CPU IRQ stack),
 * calls func(param1), then restores the original %sp and %rp which were
 * stashed in the new stack's frame marker.  NOTE(review): a few lines
 * (including the 64-bit call sequence and the #else directive between
 * the 64-bit and 32-bit variants) are elided in this excerpt. */
2120 ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2121 ENTRY(_call_on_stack)
2124 /* Regarding the HPPA calling conventions for function pointers,
2125 we assume the PIC register is not changed across call. For
2126 CONFIG_64BIT, the argument pointer is left to point at the
2127 argument region allocated for the call to call_on_stack. */
2129 /* Switch to new stack. We allocate two frames. */
2130 ldo 2*FRAME_SIZE(%arg2), %sp
2131 # ifdef CONFIG_64BIT
2132 /* Save previous stack pointer and return pointer in frame marker */
2133 STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2134 /* Calls always use function descriptor */
2135 LDREG 16(%arg1), %arg1
2137 STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
2138 LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
2140 LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
2142 /* Save previous stack pointer and return pointer in frame marker */
2143 STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
2144 STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2145 /* Calls use function descriptor if PLABEL bit is set */
2146 bb,>=,n %arg1, 30, 1f
2148 LDREG 0(%arg1), %arg1
/* External branch-and-link through %sr4; %r31 receives the return link. */
2150 be,l 0(%sr4,%arg1), %sr0, %r31
/* Restore the caller's return pointer and stack pointer. */
2152 LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
2154 LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
2155 # endif /* CONFIG_64BIT */
2156 ENDPROC_CFI(call_on_stack)
2157 #endif /* CONFIG_IRQSTACKS */
2159 ENTRY_CFI(get_register)
2161 * get_register is used by the non access tlb miss handlers to
2162 * copy the value of the general register specified in r8 into
2163 * r1. This routine can't be used for shadowed registers, since
2164 * the rfir will restore the original value. So, for the shadowed
2165 * registers we put a -1 into r1 to indicate that the register
2166 * should not be used (the register being copied could also have
2167 * a -1 in it, but that is OK, it just means that we will have
2168 * to use the slow path instead).
/* Jump-table body: one bv per register number; the delay-slot
 * instruction of each branch performs the actual copy into %r1.
 * NOTE(review): the blr dispatch and the per-entry delay-slot copy
 * instructions are elided in this excerpt — verify against the full
 * source before editing. */
2172 bv %r0(%r25) /* r0 */
2174 bv %r0(%r25) /* r1 - shadowed */
2176 bv %r0(%r25) /* r2 */
2178 bv %r0(%r25) /* r3 */
2180 bv %r0(%r25) /* r4 */
2182 bv %r0(%r25) /* r5 */
2184 bv %r0(%r25) /* r6 */
2186 bv %r0(%r25) /* r7 */
2188 bv %r0(%r25) /* r8 - shadowed */
2190 bv %r0(%r25) /* r9 - shadowed */
2192 bv %r0(%r25) /* r10 */
2194 bv %r0(%r25) /* r11 */
2196 bv %r0(%r25) /* r12 */
2198 bv %r0(%r25) /* r13 */
2200 bv %r0(%r25) /* r14 */
2202 bv %r0(%r25) /* r15 */
2204 bv %r0(%r25) /* r16 - shadowed */
2206 bv %r0(%r25) /* r17 - shadowed */
2208 bv %r0(%r25) /* r18 */
2210 bv %r0(%r25) /* r19 */
2212 bv %r0(%r25) /* r20 */
2214 bv %r0(%r25) /* r21 */
2216 bv %r0(%r25) /* r22 */
2218 bv %r0(%r25) /* r23 */
2220 bv %r0(%r25) /* r24 - shadowed */
2222 bv %r0(%r25) /* r25 - shadowed */
2224 bv %r0(%r25) /* r26 */
2226 bv %r0(%r25) /* r27 */
2228 bv %r0(%r25) /* r28 */
2230 bv %r0(%r25) /* r29 */
2232 bv %r0(%r25) /* r30 */
2234 bv %r0(%r25) /* r31 */
2236 ENDPROC_CFI(get_register)
2239 ENTRY_CFI(set_register)
2241 * set_register is used by the non access tlb miss handlers to
2242 * copy the value of r1 into the general register specified in
/* Jump-table body, mirror image of get_register: one bv per register
 * number, with the actual %r1 -> %rN copy in each branch's delay slot.
 * NOTE(review): the blr dispatch and the per-entry delay-slot copy
 * instructions are elided in this excerpt — verify against the full
 * source before editing. */
2247 bv %r0(%r25) /* r0 (silly, but it is a place holder) */
2249 bv %r0(%r25) /* r1 */
2251 bv %r0(%r25) /* r2 */
2253 bv %r0(%r25) /* r3 */
2255 bv %r0(%r25) /* r4 */
2257 bv %r0(%r25) /* r5 */
2259 bv %r0(%r25) /* r6 */
2261 bv %r0(%r25) /* r7 */
2263 bv %r0(%r25) /* r8 */
2265 bv %r0(%r25) /* r9 */
2267 bv %r0(%r25) /* r10 */
2269 bv %r0(%r25) /* r11 */
2271 bv %r0(%r25) /* r12 */
2273 bv %r0(%r25) /* r13 */
2275 bv %r0(%r25) /* r14 */
2277 bv %r0(%r25) /* r15 */
2279 bv %r0(%r25) /* r16 */
2281 bv %r0(%r25) /* r17 */
2283 bv %r0(%r25) /* r18 */
2285 bv %r0(%r25) /* r19 */
2287 bv %r0(%r25) /* r20 */
2289 bv %r0(%r25) /* r21 */
2291 bv %r0(%r25) /* r22 */
2293 bv %r0(%r25) /* r23 */
2295 bv %r0(%r25) /* r24 */
2297 bv %r0(%r25) /* r25 */
2299 bv %r0(%r25) /* r26 */
2301 bv %r0(%r25) /* r27 */
2303 bv %r0(%r25) /* r28 */
2305 bv %r0(%r25) /* r29 */
2307 bv %r0(%r25) /* r30 */
2309 bv %r0(%r25) /* r31 */
2311 ENDPROC_CFI(set_register)