// SPDX-License-Identifier: GPL-2.0
/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/timex.h>
#include <linux/uaccess.h>

#include <asm/utrap.h>
#include <asm/unistd.h>

#include "entry.h"
#include "kernel.h"
#include "systbls.h"

/* #define DEBUG_UNIMP_SYSCALL */

SYSCALL_DEFINE0(getpagesize)
{
	return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
}

/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end   = VA_EXCLUDE_END;

	if (unlikely(len >= va_exclude_start))
		return 1;

	if (unlikely((addr + len) < addr))
		return 1;

	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
		     ((addr + len) >= va_exclude_start &&
		      (addr + len) < va_exclude_end)))
		return 1;

	return 0;
}

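/* For orientation: on the spitfire/niagara parts this code targets, the
 * MMU cannot translate a large region in the middle of the 64-bit
 * address space, and VA_EXCLUDE_START/VA_EXCLUDE_END pad that hole by
 * 4GB on each side (hence "within 4GB of the VA-space hole" above).
 * The checks here only rely on the exclusion range being a single
 * interval [start, end) in the middle of the address space.
 */
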
/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

static inline unsigned long COLOR_ALIGN(unsigned long addr,
					unsigned long pgoff)
{
	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	return base + off;
}

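/* A purely illustrative walk-through (with made-up values SHMLBA ==
 * 0x10000 and PAGE_SHIFT == 13): COLOR_ALIGN(0x12345, 3) computes
 * base = (0x12345 + 0xffff) & ~0xffff = 0x20000 and
 * off  = (3 << 13) & 0xffff = 0x6000, returning 0x26000 -- the first
 * address above 0x12345 whose low SHMLBA bits match those of the file
 * offset, so the mapping lands on the same D-cache color.
 */
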
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

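/* Note on the align_mask arithmetic above: PAGE_MASK & (SHMLBA - 1)
 * keeps exactly the bits between PAGE_SIZE and SHMLBA.  With the
 * illustrative values from the COLOR_ALIGN example (8K pages, 64K
 * SHMLBA) that is ~0x1fff & 0xffff = 0xe000, i.e. vm_unmapped_area()
 * is asked to match the page-color bits of align_offset
 * (pgoff << PAGE_SHIFT) rather than enforce any particular
 * power-of-two alignment.
 */
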
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

/* Try to align mapping such that we align it as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return get_area(NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;

	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);

	do {
		addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

	/* Mapping is smaller than 64K or larger areas could not
	 * be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = get_area(NULL, orig_addr, len, pgoff, flags);

	return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);

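/* The loop above uses the usual over-allocate-then-round trick: e.g.
 * for a 1MB request with a 512K align_goal it asks the underlying
 * allocator for len + (512K - PAGE_SIZE) bytes, which guarantees that
 * the returned window contains a 512K-aligned start; rounding addr up
 * to that boundary then still leaves room for the real mapping.  Only
 * the alignment goal is relaxed (4M -> 512K -> 64K -> PAGE_SIZE) when
 * a padded request cannot be satisfied.
 */
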
/* Essentially the same as PowerPC.  */
static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		unsigned long val = get_random_long();
		if (test_thread_flag(TIF_32BIT))
			rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
		else
			rnd = (val % (1UL << (30UL-PAGE_SHIFT)));
	}
	return rnd << PAGE_SHIFT;
}

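/* With sparc64's 8K pages (PAGE_SHIFT == 13) this yields up to
 * 2^10 pages (8MB) of mmap base randomization for 32-bit tasks and
 * up to 2^17 pages (1GB) for 64-bit tasks, always in page-sized
 * units.
 */
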
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = mmap_rnd();
	unsigned long gap;

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	gap = rlim_stack->rlim_cur;
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    gap == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;

		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

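/* For the top-down case the stack gap is clamped to the range
 * [128MB, 5/6 * STACK_TOP32]: e.g. a typical 8MB RLIMIT_STACK is
 * bumped to 128MB, so mmap_base ends up roughly 128MB (plus the
 * random factor) below the 32-bit stack top.
 */
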
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
SYSCALL_DEFINE0(sparc_pipe)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		goto out;
	current_pt_regs()->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}

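/* Note: the second descriptor is handed back in the second syscall
 * return register (u_regs[UREG_I1], the user's %o1 at trap time),
 * while fd[0] travels back as the ordinary return value -- the
 * traditional SPARC two-register pipe() convention that a plain C
 * calling sequence cannot express, hence the comment above.
 */
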
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
		unsigned long, third, void __user *, ptr, long, fifth)
{
	long err;

	if (!IS_ENABLED(CONFIG_SYSVIPC))
		return -ENOSYS;

	/* No need for backward compatibility. We can start fresh... */
	if (call <= SEMTIMEDOP) {
		switch (call) {
		case SEMOP:
			err = ksys_semtimedop(first, ptr,
					      (unsigned int)second, NULL);
			goto out;
		case SEMTIMEDOP:
			err = ksys_semtimedop(first, ptr, (unsigned int)second,
				(const struct __kernel_timespec __user *)
					      (unsigned long) fifth);
			goto out;
		case SEMGET:
			err = ksys_semget(first, (int)second, (int)third);
			goto out;
		case SEMCTL:
			err = ksys_old_semctl(first, second,
					      (int)third | IPC_64,
					      (unsigned long) ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= MSGCTL) {
		switch (call) {
		case MSGSND:
			err = ksys_msgsnd(first, ptr, (size_t)second,
					  (int)third);
			goto out;
		case MSGRCV:
			err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
					  (int)third);
			goto out;
		case MSGGET:
			err = ksys_msgget((key_t)first, (int)second);
			goto out;
		case MSGCTL:
			err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= SHMCTL) {
		switch (call) {
		case SHMAT: {
			ulong raddr;
			err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA);
			if (!err) {
				if (put_user(raddr,
					     (ulong __user *) third))
					err = -EFAULT;
			}
			goto out;
		}
		case SHMDT:
			err = ksys_shmdt(ptr);
			goto out;
		case SHMGET:
			err = ksys_shmget(first, (size_t)second, (int)third);
			goto out;
		case SHMCTL:
			err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	} else {
		err = -ENOSYS;
	}
out:
	return err;
}

SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
	long ret;

	if (personality(current->personality) == PER_LINUX32 &&
	    personality(personality) == PER_LINUX)
		personality |= PER_LINUX32;
	ret = sys_personality(personality);
	if (personality(ret) == PER_LINUX32)
		ret &= ~PER_LINUX32;

	return ret;
}

int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (len >= STACK_TOP32)
			return -EINVAL;

		if (addr > STACK_TOP32 - len)
			return -EINVAL;
	} else {
		if (len >= VA_EXCLUDE_START)
			return -EINVAL;

		if (invalid_64bit_range(addr, len))
			return -EINVAL;
	}

	return 0;
}

/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags, unsigned long, fd,
		unsigned long, off)
{
	unsigned long retval = -EINVAL;

	if ((off + PAGE_ALIGN(len)) < off)
		goto out;
	if (off & ~PAGE_MASK)
		goto out;
	retval = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return retval;
}

SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
	if (invalid_64bit_range(addr, len))
		return -EINVAL;

	return vm_munmap(addr, len);
}

SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	if (test_thread_flag(TIF_32BIT))
		return -EINVAL;
	return sys_mremap(addr, old_len, new_len, flags, new_addr);
}

SYSCALL_DEFINE0(nis_syscall)
{
	static int count;
	struct pt_regs *regs = current_pt_regs();

	/* Don't make the system unusable, if someone goes stuck */
	if (count++ > 5)
		return -ENOSYS;

	printk ("Unimplemented SPARC system call %ld\n", regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs (regs);
#endif

	return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->tpc, 0);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	exception_exit(prev_state);
}

SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
	int nlen, err;
	char tmp[__NEW_UTS_LEN + 1];

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out_unlock;
	memcpy(tmp, utsname()->domainname, nlen);

	up_read(&uts_sem);

	if (copy_to_user(name, tmp, nlen))
		return -EFAULT;
	return 0;

out_unlock:
	up_read(&uts_sem);
	return err;
}

SYSCALL_DEFINE1(sparc_adjtimex, struct timex __user *, txc_p)
{
	struct timex txc;		/* Local copy of parameter */
	struct __kernel_timex *kt = (void *)&txc;
	int ret;

	/* Copy the user data space into the kernel copy
	 * structure. But bear in mind that the structures
	 * may change
	 */
	if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
		return -EFAULT;

	/*
	 * override for sparc64 specific timeval type: tv_usec
	 * is 32 bit wide instead of 64-bit in __kernel_timex
	 */
	kt->time.tv_usec = txc.time.tv_usec;
	ret = do_adjtimex(kt);
	txc.time.tv_usec = kt->time.tv_usec;

	return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}

SYSCALL_DEFINE2(sparc_clock_adjtime, const clockid_t, which_clock,
		struct timex __user *, txc_p)
{
	struct timex txc;		/* Local copy of parameter */
	struct __kernel_timex *kt = (void *)&txc;
	int ret;

	if (!IS_ENABLED(CONFIG_POSIX_TIMERS)) {
		pr_err_once("process %d (%s) attempted a POSIX timer syscall "
			    "while CONFIG_POSIX_TIMERS is not set\n",
			    current->pid, current->comm);

		return -ENOSYS;
	}

	/* Copy the user data space into the kernel copy
	 * structure. But bear in mind that the structures
	 * may change
	 */
	if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
		return -EFAULT;

	/*
	 * override for sparc64 specific timeval type: tv_usec
	 * is 32 bit wide instead of 64-bit in __kernel_timex
	 */
	kt->time.tv_usec = txc.time.tv_usec;
	ret = do_clock_adjtime(which_clock, kt);
	txc.time.tv_usec = kt->time.tv_usec;

	return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}

SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
		utrap_handler_t, new_p, utrap_handler_t, new_d,
		utrap_handler_t __user *, old_p,
		utrap_handler_t __user *, old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
	if (!current_thread_info()->utraps) {
		current_thread_info()->utraps =
			kcalloc(UT_TRAP_INSTRUCTION_31 + 1, sizeof(long),
				GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			unsigned long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc_array(UT_TRAP_INSTRUCTION_31 + 1,
					      sizeof(long),
					      GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}

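/* Bookkeeping note: slot 0 of the utraps table serves as a reference
 * count (set to 1 on allocation above and bumped when the table is
 * shared at thread creation elsewhere in the port), which is why a
 * handler update on a table with utraps[0] > 1 first makes a private
 * copy before storing new_p.
 */
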
SYSCALL_DEFINE1(memory_ordering, unsigned long, model)
{
	struct pt_regs *regs = current_pt_regs();
	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}

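/* The model >= 3 check mirrors the three SPARC V9 memory models
 * (0 = TSO, 1 = PSO, 2 = RMO); the shift by 14 places the value in
 * the TSTATE.MM field cleared via ~TSTATE_MM above.
 */
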
SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
		struct sigaction __user *, oact, void __user *, restorer,
		size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}

SYSCALL_DEFINE0(kern_features)
{
	return KERN_FEATURE_MIXED_MODE_STACK;
}