1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __ASM_ARM_DIV64
3 #define __ASM_ARM_DIV64
5 #include <linux/types.h>
6 #include <asm/compiler.h>
/*
 * The semantics of __div64_32() are:
 *
 * uint32_t __div64_32(uint64_t *n, uint32_t base)
 * {
 *	uint32_t remainder = *n % base;
 *	*n = *n / base;
 *	return remainder;
 * }
 *
 * In other words, a 64-bit dividend with a 32-bit divisor producing
 * a 64-bit result and a 32-bit remainder.  To accomplish this optimally
 * we override the generic version in lib/div64.c to call our __do_div64
 * assembly implementation with completely non standard calling convention
 * for arguments and results (beware).
 */
24 static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
26 register unsigned int __base asm("r4") = base;
27 register unsigned long long __n asm("r0") = *n;
28 register unsigned long long __res asm("r2");
30 asm( __asmeq("%0", "r0")
34 : "+r" (__n), "=r" (__res)
41 #define __div64_32 __div64_32
43 #if !defined(CONFIG_AEABI)
46 * In OABI configurations, some uses of the do_div function
47 * cause gcc to run out of registers. To work around that,
48 * we can force the use of the out-of-line version for
49 * configurations that build a OABI kernel.
51 #define do_div(n, base) __div64_32(&(n), base)
55 static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
57 unsigned long long res;
58 register unsigned int tmp asm("ip") = 0;
61 asm ( "umull %Q0, %R0, %Q1, %Q2\n\t"
66 } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
68 asm ( "umlal %Q0, %R0, %Q1, %Q2\n\t"
74 asm ( "umull %Q0, %R0, %Q2, %Q3\n\t"
76 "adcs %R0, %R0, %R2\n\t"
78 : "=&r" (res), "+&r" (tmp)
83 if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
84 asm ( "umlal %R0, %Q0, %R1, %Q2\n\t"
85 "umlal %R0, %Q0, %Q1, %R2\n\t"
87 "umlal %Q0, %R0, %R1, %R2"
92 asm ( "umlal %R0, %Q0, %R2, %Q3\n\t"
93 "umlal %R0, %1, %Q2, %R3\n\t"
95 "adds %Q0, %1, %Q0\n\t"
96 "adc %R0, %R0, #0\n\t"
97 "umlal %Q0, %R0, %R2, %R3"
98 : "+&r" (res), "+&r" (tmp)
105 #define __arch_xprod_64 __arch_xprod_64
107 #include <asm-generic/div64.h>