/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 */
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
1:	beqlr	cr1		/* all done if high part of A is 0 */
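
/*
 * For reference, the high half of a 64x64-bit product can be built from
 * 32-bit partial products, as in this C sketch (illustrative only;
 * mulh64 is a hypothetical name, u32/u64 are the kernel's fixed-width
 * types):
 *
 *	u64 mulh64(u64 a, u64 b)
 *	{
 *		u32 ah = a >> 32, al = a, bh = b >> 32, bl = b;
 *		u64 t1 = (u64)ah * bl + (((u64)al * bl) >> 32);
 *		u64 t2 = (u64)al * bh + (u32)t1;
 *		return (u64)ah * bh + (t1 >> 32) + (t2 >> 32);
 *	}
 */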

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
_GLOBAL(reloc_got2)
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
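
/*
 * In C terms the fixup loop amounts to the following sketch
 * (illustrative; assumes 32-bit GOT entries and a byte offset
 * passed in r3):
 *
 *	void reloc_got2_c(unsigned long offset)
 *	{
 *		unsigned long *p = (unsigned long *)__got2_start;
 *		unsigned long *end = (unsigned long *)__got2_end;
 *
 *		while (p < end)
 *			*p++ += offset;
 *	}
 */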

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r3 = data offset
 * r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)	/* fetch the cur_cpu_spec pointer */
	add	r4,r4,r3	/* relocate it by the data offset */
	lwz	r5,CPU_SPEC_SETUP(r4)
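
/*
 * Roughly, in C (an illustrative sketch; the offset in r3 compensates
 * for the kernel not yet running at its linked address, and the same
 * fixup is applied to the setup function pointer before the call):
 *
 *	void call_setup_cpu_c(unsigned long offset)
 *	{
 *		struct cpu_spec *spec;
 *
 *		spec = *(struct cpu_spec **)((void *)&cur_cpu_spec + offset);
 *		spec = (void *)spec + offset;
 *		if (spec->cpu_setup)
 *			spec->cpu_setup(offset, spec);
 *	}
 */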

#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_PPC_BOOK3S_32)

/* This gets called by via-pmu.c to switch the PLL selection
 * on 750fx CPUs. This function should really be moved to some
 * other place (as most of the cpufreq code in via-pmu should).
 */
_GLOBAL(low_choose_750fx_pll)
	/* If switching to PLL1, disable HID0:BTIC */
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1	/* Read the current HID1 value */
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from parameter */
	rlwinm	r4,r4,0,16,14	/* Clear out HID1:PS from value read
				 * (could I have used rlwimi here?) */
	/* Store new HID1 image */
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)
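
/*
 * The rlwinm/or sequence above is, in C terms (illustrative; HID1:PS
 * is the 0x00010000 bit and "pll" is the 0/1 parameter from r3):
 *
 *	hid1 = (hid1 & ~0x00010000) | ((pll & 1) << 16);
 */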
	/* If switching to PLL0, enable HID0:BTIC */

_GLOBAL(low_choose_7447a_dfs)
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
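
/*
 * insrwi r4,r3,1,9 inserts the low-order bit of r3 at big-endian bit 9
 * (value bit 22), i.e. in C terms (illustrative, with "dfs" the 0/1
 * parameter):
 *
 *	hid1 = (hid1 & ~0x00400000) | ((dfs & 1) << 22);
 */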
#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_readb)
	mfmsr	r7
	rlwinm	r0,r7,0,~MSR_DR	/* MSR with data translation off */
_ASM_NOKPROBE_SYMBOL(real_readb)

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	rlwinm	r0,r7,0,~MSR_DR	/* MSR with data translation off */
_ASM_NOKPROBE_SYMBOL(real_writeb)
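
/*
 * Both helpers follow the same shape, sketched here in C-like form
 * (illustrative; mfmsr/mtmsr stand for the MSR access instructions):
 *
 *	old = mfmsr();
 *	mtmsr(old & ~MSR_DR);	  data address translation off
 *	... single byte access at the untranslated address ...
 *	mtmsr(old);		  translation back on
 */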
#endif /* CONFIG_40x */

/*
 * Copy a whole page. We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache). This requires that the destination
 * is cacheable.
 */
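
/*
 * The shape of the copy loop below, as an illustrative C sketch
 * (dcbz_line() is a hypothetical wrapper for the dcbz instruction):
 *
 *	void copy_page_c(void *to, void *from)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < PAGE_SIZE; i += L1_CACHE_BYTES) {
 *			dcbz_line(to + i);	  allocate line, no read
 *			memcpy(to + i, from + i, L1_CACHE_BYTES);
 *		}
 *	}
 */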
#define COPY_16_BYTES \
_GLOBAL(copy_page)
	rlwinm	r5, r3, 0, L1_CACHE_BYTES - 1	/* low bits of dest address */
0:	twnei	r5, 0	/* WARN if r3 is not cache aligned */
	EMIT_WARN_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	addi	r11,r11,L1_CACHE_BYTES
#else /* MAX_COPY_PREFETCH == 1 */
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH	/* lines left after the initial prefetch */
#if L1_CACHE_BYTES >= 32
#if L1_CACHE_BYTES >= 64
#if L1_CACHE_BYTES >= 128
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
EXPORT_SYMBOL(copy_page)

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 *
 * R3/R4 has 64 bit value
 * R5    has shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32	# r6 = 32 - count
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
EXPORT_SYMBOL(__ashrdi3)
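
/*
 * What the sequence computes, written out in C (illustrative; note it
 * relies on PowerPC shift semantics: srw/slw yield 0 and sraw yields
 * the sign fill when the 6-bit shift amount is in 32..63, unlike ISO C):
 *
 *	lsw = lsw >> c;
 *	lsw |= msw << (32 - c);				  t1, 0 for c > 31
 *	lsw |= (c < 32) ? 0 : (s32)msw >> (c - 32);	  t2
 *	msw = (s32)msw >> c;				  sign-propagating
 *
 * __ashldi3 and __lshrdi3 below mirror the same trick for left and
 * logical right shifts.
 */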
_GLOBAL(__ashldi3)
	subfic	r6,r5,32	# r6 = 32 - count
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr
EXPORT_SYMBOL(__ashldi3)
_GLOBAL(__lshrdi3)
	subfic	r6,r5,32	# r6 = 32 - count
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
EXPORT_SYMBOL(__lshrdi3)

/*
 * 64-bit comparison: __cmpdi2(s64 a, s64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
EXPORT_SYMBOL(__cmpdi2)

/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
EXPORT_SYMBOL(__ucmpdi2)
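
/*
 * Reference semantics for both helpers, as in libgcc (illustrative C):
 *
 *	int __cmpdi2(long long a, long long b)
 *	{
 *		return (a < b) ? 0 : (a > b) ? 2 : 1;
 *	}
 *
 * __ucmpdi2 is the same with unsigned long long operands.
 */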
	rlwimi	r9,r4,24,16,23	/* set byte 2 of the swapped low word */
	rlwimi	r10,r3,24,16,23	/* set byte 2 of the swapped high word */
EXPORT_SYMBOL(__bswapdi2)
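
/*
 * C equivalent (illustrative): byte-swap each 32-bit half and exchange
 * the halves:
 *
 *	unsigned long long bswap64_c(unsigned long long v)
 *	{
 *		unsigned int hi = v >> 32, lo = v;
 *
 *		return ((unsigned long long)__builtin_bswap32(lo) << 32) |
 *			__builtin_bswap32(hi);
 *	}
 */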

#ifdef CONFIG_SMP
_GLOBAL(start_secondary_resume)
	/* Reset the stack: round r1 down to its THREAD_SIZE stack base */
	rlwinm	r1, r1, 0, 0, 31 - THREAD_SHIFT
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r3,0
	stw	r3,0(r1)	/* Zero the stack frame pointer */
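
/*
 * In effect (illustrative C-like sketch): round the stack pointer down
 * to the base of the THREAD_SIZE-aligned stack, point it at the top
 * frame, and terminate the back-chain:
 *
 *	sp = (sp & ~(THREAD_SIZE - 1)) + THREAD_SIZE - STACK_FRAME_OVERHEAD;
 *	*(unsigned long *)sp = 0;
 */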
#endif /* CONFIG_SMP */