/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_LNKGET_H
#define __ASM_SPINLOCK_LNKGET_H
/*
 * None of these asm statements clobber memory as LNKSET writes around
 * the cache so the memory it modifies cannot safely be read by any means
 * other than these accessors.
 */
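/*
 * The LNKGET/LNKSET sequences below all follow the same pattern: LNKGETD
 * performs a linked (exclusive) load of the lock word, a conditional
 * LNKSET variant writes the updated value back, and the DEFR/ANDT/CMPT
 * sequence on TXSTAT checks whether that linked store actually completed,
 * so the caller can retry (or report failure) if the link was lost to
 * another accessor in the meantime.
 */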
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
asm volatile ("LNKGETD %0, [%1]\n"

static inline void arch_spin_lock(arch_spinlock_t *lock)
asm volatile ("1: LNKGETD %0,[%1]\n"
" LNKSETDZ [%1], %0\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"

/* Returns 0 if failed to acquire lock */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
asm volatile (" LNKGETD %0,[%1]\n"
" LNKSETDZ [%1], %0\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"
"1: XORNZ %0, %0, %0\n"

static inline void arch_spin_unlock(arch_spinlock_t *lock)
asm volatile (" SETD [%0], %1\n"
: "da" (&lock->lock), "da" (0)
/*
 * Write locks are easy - we just set bit 31. When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
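/*
 * Illustrative only: a plain-C sketch of the write-lock policy described
 * above, written with the GCC __atomic builtins rather than LNKGET/LNKSET
 * and assuming the lock word is a plain unsigned int.  The helper name is
 * hypothetical and not part of the arch API; the real implementation below
 * is the inline assembly version.
 */
static inline int lnkget_write_trylock_sketch(arch_rwlock_t *rw)
{
        unsigned int expected = 0;

        /* Succeed only when no readers or writer hold the lock, setting bit 31. */
        return __atomic_compare_exchange_n(&rw->lock, &expected, 0x80000000u,
                                           0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}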
static inline void arch_write_lock(arch_rwlock_t *rw)
asm volatile ("1: LNKGETD %0,[%1]\n"
" LNKSETDZ [%1], %0\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"
: "da" (&rw->lock), "bd" (0x80000000)

static inline int arch_write_trylock(arch_rwlock_t *rw)
asm volatile (" LNKGETD %0,[%1]\n"
" LNKSETDZ [%1], %0\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"
"1: XORNZ %0, %0, %0\n"
: "da" (&rw->lock), "bd" (0x80000000)

static inline void arch_write_unlock(arch_rwlock_t *rw)
asm volatile (" SETD [%0], %1\n"
: "da" (&rw->lock), "da" (0)

/* write_can_lock - would write_trylock() succeed? */
static inline int arch_write_can_lock(arch_rwlock_t *rw)
asm volatile ("LNKGETD %0, [%1]\n"
/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy. We may have multiple read locks
 * currently active. However, we know we won't have any write
 * locks.
 */
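/*
 * Illustrative only: a plain-C sketch of the reader-count policy described
 * above, again using the GCC __atomic builtins and assuming the lock word
 * is a plain unsigned int.  Bit 31 set means a writer holds the lock;
 * otherwise the word counts the active readers.  The helper name is
 * hypothetical and not part of the arch API.
 */
static inline int lnkget_read_trylock_sketch(arch_rwlock_t *rw)
{
        unsigned int old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);

        /* A writer already holds the lock: fail straight away. */
        if (old & 0x80000000u)
                return 0;

        /* Try to bump the reader count; the caller retries on failure. */
        return __atomic_compare_exchange_n(&rw->lock, &old, old + 1,
                                           0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}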
static inline void arch_read_lock(arch_rwlock_t *rw)
asm volatile ("1: LNKGETD %0,[%1]\n"
" LNKSETDPL [%1], %0\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"

static inline void arch_read_unlock(arch_rwlock_t *rw)
asm volatile ("1: LNKGETD %0,[%1]\n"
" LNKSETD [%1], %0\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"

static inline int arch_read_trylock(arch_rwlock_t *rw)
asm volatile (" LNKGETD %0,[%1]\n"
" LNKSETDPL [%1], %0\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"

/* read_can_lock - would read_trylock() succeed? */
static inline int arch_read_can_lock(arch_rwlock_t *rw)
asm volatile ("LNKGETD %0, [%1]\n"
: "da" (&rw->lock), "bd" (0x80000000)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()

#endif /* __ASM_SPINLOCK_LNKGET_H */