/* GNU Linux-libre 4.9.315-gnu1: arch/parisc/include/asm/spinlock.h */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

/* The LDCW-usable word inside the lock is 0 while the lock is held
 * and 1 while it is free. */
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);
        return *a == 0;
}

#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)

static inline void arch_spin_unlock_wait(arch_spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);

        /* Spin, with acquire semantics, until the current holder
         * stores 1 back into the lock word. */
        smp_cond_load_acquire(a, VAL);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *x,
                                         unsigned long flags)
{
        volatile unsigned int *a;

        a = __ldcw_align(x);
        /* Only retry the atomic LDCW once the lock word reads non-zero;
         * spinning on the plain load avoids hammering the cacheline.
         * If the saved PSW in 'flags' says interrupts were on, re-enable
         * them while we busy-wait. */
        while (__ldcw(a) == 0)
                while (*a == 0)
                        if (flags & PSW_SM_I) {
                                local_irq_enable();
                                cpu_relax();
                                local_irq_disable();
                        } else
                                cpu_relax();
}
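
/* Illustrative caller sketch (an assumption, simplified from the generic
 * spinlock layer rather than defined here): local_irq_save() on parisc
 * captures the PSW, so the PSW_SM_I bit in 'flags' above records whether
 * interrupts were enabled before the caller started spinning:
 *
 *      unsigned long flags;
 *      local_irq_save(flags);
 *      arch_spin_lock_flags(&lock->raw_lock, flags);
 */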

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
        volatile unsigned int *a;

        a = __ldcw_align(x);
        /* Order the critical section before the releasing store of 1. */
        mb();
        *a = 1;
}

static inline int arch_spin_trylock(arch_spinlock_t *x)
{
        volatile unsigned int *a;
        int ret;

        a = __ldcw_align(x);
        /* LDCW returns the old word: non-zero means we took a free lock. */
        ret = __ldcw(a) != 0;

        return ret;
}
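
/*
 * Illustrative sketch only: PA-RISC has no compare-and-swap, so the
 * locks above are built on the LDCW instruction, which atomically
 * loads a word and clears it to zero.  A free lock holds 1 and a held
 * lock holds 0, so acquisition reduces to:
 *
 *      old = __ldcw(a);        // atomically { old = *a; *a = 0; }
 *      if (old != 0)
 *              // the lock was free and is now ours
 *
 * Plain LDCW also requires a 16-byte-aligned word, which is why every
 * operation first locates the usable word with __ldcw_align().
 */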

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an
 * indefinite time by readers.  With care, they can also be taken in
 * interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers
 * from grabbing the rwlock.
 */
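
/*
 * Sketch of the counter states, as implied by the code below:
 *
 *      rw->counter == 0        unlocked
 *      rw->counter >  0        held by that many readers
 *      rw->counter == -1       held by one writer
 *
 * rw->lock serialises updates to rw->counter only; readers do not hold
 * it for the duration of the read-side critical section.
 */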

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        arch_spin_lock_flags(&rw->lock, flags);
        rw->counter++;
        arch_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        arch_spin_lock_flags(&rw->lock, flags);
        rw->counter--;
        arch_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned long flags;
retry:
        local_irq_save(flags);
        if (arch_spin_trylock(&rw->lock)) {
                rw->counter++;
                arch_spin_unlock(&rw->lock);
                local_irq_restore(flags);
                return 1;
        }

        local_irq_restore(flags);
        /* If write-locked, we fail to acquire the lock */
        if (rw->counter < 0)
                return 0;

        /* Wait until we have a realistic chance at the lock */
        while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
                cpu_relax();

        goto retry;
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long flags;
retry:
        local_irq_save(flags);
        arch_spin_lock_flags(&rw->lock, flags);

        if (rw->counter != 0) {
                arch_spin_unlock(&rw->lock);
                local_irq_restore(flags);

                while (rw->counter != 0)
                        cpu_relax();

                goto retry;
        }

        rw->counter = -1; /* mark as write-locked */
        mb();
        local_irq_restore(flags);
}
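
/* The writer above drops rw->lock and restores the irq state before
 * busy-waiting on rw->counter: arch_read_unlock() must take rw->lock
 * to decrement the counter, so spinning while holding it could never
 * make progress, and spinning with irqs off would add unbounded
 * interrupt latency. */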

static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
        /* arch_spin_unlock() supplies the barrier that orders this
         * store before the lock release. */
        rw->counter = 0;
        arch_spin_unlock(&rw->lock);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned long flags;
        int result = 0;

        local_irq_save(flags);
        if (arch_spin_trylock(&rw->lock)) {
                if (rw->counter == 0) {
                        rw->counter = -1;
                        result = 1;
                } else {
                        /* Read-locked.  Oh well. */
                        arch_spin_unlock(&rw->lock);
                }
        }
        local_irq_restore(flags);

        return result;
}

/*
 * arch_read_can_lock - would read_trylock() succeed?
 * @rw: the rwlock in question.
 */
static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
{
        return rw->counter >= 0;
}

/*
 * arch_write_can_lock - would write_trylock() succeed?
 * @rw: the rwlock in question.
 */
static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
{
        return !rw->counter;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

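/*
 * Minimal usage sketch (assumed context: the generic spinlock layer in
 * include/linux/spinlock.h wraps these arch_* primitives; 'my_lock' is
 * a hypothetical lock, not part of this file):
 *
 *      static DEFINE_SPINLOCK(my_lock);
 *
 *      spin_lock(&my_lock);
 *      // ... critical section ...
 *      spin_unlock(&my_lock);
 */
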
#endif /* __ASM_SPINLOCK_H */