/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_RWSEM_H
#define _ASM_GENERIC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

/*
 * the semaphore definition
 */
#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK              0xffffffffL
#else
# define RWSEM_ACTIVE_MASK              0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE            0x00000000L
#define RWSEM_ACTIVE_BIAS               0x00000001L
#define RWSEM_WAITING_BIAS              (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

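/*
 * Worked examples of the resulting count encoding (the wait list,
 * not the count, disambiguates the overlapping cases):
 *
 *      RWSEM_UNLOCKED_VALUE            unlocked, no waiters
 *      N * RWSEM_ACTIVE_READ_BIAS      N active readers, no waiters
 *      RWSEM_ACTIVE_WRITE_BIAS         one active writer - or one active
 *                                      reader with waiters queued
 *      RWSEM_WAITING_BIAS              waiters queued, lock momentarily
 *                                      free (transient)
 *
 * i.e. the low RWSEM_ACTIVE_MASK bits count the active lockers, and
 * the count goes negative as soon as a writer is active or tasks are
 * queued, which is exactly what the fast paths below test for.
 */
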
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
                rwsem_down_read_failed(sem);
}

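/*
 * Fast-path arithmetic for __down_read() above: on an unlocked rwsem
 * the increment takes the count from 0 to 1 (> 0) and the caller
 * proceeds without blocking; if a writer is active or waiters are
 * queued, the RWSEM_WAITING_BIAS term keeps the result at or below
 * zero and we fall back to the slow path.
 */
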
static inline int __down_read_killable(struct rw_semaphore *sem)
{
        if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
                if (IS_ERR(rwsem_down_read_failed_killable(sem)))
                        return -EINTR;
        }

        return 0;
}

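/*
 * Illustrative caller pattern (a sketch only; real users go through
 * the <linux/rwsem.h> wrappers rather than these __ helpers, and the
 * mmap_sem user below is hypothetical):
 *
 *      if (down_read_killable(&mm->mmap_sem))
 *              return -EINTR;
 *      ... read-side critical section ...
 *      up_read(&mm->mmap_sem);
 */
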
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        long tmp;

        while ((tmp = atomic_long_read(&sem->count)) >= 0) {
                if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        return 1;
                }
        }
        return 0;
}

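/*
 * Note on __down_read_trylock() above: the cmpxchg loop retries only
 * while the count stays non-negative, i.e. only a racing reader can
 * make it go around again; a writer or queued waiter drives the count
 * negative and the trylock fails immediately.
 */
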
/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
        long tmp;

        tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
                                             &sem->count);
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
}

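/*
 * Fast-path arithmetic for __down_write() above: adding
 * RWSEM_ACTIVE_WRITE_BIAS to an unlocked count (0) yields exactly
 * RWSEM_ACTIVE_WRITE_BIAS, the only value that signals success; any
 * pre-existing reader, writer or waiter makes the sum differ and
 * sends us to the slow path.
 */
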
static inline int __down_write_killable(struct rw_semaphore *sem)
{
        long tmp;

        tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
                                             &sem->count);
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                if (IS_ERR(rwsem_down_write_failed_killable(sem)))
                        return -EINTR;
        return 0;
}

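/*
 * On a fatal signal, rwsem_down_write_failed_killable() returns an
 * ERR_PTR, and the slow path is expected to have already backed out
 * this caller's RWSEM_ACTIVE_WRITE_BIAS adjustment, so returning
 * -EINTR here leaves the count consistent.
 */
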
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        long tmp;

        tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        long tmp;

        tmp = atomic_long_dec_return_release(&sem->count);
        if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                rwsem_wake(sem);
}

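/*
 * Worked example for the wake test in __up_read() above: with one
 * active reader and waiters queued, the count is RWSEM_WAITING_BIAS +
 * 1; the decrement yields RWSEM_WAITING_BIAS, whose RWSEM_ACTIVE_MASK
 * bits are zero, so the last reader out calls rwsem_wake(). With a
 * second reader still active, the low bits stay non-zero and no
 * wakeup is issued.
 */
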
/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
                                                    &sem->count) < 0))
                rwsem_wake(sem);
}

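/*
 * For __up_write() above: with no waiters, the count drops from
 * RWSEM_ACTIVE_WRITE_BIAS back to RWSEM_UNLOCKED_VALUE and the result
 * is not negative; any queued waiter leaves a RWSEM_WAITING_BIAS term
 * behind, the result goes negative, and rwsem_wake() passes the lock
 * on.
 */
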
/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        long tmp;

        /*
         * When downgrading from exclusive to shared ownership,
         * anything inside the write-locked region cannot leak
         * into the read side. In contrast, anything in the
         * read-locked region is ok to be re-ordered into the
         * write side. As such, rely on RELEASE semantics.
         */
        tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
}

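/*
 * Downgrade arithmetic: since RWSEM_ACTIVE_WRITE_BIAS ==
 * RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS, adding -RWSEM_WAITING_BIAS
 * turns the writer's bias into a single reader's bias. With no
 * waiters the result is RWSEM_ACTIVE_BIAS (>= 0) and nothing more is
 * needed; queued waiters keep the count negative and
 * rwsem_downgrade_wake() wakes the readers at the head of the queue.
 */
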
#endif  /* __KERNEL__ */
#endif  /* _ASM_GENERIC_RWSEM_H */