/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
}
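
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller that must sleep while holding the tree lock first converts
 * its spinning write lock to a blocking one, so waiters sleep on the
 * waitqueues instead of spinning on the rwlock.
 */
static inline void example_write_then_sleep(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);		/* spinning write lock */
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
	/* ... work that may sleep; the rwlock itself is not held ... */
	btrfs_tree_unlock(eb);		/* handles the blocking state itself */
}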

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;

	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		/*
		 * atomic_dec_and_test implies a barrier for waitqueue_active
		 */
		if (atomic_dec_and_test(&eb->blocking_writers) &&
		    waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		/*
		 * atomic_dec_and_test implies a barrier for waitqueue_active
		 */
		if (atomic_dec_and_test(&eb->blocking_readers) &&
		    waitqueue_active(&eb->read_lock_wq))
			wake_up(&eb->read_lock_wq);
	}
}
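
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * round trip on the read side.  The rw constant passed to clear must
 * be the _BLOCKING counterpart of the one passed to set.
 */
static inline void example_read_block_round_trip(struct extent_buffer *eb)
{
	btrfs_tree_read_lock(eb);			/* spinning */
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	/* ... work that may sleep ... */
	btrfs_clear_lock_blocking_rw(eb, BTRFS_READ_LOCK_BLOCKING);
	/* ... short non-sleeping work, lock is spinning again ... */
	btrfs_tree_read_unlock(eb);
}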

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}
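
/*
 * Illustrative sketch, not part of the original file: the common
 * read-side pairing.  The spinning read lock is held only across
 * short, non-sleeping accesses; nesting on top of our own write
 * lock is handled internally via lock_nested.
 */
static inline void example_read_side(struct extent_buffer *eb)
{
	btrfs_tree_read_lock(eb);	/* may wait for blocking writers */
	/* ... short, non-sleeping reads of eb ... */
	btrfs_tree_read_unlock(eb);
}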

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
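
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller tries the non-waiting variant first and falls back to the
 * sleeping version only where its context allows sleeping.
 */
static inline void example_read_lock_atomic_fallback(struct extent_buffer *eb)
{
	if (!btrfs_tree_read_lock_atomic(eb))
		btrfs_tree_read_lock(eb);	/* slow path: may sleep */
	/* ... short reads ... */
	btrfs_tree_read_unlock(eb);
}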

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
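
/*
 * Illustrative sketch, not part of the original file: unlike the
 * atomic variant above, this one uses read_trylock() and so refuses
 * to spin on the rwlock at all, which suits opportunistic paths that
 * can simply skip a contended buffer.
 */
static inline int example_try_read(struct extent_buffer *eb)
{
	if (!btrfs_try_tree_read_lock(eb))
		return 0;	/* contended: caller moves on */
	/* ... short reads ... */
	btrfs_tree_read_unlock(eb);
	return 1;
}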

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}
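
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * writer that prefers not to wait for blocking holders tries the fast
 * path first and falls back to the full spinning write lock.
 */
static inline void example_try_write_fallback(struct extent_buffer *eb)
{
	if (!btrfs_try_tree_write_lock(eb))
		btrfs_tree_lock(eb);	/* slow path: waits for blockers */
	/* ... short, non-sleeping modification of eb ... */
	btrfs_tree_unlock(eb);
}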

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/*
	 * atomic_dec_and_test implies a barrier for waitqueue_active
	 */
	if (atomic_dec_and_test(&eb->blocking_readers) &&
	    waitqueue_active(&eb->read_lock_wq))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}
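
/*
 * Illustrative sketch, not part of the original file: a blocking read
 * lock can be released directly, without converting back to spinning,
 * since the release path never needs the rwlock itself.
 */
static inline void example_blocking_read_release(struct extent_buffer *eb)
{
	btrfs_tree_read_lock(eb);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	/* ... work that may sleep ... */
	btrfs_tree_read_unlock_blocking(eb);	/* no spinning conversion */
}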

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}
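
/*
 * Illustrative sketch, not part of the original file: a plain write
 * critical section.  The spinning write lock must only cover short,
 * non-sleeping work; sleeping callers convert to blocking first (see
 * btrfs_set_lock_blocking_rw above).
 */
static inline void example_write_side(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);	/* waits until no blocking holders */
	/* ... short, non-sleeping modification of eb ... */
	btrfs_tree_unlock(eb);
}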

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/*
		 * Make sure counter is updated before we wake up waiters.
		 */
		smp_mb();
		if (waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}