GNU Linux-libre 4.14.328-gnu1
fs/ocfs2/buffer_head_io.c
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * io.c
 *
 * Buffer cache handling
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/bio.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "uptodate.h"
#include "buffer_head_io.h"
#include "ocfs2_trace.h"

/*
 * Bits on bh->b_state used by ocfs2.
 *
 * These MUST be after the JBD2 bits.  Hence, we use BH_JBDPrivateStart.
 */
enum ocfs2_state_bits {
        BH_NeedsValidate = BH_JBDPrivateStart,
};

/* Expand the magic b_state functions */
BUFFER_FNS(NeedsValidate, needs_validate);
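
/*
 * A rough sketch of what the BUFFER_FNS() use above generates (see
 * <linux/buffer_head.h> for the exact definition): inline helpers that
 * set, clear and test BH_NeedsValidate in bh->b_state, roughly
 *
 *	static __always_inline void set_buffer_needs_validate(struct buffer_head *bh)
 *	{
 *		set_bit(BH_NeedsValidate, &bh->b_state);
 *	}
 *
 * plus the matching clear_buffer_needs_validate() and
 * buffer_needs_validate() used later in this file.
 */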

int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
                      struct ocfs2_caching_info *ci)
{
        int ret = 0;

        trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);

        BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
        BUG_ON(buffer_jbd(bh));

        /* No need to check for a soft read-only file system here. Non-
         * journalled writes are only ever done on system files, which
         * can get modified during recovery even if read-only. */
        if (ocfs2_is_hard_readonly(osb)) {
                ret = -EROFS;
                mlog_errno(ret);
                goto out;
        }

        ocfs2_metadata_cache_io_lock(ci);

        lock_buffer(bh);
        set_buffer_uptodate(bh);

        /* remove from dirty list before I/O. */
        clear_buffer_dirty(bh);

        get_bh(bh); /* for end_buffer_write_sync() */
        bh->b_end_io = end_buffer_write_sync;
        submit_bh(REQ_OP_WRITE, 0, bh);

        wait_on_buffer(bh);

        if (buffer_uptodate(bh)) {
                ocfs2_set_buffer_uptodate(ci, bh);
        } else {
                /* We don't need to remove the clustered uptodate
                 * information for this bh as it's not marked locally
                 * uptodate. */
                ret = -EIO;
                mlog_errno(ret);
        }

        ocfs2_metadata_cache_io_unlock(ci);
out:
        return ret;
}
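
#if 0   /* Illustration only: a hypothetical caller, not part of this file. */
/*
 * A minimal sketch of how ocfs2_write_block() is meant to be used for a
 * synchronous, non-journalled metadata write: modify the cached block in
 * memory, then push it straight to disk.  The function name and the way
 * the bh is obtained are hypothetical.
 */
static int example_write_cached_block(struct ocfs2_super *osb,
                                      struct ocfs2_caching_info *ci,
                                      struct buffer_head *bh)
{
        int ret;

        /* ... update bh->b_data here ... */

        ret = ocfs2_write_block(osb, bh, ci);
        if (ret)
                mlog_errno(ret);

        return ret;
}
#endif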

/* The caller must provide a bhs[] whose entries are either all NULL or
 * all non-NULL, so that read failures are easier to handle.
 */
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
                           unsigned int nr, struct buffer_head *bhs[])
{
        int status = 0;
        unsigned int i;
        struct buffer_head *bh;
        int new_bh = 0;

        trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);

        if (!nr)
                goto bail;

        /* Don't put a buffer head and reset it to NULL if it was allocated
         * by the caller, since the caller can't be aware of that change!
         */
        new_bh = (bhs[0] == NULL);

        for (i = 0 ; i < nr ; i++) {
                if (bhs[i] == NULL) {
                        bhs[i] = sb_getblk(osb->sb, block++);
                        if (bhs[i] == NULL) {
                                status = -ENOMEM;
                                mlog_errno(status);
                                break;
                        }
                }
                bh = bhs[i];

                if (buffer_jbd(bh)) {
                        trace_ocfs2_read_blocks_sync_jbd(
                                        (unsigned long long)bh->b_blocknr);
                        continue;
                }

                if (buffer_dirty(bh)) {
                        /* This should probably be a BUG, or
                         * at least return an error. */
                        mlog(ML_ERROR,
                             "trying to sync read a dirty "
                             "buffer! (blocknr = %llu), skipping\n",
                             (unsigned long long)bh->b_blocknr);
                        continue;
                }

                lock_buffer(bh);
                if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
                        mlog(ML_ERROR,
                             "block %llu had the JBD bit set "
                             "while I was in lock_buffer!",
                             (unsigned long long)bh->b_blocknr);
                        BUG();
#else
                        unlock_buffer(bh);
                        continue;
#endif
                }

                get_bh(bh); /* for end_buffer_read_sync() */
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(REQ_OP_READ, 0, bh);
        }

read_failure:
        for (i = nr; i > 0; i--) {
                bh = bhs[i - 1];

                if (unlikely(status)) {
                        if (new_bh && bh) {
                                /* If a middle bh fails, let the previous bh
                                 * finish its read and then put it to
                                 * avoid a bh leak.
                                 */
                                if (!buffer_jbd(bh))
                                        wait_on_buffer(bh);
                                put_bh(bh);
                                bhs[i - 1] = NULL;
                        } else if (bh && buffer_uptodate(bh)) {
                                clear_buffer_uptodate(bh);
                        }
                        continue;
                }

                /* No need to wait on the buffer if it's managed by JBD. */
                if (!buffer_jbd(bh))
                        wait_on_buffer(bh);

                if (!buffer_uptodate(bh)) {
                        /* Status won't be cleared from here on out,
                         * so we can safely record this and loop back
                         * to cleanup the other buffers. */
                        status = -EIO;
                        goto read_failure;
                }
        }

bail:
        return status;
}
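
#if 0   /* Illustration only: a hypothetical caller, not part of this file. */
/*
 * A minimal sketch of an uncached, synchronous read of nr contiguous
 * blocks starting at blkno.  Per the comment above, bhs[] must be either
 * all NULL (the helper allocates) or all pre-allocated.  The function
 * name and arguments are hypothetical.
 */
static int example_read_raw_blocks(struct ocfs2_super *osb, u64 blkno,
                                   unsigned int nr,
                                   struct buffer_head *bhs[])
{
        int status;

        memset(bhs, 0, nr * sizeof(*bhs)); /* all NULL: let the helper allocate */
        status = ocfs2_read_blocks_sync(osb, blkno, nr, bhs);
        if (status)
                mlog_errno(status);

        return status;
}
#endif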

/* The caller must provide a bhs[] whose entries are either all NULL or
 * all non-NULL, so that read failures are easier to handle.
 */
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
                      struct buffer_head *bhs[], int flags,
                      int (*validate)(struct super_block *sb,
                                      struct buffer_head *bh))
{
        int status = 0;
        int i, ignore_cache = 0;
        struct buffer_head *bh;
        struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
        int new_bh = 0;

        trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);

        BUG_ON(!ci);
        BUG_ON((flags & OCFS2_BH_READAHEAD) &&
               (flags & OCFS2_BH_IGNORE_CACHE));

        if (bhs == NULL) {
                status = -EINVAL;
                mlog_errno(status);
                goto bail;
        }

        if (nr < 0) {
                mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
                status = -EINVAL;
                mlog_errno(status);
                goto bail;
        }

        if (nr == 0) {
                status = 0;
                goto bail;
        }

        /* Don't put a buffer head and reset it to NULL if it was allocated
         * by the caller, since the caller can't be aware of that change!
         */
        new_bh = (bhs[0] == NULL);

        ocfs2_metadata_cache_io_lock(ci);
        for (i = 0 ; i < nr ; i++) {
                if (bhs[i] == NULL) {
                        bhs[i] = sb_getblk(sb, block++);
                        if (bhs[i] == NULL) {
                                ocfs2_metadata_cache_io_unlock(ci);
                                status = -ENOMEM;
                                mlog_errno(status);
                                /* Don't forget to put previous bh! */
                                break;
                        }
                }
                bh = bhs[i];
                ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);

                /* There are three read-ahead cases here which we need to
                 * be concerned with. All three assume a buffer has
                 * previously been submitted with OCFS2_BH_READAHEAD
                 * and it hasn't yet completed I/O.
                 *
                 * 1) The current request is sync to disk. This rarely
                 *    happens these days, and never when performance
                 *    matters - the code can just wait on the buffer
                 *    lock and re-submit.
                 *
                 * 2) The current request is cached, but not
                 *    readahead. ocfs2_buffer_uptodate() will return
                 *    false anyway, so we'll wind up waiting on the
                 *    buffer lock to do I/O. We re-check the request
                 *    after getting the lock to avoid a re-submit.
                 *
                 * 3) The current request is readahead (and so must
                 *    also be a caching one). We short circuit if the
                 *    buffer is locked (under I/O) and if it's in the
                 *    uptodate cache. The re-check from #2 catches the
                 *    case that the previous read-ahead completes just
                 *    before our is-it-in-flight check.
                 */

                if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
                        trace_ocfs2_read_blocks_from_disk(
                             (unsigned long long)bh->b_blocknr,
                             (unsigned long long)ocfs2_metadata_cache_owner(ci));
                        /* We're using ignore_cache here to say
                         * "go to disk" */
                        ignore_cache = 1;
                }

                trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
                        ignore_cache, buffer_jbd(bh), buffer_dirty(bh));

                if (buffer_jbd(bh)) {
                        continue;
                }

                if (ignore_cache) {
                        if (buffer_dirty(bh)) {
                                /* This should probably be a BUG, or
                                 * at least return an error. */
                                continue;
                        }

                        /* A read-ahead request was made - if the
                         * buffer is already under read-ahead from a
                         * previously submitted request then we are
                         * done here. */
                        if ((flags & OCFS2_BH_READAHEAD)
                            && ocfs2_buffer_read_ahead(ci, bh))
                                continue;

                        lock_buffer(bh);
                        if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
                                mlog(ML_ERROR, "block %llu had the JBD bit set "
                                               "while I was in lock_buffer!",
                                     (unsigned long long)bh->b_blocknr);
                                BUG();
#else
                                unlock_buffer(bh);
                                continue;
#endif
                        }

                        /* Re-check ocfs2_buffer_uptodate() as a
                         * previously read-ahead buffer may have
                         * completed I/O while we were waiting for the
                         * buffer lock. */
                        if (!(flags & OCFS2_BH_IGNORE_CACHE)
                            && !(flags & OCFS2_BH_READAHEAD)
                            && ocfs2_buffer_uptodate(ci, bh)) {
                                unlock_buffer(bh);
                                continue;
                        }

                        get_bh(bh); /* for end_buffer_read_sync() */
                        if (validate)
                                set_buffer_needs_validate(bh);
                        bh->b_end_io = end_buffer_read_sync;
                        submit_bh(REQ_OP_READ, 0, bh);
                        continue;
                }
        }

read_failure:
        for (i = (nr - 1); i >= 0; i--) {
                bh = bhs[i];

                if (!(flags & OCFS2_BH_READAHEAD)) {
                        if (unlikely(status)) {
                                /* Clear the buffers on error, including those
                                 * that had already been read successfully.
                                 */
                                if (new_bh && bh) {
                                        /* If a middle bh fails, let the previous bh
                                         * finish its read and then put it to
                                         * avoid a bh leak.
                                         */
                                        if (!buffer_jbd(bh))
                                                wait_on_buffer(bh);
                                        put_bh(bh);
                                        bhs[i] = NULL;
                                } else if (bh && buffer_uptodate(bh)) {
                                        clear_buffer_uptodate(bh);
                                }
                                continue;
                        }
                        /* We know this can't have changed as we hold the
                         * owner sem. Avoid doing any work on the bh if the
                         * journal has it. */
                        if (!buffer_jbd(bh))
                                wait_on_buffer(bh);

                        if (!buffer_uptodate(bh)) {
                                /* Status won't be cleared from here on out,
                                 * so we can safely record this and loop back
                                 * to cleanup the other buffers. Don't need to
                                 * remove the clustered uptodate information
                                 * for this bh as it's not marked locally
                                 * uptodate. */
                                status = -EIO;
                                clear_buffer_needs_validate(bh);
                                goto read_failure;
                        }

                        if (buffer_needs_validate(bh)) {
                                /* We never set NeedsValidate if the
                                 * buffer was held by the journal, so
                                 * that better not have changed */
                                BUG_ON(buffer_jbd(bh));
                                clear_buffer_needs_validate(bh);
                                status = validate(sb, bh);
                                if (status)
                                        goto read_failure;
                        }
                }

                /* Always set the buffer in the cache, even if it was
                 * a forced read, or read-ahead which hasn't yet
                 * completed. */
                ocfs2_set_buffer_uptodate(ci, bh);
        }
        ocfs2_metadata_cache_io_unlock(ci);

        trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
                                    flags, ignore_cache);

bail:

        return status;
}
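
#if 0   /* Illustration only: a hypothetical caller, not part of this file. */
/*
 * A minimal sketch of a cached, validated single-block read built on
 * ocfs2_read_blocks().  The validate callback is only invoked when the
 * block is actually read from disk, not on cache hits.  Both function
 * names here are hypothetical stand-ins for real callers and validators
 * elsewhere in ocfs2.
 */
static int example_validate_block(struct super_block *sb,
                                  struct buffer_head *bh)
{
        /* Check a magic/signature, ECC, etc.; non-zero fails the read. */
        return 0;
}

static int example_read_one_block(struct ocfs2_caching_info *ci, u64 blkno,
                                  struct buffer_head **bh)
{
        int status;

        /* flags == 0: normal cached read (no readahead, no cache bypass). */
        status = ocfs2_read_blocks(ci, blkno, 1, bh, 0, example_validate_block);
        if (status)
                mlog_errno(status);

        return status;
}
#endif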

/* Check whether the blkno is the super block or one of the backups. */
static void ocfs2_check_super_or_backup(struct super_block *sb,
                                        sector_t blkno)
{
        int i;
        u64 backup_blkno;

        if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
                return;

        for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
                backup_blkno = ocfs2_backup_super_blkno(sb, i);
                if (backup_blkno == blkno)
                        return;
        }

        BUG();
}

/*
 * Writing the super block and its backups doesn't need to coordinate with
 * the journal, so we don't need to lock ip_io_mutex and no ci needs to be
 * passed into this function.
 */
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
                                struct buffer_head *bh)
{
        int ret = 0;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

        BUG_ON(buffer_jbd(bh));
        ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
                ret = -EROFS;
                mlog_errno(ret);
                goto out;
        }

        lock_buffer(bh);
        set_buffer_uptodate(bh);

        /* remove from dirty list before I/O. */
        clear_buffer_dirty(bh);

        get_bh(bh); /* for end_buffer_write_sync() */
        bh->b_end_io = end_buffer_write_sync;
        ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
        submit_bh(REQ_OP_WRITE, 0, bh);

        wait_on_buffer(bh);

        if (!buffer_uptodate(bh)) {
                ret = -EIO;
                mlog_errno(ret);
        }

out:
        return ret;
}
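
#if 0   /* Illustration only: a hypothetical caller, not part of this file. */
/*
 * A minimal sketch of flushing an in-memory super block change to disk.
 * The bh must hold the super block or one of its backups (enforced by
 * ocfs2_check_super_or_backup() above); the metadata ECC is recomputed
 * inside the helper, so callers only hand over the buffer.  The function
 * name and the way the bh is obtained are hypothetical.
 */
static int example_flush_super(struct ocfs2_super *osb,
                               struct buffer_head *super_bh)
{
        int ret;

        /* ... update the struct ocfs2_dinode in super_bh->b_data ... */

        ret = ocfs2_write_super_or_backup(osb, super_bh);
        if (ret)
                mlog_errno(ret);

        return ret;
}
#endif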