// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

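/**
 * gfs2_trans_add_databufs - Add a folio's buffers to the running transaction
 * @ip: The inode
 * @folio: The folio
 * @from: Offset within the folio at which the write begins
 * @len: Length of the write
 *
 * Marks each buffer head that overlaps [@from, @from + @len) uptodate and
 * adds it to the current transaction via gfs2_trans_add_data().
 */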
void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
                             size_t from, size_t len)
{
        struct buffer_head *head = folio_buffers(folio);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        size_t to = from + len;
        size_t start, end;

        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from)
                        continue;
                if (start >= to)
                        break;
                set_buffer_uptodate(bh);
                gfs2_trans_add_data(ip->i_gl, bh);
        }
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, bh_result, 0);
        if (error)
                return error;
        if (!buffer_mapped(bh_result))
                return -ENODATA;
        return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_folio, but it also
 * writes folios outside of i_size
 */
static int gfs2_write_jdata_folio(struct folio *folio,
                                  struct writeback_control *wbc)
{
        struct inode * const inode = folio->mapping->host;
        loff_t i_size = i_size_read(inode);

        /*
         * The folio straddles i_size.  It must be zeroed out on each and every
         * writepage invocation because it may be mmapped.  "A file is mapped
         * in multiples of the page size.  For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        if (folio_pos(folio) < i_size &&
            i_size < folio_pos(folio) + folio_size(folio))
                folio_zero_segment(folio, offset_in_folio(folio, i_size),
                                folio_size(folio));

        return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
                        wbc, end_buffer_async_write);
}

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
                struct writeback_control *wbc)
{
        struct inode *inode = folio->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);

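        /*
         * The checked flag is set by jdata_dirty_folio() when a folio is
         * dirtied inside a transaction: its buffers still need to be added
         * to the journal before the folio can be written back.
         */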
        if (folio_test_checked(folio)) {
                folio_clear_checked(folio);
                if (!folio_buffers(folio)) {
                        create_empty_buffers(folio,
                                        inode->i_sb->s_blocksize,
                                        BIT(BH_Dirty)|BIT(BH_Uptodate));
                }
                gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
        }
        return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct folio *folio = page_folio(page);
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
                goto out;
        if (folio_test_checked(folio) || current->journal_info)
                goto out_ignore;
        return __gfs2_jdata_write_folio(folio, wbc);

out_ignore:
        folio_redirty_for_writepage(wbc, folio);
out:
        folio_unlock(folio);
        return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct iomap_writepage_ctx wpc = { };
        int ret;

        /*
         * Even if we didn't write enough pages here, we might still be holding
         * dirty pages in the ail. We forcibly flush the ail because we don't
         * want balance_dirty_pages() to loop indefinitely trying to write out
         * pages held in the ail that it can't find.
         */
        ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
        if (ret == 0 && wbc->nr_to_write > 0)
                set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
        return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Page index at which to restart writeback, updated as we go
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
                                  struct writeback_control *wbc,
                                  struct folio_batch *fbatch,
                                  pgoff_t *done_index)
{
        struct inode *inode = mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        unsigned nrblocks;
        int i;
        int ret;
        size_t size = 0;
        int nr_folios = folio_batch_count(fbatch);

        for (i = 0; i < nr_folios; i++)
                size += folio_size(fbatch->folios[i]);
        nrblocks = size >> inode->i_blkbits;

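        /*
         * Start one transaction covering every block in the batch before
         * taking any folio locks (see gfs2_write_cache_jdata()).
         */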
        ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
        if (ret < 0)
                return ret;

        for (i = 0; i < nr_folios; i++) {
                struct folio *folio = fbatch->folios[i];

                *done_index = folio->index;

                folio_lock(folio);

                if (unlikely(folio->mapping != mapping)) {
continue_unlock:
                        folio_unlock(folio);
                        continue;
                }

                if (!folio_test_dirty(folio)) {
                        /* someone wrote it for us */
                        goto continue_unlock;
                }

                if (folio_test_writeback(folio)) {
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                folio_wait_writeback(folio);
                        else
                                goto continue_unlock;
                }

                BUG_ON(folio_test_writeback(folio));
                if (!folio_clear_dirty_for_io(folio))
                        goto continue_unlock;

                trace_wbc_writepage(wbc, inode_to_bdi(inode));

                ret = __gfs2_jdata_write_folio(folio, wbc);
                if (unlikely(ret)) {
                        if (ret == AOP_WRITEPAGE_ACTIVATE) {
                                folio_unlock(folio);
                                ret = 0;
                        } else {
                                /*
                                 * done_index is set past this page,
                                 * so media errors will not choke
                                 * background writeout for the entire
                                 * file. This has consequences for
                                 * range_cyclic semantics (ie. it may
                                 * not be suitable for data integrity
                                 * writeout).
                                 */
                                *done_index = folio_next_index(folio);
                                ret = 1;
                                break;
                        }
                }

                /*
                 * We stop writing back only if we are not doing
                 * integrity sync. In case of integrity sync we have to
                 * keep going until we have written all the pages
                 * we tagged for writeback prior to entering this loop.
                 */
                if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
                        ret = 1;
                        break;
                }
        }
        gfs2_trans_end(sdp);
        return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
                                  struct writeback_control *wbc)
{
        int ret = 0;
        int done = 0;
        struct folio_batch fbatch;
        int nr_folios;
        pgoff_t writeback_index;
        pgoff_t index;
        pgoff_t end;
        pgoff_t done_index;
        int cycled;
        int range_whole = 0;
        xa_mark_t tag;

        folio_batch_init(&fbatch);
        if (wbc->range_cyclic) {
                writeback_index = mapping->writeback_index; /* prev offset */
                index = writeback_index;
                if (index == 0)
                        cycled = 1;
                else
                        cycled = 0;
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
        }
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag = PAGECACHE_TAG_TOWRITE;
        else
                tag = PAGECACHE_TAG_DIRTY;

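        /*
         * For integrity sync, tag the pages that are currently dirty as
         * TOWRITE so that pages dirtied during writeback cannot cause us
         * to loop forever.
         */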
retry:
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag_pages_for_writeback(mapping, index, end);
        done_index = index;
        while (!done && (index <= end)) {
                nr_folios = filemap_get_folios_tag(mapping, &index, end,
                                tag, &fbatch);
                if (nr_folios == 0)
                        break;

                ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
                                &done_index);
                if (ret)
                        done = 1;
                if (ret > 0)
                        ret = 0;
                folio_batch_release(&fbatch);
                cond_resched();
        }

        if (!cycled && !done) {
                /*
                 * range_cyclic:
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                cycled = 1;
                index = 0;
                end = writeback_index - 1;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = done_index;

        return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

388
389 static int gfs2_jdata_writepages(struct address_space *mapping,
390                                  struct writeback_control *wbc)
391 {
392         struct gfs2_inode *ip = GFS2_I(mapping->host);
393         struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
394         int ret;
395
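        /*
         * For WB_SYNC_ALL, anything that remains dirty after the first pass
         * is presumably still pinned in the journal; flush the log and try
         * once more.
         */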
        ret = gfs2_write_cache_jdata(mapping, wbc);
        if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
                gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
                               GFS2_LFC_JDATA_WPAGES);
                ret = gfs2_write_cache_jdata(mapping, wbc);
        }
        return ret;
}

/**
 * stuffed_readpage - Fill in a Linux folio with stuffed file data
 * @ip: the inode
 * @folio: the folio
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct folio *folio)
{
        struct buffer_head *dibh;
        size_t i_size = i_size_read(&ip->i_inode);
        void *data;
        int error;

        /*
         * Due to the order of unstuffing files and ->fault(), we can be
         * asked for a zero folio in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(folio->index)) {
                folio_zero_range(folio, 0, folio_size(folio));
                folio_mark_uptodate(folio);
                return 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

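        /* The inline file data lives directly after the on-disk dinode. */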
        data = dibh->b_data + sizeof(struct gfs2_dinode);
        memcpy_to_folio(folio, 0, data, i_size);
        folio_zero_range(folio, i_size, folio_size(folio) - i_size);
        brelse(dibh);
        folio_mark_uptodate(folio);

        return 0;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 *
 * Returns: errno
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
        struct inode *inode = folio->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int error;

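        /*
         * Non-jdata folios, and jdata folios without buffer heads when the
         * block size matches the page size, can be read via iomap.  Stuffed
         * inodes are copied straight out of the dinode, and everything else
         * goes through the buffer-head based mpage path.
         */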
        if (!gfs2_is_jdata(ip) ||
            (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
                error = iomap_read_folio(folio, &gfs2_iomap_ops);
        } else if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, folio);
                folio_unlock(folio);
        } else {
                error = mpage_read_folio(folio, gfs2_block_map);
        }

        if (unlikely(gfs2_withdrawn(sdp)))
                return -EIO;

        return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The amount of data read, or a negative errno
 */

ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                           size_t size)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        unsigned long index = *pos >> PAGE_SHIFT;
        size_t copied = 0;

        do {
                size_t offset, chunk;
                struct folio *folio;

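                /* Reads interrupted with -EINTR are simply retried. */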
                folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
                if (IS_ERR(folio)) {
                        if (PTR_ERR(folio) == -EINTR)
                                continue;
                        return PTR_ERR(folio);
                }
                offset = *pos + copied - folio_pos(folio);
                chunk = min(size - copied, folio_size(folio) - offset);
                memcpy_from_folio(buf + copied, folio, offset, chunk);
                index = folio_next_index(folio);
                folio_put(folio);
                copied += chunk;
        } while (copied < size);
        (*pos) += size;
        return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via read_folio later.
 * 2. We don't handle stuffed files here; we let read_folio do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
        struct inode *inode = rac->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);

        if (gfs2_is_stuffed(ip))
                ;
        else if (gfs2_is_jdata(ip))
                mpage_readahead(rac, gfs2_block_map);
        else
                iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh;
        u64 fs_total, new_free;

        if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
                return;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);
        if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
                goto out;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);

        update_statfs(sdp, m_bh);
        brelse(m_bh);
out:
        sdp->sd_rindex_uptodate = 0;
        gfs2_trans_end(sdp);
}

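/*
 * Dirtying a jdata folio from within a transaction marks it checked so that
 * __gfs2_jdata_write_folio() will journal its buffers at writeback time.
 */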
static bool jdata_dirty_folio(struct address_space *mapping,
                struct folio *folio)
{
        if (current->journal_info)
                folio_set_checked(folio);
        return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

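        /* A stuffed file's data lives in the dinode, not in a data block. */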
        if (!gfs2_is_stuffed(ip))
                dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

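/*
 * gfs2_discard - Detach a buffer from the journal before it is thrown away
 * @sdp: The superblock
 * @bh: The buffer head
 *
 * Under the log lock, clears the buffer's dirty and mapping state and
 * removes any associated gfs2_bufdata from the journal.
 */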
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        lock_buffer(bh);
        gfs2_log_lock(sdp);
        clear_buffer_dirty(bh);
        bd = bh->b_private;
        if (bd) {
                if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
                        list_del_init(&bd->bd_list);
                else {
                        spin_lock(&sdp->sd_ail_lock);
                        gfs2_remove_from_journal(bh, REMOVE_JDATA);
                        spin_unlock(&sdp->sd_ail_lock);
                }
        }
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        gfs2_log_unlock(sdp);
        unlock_buffer(bh);
}

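/*
 * gfs2_invalidate_folio - Invalidate part or all of a jdata folio
 * @folio: The folio
 * @offset: Start of the range being invalidated
 * @length: Length of the range
 *
 * Discards every buffer that lies fully inside the range from the journal.
 * When the whole folio is invalidated, the checked flag is cleared and the
 * buffers are released as well.
 */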
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
                                  size_t length)
{
        struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
        size_t stop = offset + length;
        int partial_page = (offset || length < folio_size(folio));
        struct buffer_head *bh, *head;
        unsigned long pos = 0;

        BUG_ON(!folio_test_locked(folio));
        if (!partial_page)
                folio_clear_checked(folio);
        head = folio_buffers(folio);
        if (!head)
                goto out;

        bh = head;
        do {
                if (pos + bh->b_size > stop)
                        return;

                if (offset <= pos)
                        gfs2_discard(sdp, bh);
                pos += bh->b_size;
                bh = bh->b_this_page;
        } while (bh != head);
out:
        if (!partial_page)
                filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
        struct address_space *mapping = folio->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;

        head = folio_buffers(folio);
        if (!head)
                return false;

        /*
         * mm accommodates an old ext3 case where clean folios might
         * not have had the dirty bit cleared.  Thus, it can send actual
         * dirty folios to ->release_folio() via shrink_active_list().
         *
         * As a workaround, we skip folios that contain dirty buffers
         * below.  Once ->release_folio isn't called on dirty folios
         * anymore, we can warn on dirty buffers like we used to here
         * again.
         */

        gfs2_log_lock(sdp);
        bh = head;
        do {
                if (atomic_read(&bh->b_count))
                        goto cannot_release;
                bd = bh->b_private;
                if (bd && bd->bd_tr)
                        goto cannot_release;
                if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
                        goto cannot_release;
                bh = bh->b_this_page;
        } while (bh != head);

        bh = head;
        do {
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        bd->bd_bh = NULL;
                        bh->b_private = NULL;
                        /*
                         * The bd may still be queued as a revoke, in which
                         * case we must not dequeue nor free it.
                         */
                        if (!bd->bd_blkno && !list_empty(&bd->bd_list))
                                list_del_init(&bd->bd_list);
                        if (list_empty(&bd->bd_list))
                                kmem_cache_free(gfs2_bufdata_cachep, bd);
                }

                bh = bh->b_this_page;
        } while (bh != head);
        gfs2_log_unlock(sdp);

        return try_to_free_buffers(folio);

cannot_release:
        gfs2_log_unlock(sdp);
        return false;
}

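/*
 * Regular (ordered/writeback mode) files go through the iomap-based
 * operations below; journaled-data files use the buffer-head based
 * paths defined above.
 */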
static const struct address_space_operations gfs2_aops = {
        .writepages = gfs2_writepages,
        .read_folio = gfs2_read_folio,
        .readahead = gfs2_readahead,
        .dirty_folio = iomap_dirty_folio,
        .release_folio = iomap_release_folio,
        .invalidate_folio = iomap_invalidate_folio,
        .bmap = gfs2_bmap,
        .migrate_folio = filemap_migrate_folio,
        .is_partially_uptodate = iomap_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
        .writepage = gfs2_jdata_writepage,
        .writepages = gfs2_jdata_writepages,
        .read_folio = gfs2_read_folio,
        .readahead = gfs2_readahead,
        .dirty_folio = jdata_dirty_folio,
        .bmap = gfs2_bmap,
        .invalidate_folio = gfs2_invalidate_folio,
        .release_folio = gfs2_release_folio,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
        if (gfs2_is_jdata(GFS2_I(inode)))
                inode->i_mapping->a_ops = &gfs2_jdata_aops;
        else
                inode->i_mapping->a_ops = &gfs2_aops;
}