/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks should be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set, because that
 * causes a costly cache line transition, check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

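/*
 * Illustration (editorial sketch, not part of the kernel API surface):
 * BUFFER_FNS(Dirty, dirty) above expands to set_buffer_dirty(),
 * clear_buffer_dirty() and buffer_dirty(); TAS_BUFFER_FNS(Dirty, dirty)
 * adds the atomic test-and-modify forms.  A caller that needs to know
 * whether it was the one to dirty a buffer would use:
 *
 *	if (!test_set_buffer_dirty(bh))
 *		account_newly_dirtied(bh);	(hypothetical helper: the
 *						 bit was clear before, so
 *						 this caller dirtied it)
 */
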
static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * If somebody else already set this uptodate, they will
	 * have done the memory barrier, and a reader will thus
	 * see *some* valid buffer state.
	 *
	 * Any other serialization (with IO errors or whatever that
	 * might clear the bit) has to come from other state (eg BH_Lock).
	 */
	if (test_bit(BH_Uptodate, &bh->b_state))
		return;

	/*
	 * make it consistent with folio_mark_uptodate
	 * pairs with smp_load_acquire in buffer_uptodate
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * make it consistent with folio_test_uptodate
	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
	 */
	return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
}

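/*
 * Ordering sketch (editorial, hedged): the acquire/release pairing above
 * matters most on the I/O completion path.  A read-completion handler
 * conventionally publishes the data before waking waiters:
 *
 *	set_buffer_uptodate(bh);	(barrier, then sets the bit)
 *	unlock_buffer(bh);		(wakes anyone in wait_on_buffer)
 *
 * and a waiter that then sees buffer_uptodate(bh) != 0 is guaranteed,
 * via the smp_load_acquire() above, to also observe the buffer contents
 * written before the bit was set.
 */
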
#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)

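/*
 * Usage sketch (editorial illustration of the pattern used throughout
 * fs/buffer.c): all buffers attached to a page are walked via the
 * circular b_this_page list rooted at page_buffers():
 *
 *	struct buffer_head *bh, *head;
 *
 *	if (page_has_buffers(page)) {
 *		bh = head = page_buffers(page);
 *		do {
 *			(inspect or update bh here)
 *			bh = bh->b_this_page;
 *		} while (bh != head);
 *	}
 */
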
void buffer_check_dirty_writeback(struct page *page,
				     bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		bool retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				  unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
		  gfp_t gfp);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
int submit_bh(int, int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);

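/*
 * Usage sketch (editorial, modelled on filesystem callers): reading a
 * buffer only when it is not already uptodate is typically spelled:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		(returned 0: bh is now locked and not uptodate)
 *		if (bh_submit_read(bh))
 *			(read failed; the completion handler unlocked
 *			 the buffer and -EIO was returned)
 *	}
 *	(on success bh is uptodate here)
 */
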
extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
			  unsigned int length);
int block_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler);
int block_read_full_page(struct page*, get_block_t*);
int block_is_partially_uptodate(struct page *page, unsigned long from,
				unsigned long count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
			unsigned, unsigned, struct page **, void **,
			get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline vm_fault_t block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
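
/*
 * Usage sketch (editorial, names hypothetical): a block-based
 * filesystem's ->page_mkwrite() handler can be little more than:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		int err = block_page_mkwrite(vmf->vma, vmf,
 *					     myfs_get_block);
 *		return block_page_mkwrite_return(err);
 *	}
 *
 * where myfs_get_block is the filesystem's get_block_t callback.
 */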
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
				struct page **, void **, get_block_t*);
int nobh_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc);

void buffer_init(void);

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

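/*
 * Usage sketch (editorial, modelled on the synchronous read path in
 * fs/buffer.c): b_count pins the buffer_head object itself, so take a
 * reference across any window where the last user might otherwise drop
 * it, e.g. while I/O is in flight:
 *
 *	lock_buffer(bh);
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_read_sync;	(completion unlocks and
 *						 drops the ref via put_bh)
 *	submit_bh(REQ_OP_READ, 0, bh);
 *	wait_on_buffer(bh);
 */
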
static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

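/*
 * Usage sketch (editorial illustration of the classic metadata-read
 * pattern): read one filesystem block and release it when done:
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;	(the block was unreadable)
 *	(bh->b_data now holds sb->s_blocksize bytes of valid data)
 *	brelse(bh);
 */
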
static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	__breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}

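/*
 * Usage sketch (editorial, names hypothetical): a get_block_t callback
 * resolves one logical block and describes the result with map_bh():
 *
 *	static int myfs_get_block(struct inode *inode, sector_t iblock,
 *				  struct buffer_head *bh, int create)
 *	{
 *		sector_t phys = myfs_lookup(inode, iblock); (fs-specific)
 *
 *		if (!phys)
 *			return 0;	(a hole: leave bh unmapped)
 *		map_bh(bh, inode->i_sb, phys);
 *		return 0;
 *	}
 */
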
static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}

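/*
 * Usage sketch (editorial, hedged; the exact locking discipline varies
 * by filesystem): a common modify-a-block sequence takes the buffer
 * lock around the update, marks the result dirty, and optionally forces
 * it to disk:
 *
 *	lock_buffer(bh);
 *	memcpy(bh->b_data + off, src, len);	(off, src, len are
 *						 caller-supplied)
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);	(optional synchronous writeback)
 */
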
static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

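/*
 * Usage sketch (editorial): unlike __bread(), __getblk() performs no
 * I/O, which suits blocks that are about to be written in full, e.g.
 * freshly allocated metadata:
 *
 *	struct buffer_head *bh = __getblk(bdev, block, size);
 *
 *	if (bh) {
 *		lock_buffer(bh);
 *		memset(bh->b_data, 0, bh->b_size);
 *		set_buffer_uptodate(bh);
 *		unlock_buffer(bh);
 *		mark_buffer_dirty(bh);
 *		brelse(bh);
 *	}
 */
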
/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns the buffer head that contains it.
 *  The page cache is allocated from the movable area so that it can be
 *  migrated.  It returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */