/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_exit(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_destroy(struct backing_dev_info *bdi);

void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}
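
/*
 * Editorial note (sketch, not from this file): the double-underscore
 * stat helpers above assume the caller already has interrupts disabled,
 * while the plain variants disable them locally.  A path that already
 * holds mapping->tree_lock with IRQs off (as the dirty accounting code
 * does) can therefore use the cheap form directly:
 *
 *	__inc_wb_stat(wb, WB_RECLAIMABLE);
 *	__inc_wb_stat(wb, WB_DIRTIED);
 *
 * whereas process context with IRQs enabled should use
 * inc_wb_stat()/dec_wb_stat().
 */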

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these three
 * flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Pages must not be modified while under writeback
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY		0x00000001
#define BDI_CAP_NO_WRITEBACK		0x00000002
#define BDI_CAP_NO_ACCT_WB		0x00000004
#define BDI_CAP_STABLE_WRITES		0x00000008
#define BDI_CAP_STRICTLIMIT		0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
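
/*
 * Editorial sketch (hypothetical example, not from this file): a purely
 * memory-backed filesystem such as ramfs sets up its bdi along these
 * lines before registration:
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *
 * With those bits set, bdi_cap_writeback_dirty() and
 * bdi_cap_account_dirty() below both return false, so the VM neither
 * accounts the filesystem's dirty pages nor calls writepages() on them.
 */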

extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
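
/*
 * Editorial sketch (hypothetical example, not from this file): the
 * filesystem side of the test above is the SB_I_CGROUPWB super-block
 * flag, which an opted-in filesystem sets at mount time:
 *
 *	sb->s_iflags |= SB_I_CGROUPWB;
 *
 * inode_cgwb_enabled() returns true only when the filesystem sets this
 * flag, the bdi advertises BDI_CAP_CGROUP_WRITEBACK, and both the
 * memory and io controllers are on the default hierarchy.
 */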

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}
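
/*
 * Editorial sketch (hypothetical example, not from this file): a wb
 * obtained here carries a reference that the caller must drop with
 * wb_put() when done:
 *
 *	wb = wb_get_create_current(bdi, GFP_NOIO);
 *	if (wb) {
 *		... issue writeback work against wb ...
 *		wb_put(wb);
 *	}
 *
 * wb_find_current(), by contrast, returns an unreferenced pointer that
 * is only valid under the caller's rcu_read_lock().
 */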

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wb_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
	 * inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		spin_unlock_irqrestore(&inode->i_mapping->tree_lock,
				       cookie->flags);

	rcu_read_unlock();
}
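
/*
 * Editorial sketch (hypothetical example, not from this file) of the
 * begin/end transaction pair used by the unlocked dirty-accounting
 * paths:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... access wb state, no sleeping allowed ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 *
 * Nothing between begin and end may sleep: the transaction holds
 * rcu_read_lock() and, if a wb switch is in flight, tree_lock with
 * IRQs disabled.
 */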

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	atomic_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (atomic_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}
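
/*
 * Editorial sketch (hypothetical example, not from this file): callers
 * typically poll these predicates and back off with congestion_wait()
 * while the device is congested:
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 20);
 *
 * where BLK_RW_SYNC/BLK_RW_ASYNC (from <linux/blkdev.h>) select which
 * congestion bit to wait on.
 */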

extern const char *bdi_unknown_name;

static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
{
	if (!bdi || !bdi->dev)
		return bdi_unknown_name;
	return dev_name(bdi->dev);
}

#endif	/* _LINUX_BACKING_DEV_H */