#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>
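
/*
 * noop_backing_dev_info is the fallback bdi for address spaces that want
 * neither dirty accounting nor writeback; BDI_CAP_NO_ACCT_AND_WRITEBACK
 * disables both.
 */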
struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);
static struct class *bdi_class;
const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}
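
/*
 * bdi_debug_stats_show() backs the per-bdi "stats" file created by
 * bdi_debug_register() below, i.e. <debugfs>/bdi/<name>/stats.
 */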
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);
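
	/*
	 * K() converts a page count to KiB: a page is 1 << PAGE_SHIFT bytes,
	 * so dividing by 1024 leaves a shift of (PAGE_SHIFT - 10).  With
	 * 4 KiB pages that is x << 2.
	 */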
#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh), K(dirty_thresh), K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty, nr_io, nr_more_io, nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else	/* CONFIG_DEBUG_FS */
static inline void bdi_debug_init(void) { }
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name) { }
static inline void bdi_debug_unregister(struct backing_dev_info *bdi) { }
#endif	/* CONFIG_DEBUG_FS */

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
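
/*
 * The attribute generated above is visible under the "bdi" class, e.g.
 * /sys/class/bdi/8:0/read_ahead_kb for a block device.  Illustrative shell
 * usage (not part of this file; "8:0" is just an example device):
 *
 *	cat /sys/class/bdi/8:0/read_ahead_kb
 *	echo 512 > /sys/class/bdi/8:0/read_ahead_kb
 */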

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)
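
/*
 * min_ratio and max_ratio are percentages of the global dirty limit:
 * min_ratio reserves a share of it for this bdi, max_ratio caps the share.
 * bdi_set_min_ratio() and bdi_set_max_ratio() (mm/page-writeback.c) validate
 * the values written here.
 */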

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n",
			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
				 WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);
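
/*
 * Ordering: bdi_class_init() runs at postcore_initcall time so that the
 * "bdi" class exists before any device registers a bdi; default_bdi_init()
 * then sets up bdi_wq and the noop bdi at subsys_initcall time.
 */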

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}
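
/*
 * dirty_writeback_interval is in centisecs; the "* 10" above converts it to
 * milliseconds.  With the default of 500 centisecs the delayed wake-up fires
 * roughly five seconds later.
 */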

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
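
/*
 * INIT_BW is expressed in pages per second: 100 MB/s is (100 << 20) bytes/s,
 * and dividing by the page size leaves 100 << (20 - PAGE_SHIFT).  With 4 KiB
 * pages that is 25600 pages/s.
 */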

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   int blkcg_id, gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	if (wb != &bdi->wb)
		bdi_get(bdi);
	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	wb->dirty_sleep = jiffies;

	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
	if (!wb->congested) {
		err = -ENOMEM;
		goto out_put_bdi;
	}

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_cong;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}
	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
	wb_congested_put(wb->congested);
out_put_bdi:
	if (wb != &bdi->wb)
		bdi_put(bdi);
	return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain work list and shutdown the delayed_work. !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	wb_congested_put(wb->congested);
	if (wb != &wb->bdi->wb)
		bdi_put(wb->bdi);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list. bdi->cgwb_tree is also RCU
 * protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi. If missing, create one.
 * The returned wb_congested has its reference count incremented. Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	struct bdi_writeback_congested *new_congested = NULL, *congested;
	struct rb_node **node, *parent;
	unsigned long flags;
retry:
	spin_lock_irqsave(&cgwb_lock, flags);

	node = &bdi->cgwb_congested_tree.rb_node;
	parent = NULL;

	while (*node != NULL) {
		parent = *node;
		congested = rb_entry(parent, struct bdi_writeback_congested,
				     rb_node);
		if (congested->blkcg_id < blkcg_id)
			node = &parent->rb_left;
		else if (congested->blkcg_id > blkcg_id)
			node = &parent->rb_right;
		else
			goto found;
	}

	if (new_congested) {
		/* !found and storage for new one already allocated, insert */
		congested = new_congested;
		new_congested = NULL;
		rb_link_node(&congested->rb_node, parent, node);
		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
		goto found;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);

	/* allocate storage for new one and retry */
	new_congested = kzalloc(sizeof(*new_congested), gfp);
	if (!new_congested)
		return NULL;

	atomic_set(&new_congested->refcnt, 0);
	new_congested->__bdi = bdi;
	new_congested->blkcg_id = blkcg_id;
	goto retry;

found:
	atomic_inc(&congested->refcnt);
	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(new_congested);
	return congested;
}

/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
		local_irq_restore(flags);
		return;
	}

	/* bdi might already have been destroyed leaving @congested unlinked */
	if (congested->__bdi) {
		rb_erase(&congested->rb_node,
			 &congested->__bdi->cgwb_congested_tree);
		congested->__bdi = NULL;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(congested);
}

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);

	mutex_lock(&wb->bdi->cgwb_release_mutex);
	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);
	mutex_unlock(&wb->bdi->cgwb_release_mutex);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	kfree_rcu(wb, rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	queue_work(cgwb_release_wq, &wb->release_work);
}
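
/*
 * Release path: when the wb's percpu_ref drops to zero, cgwb_release() runs
 * in whatever context dropped the last reference, so it only queues the real
 * teardown (cgwb_release_workfn()) onto cgwb_release_wq, where blocking
 * calls such as wb_shutdown() are safe.
 */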

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online. Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi. If it doesn't exist, try to
 * create one. The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation. IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough. try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg. As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup. On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		rcu_read_lock();
		wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
		if (wb) {
			struct cgroup_subsys_state *blkcg_css;

			/* see whether the blkcg association has changed */
			blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
						     &io_cgrp_subsys);
			if (unlikely(wb->blkcg_css != blkcg_css ||
				     !wb_tryget(wb)))
				wb = NULL;
			css_put(blkcg_css);
		}
		rcu_read_unlock();
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}
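
/*
 * Illustrative caller pattern (not part of this file): a writer that already
 * holds a reference on @memcg_css could do
 *
 *	struct bdi_writeback *wb;
 *
 *	wb = wb_get_create(bdi, memcg_css, GFP_NOFS);
 *	if (wb) {
 *		... issue writeback against wb ...
 *		wb_put(wb);
 *	}
 *
 * wb_get_create() may return NULL if creation fails.
 */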

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	bdi->cgwb_congested_tree = RB_ROOT;
	mutex_init(&bdi->cgwb_release_mutex);
	init_rwsem(&bdi->wb_switch_rwsem);

	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);
	spin_lock_irq(&cgwb_lock);
	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	LIST_HEAD(to_destroy);
	struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	LIST_HEAD(to_destroy);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
	struct rb_node *rbn;

	spin_lock_irq(&cgwb_lock);
	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
		struct bdi_writeback_congested *congested =
			rb_entry(rbn, struct bdi_writeback_congested, rb_node);

		rb_erase(rbn, &bdi->cgwb_congested_tree);
		congested->__bdi = NULL;	/* mark @congested unlinked */
	}
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
	/*
	 * There can be many concurrent release work items overwhelming
	 * system_wq. Put them in a separate wq and limit concurrency.
	 * There's no point in executing many of these in parallel.
	 */
	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
	if (!cgwb_release_wq)
		return -ENOMEM;

	return 0;
}
subsys_initcall(cgwb_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int err;

	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
	if (!bdi->wb_congested)
		return -ENOMEM;

	atomic_set(&bdi->wb_congested->refcnt, 1);

	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (err) {
		wb_congested_put(bdi->wb_congested);
		return err;
	}
	return 0;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
	wb_congested_put(bdi->wb_congested);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static int bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	ret = cgwb_bdi_init(bdi);

	return ret;
}

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kmalloc_node(sizeof(struct backing_dev_info),
			   gfp_mask | __GFP_ZERO, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc_node);

int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	cgwb_bdi_register(bdi);
	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register_va);

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = bdi_register_va(bdi, fmt, args);
	va_end(args);
	return ret;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
{
	int rc;

	rc = bdi_register(bdi, "%u:%u", MAJOR(owner->devt), MINOR(owner->devt));
	if (rc)
		return rc;
	/* Leaking owner reference... */
	WARN_ON(bdi->owner);
	bdi->owner = owner;
	get_device(owner);
	return 0;
}
EXPORT_SYMBOL(bdi_register_owner);
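
/*
 * Illustrative driver-side lifecycle (not part of this file; "id" and the
 * device name are made up):
 *
 *	struct backing_dev_info *bdi;
 *
 *	bdi = bdi_alloc_node(GFP_KERNEL, NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	ret = bdi_register(bdi, "mydriver-%d", id);
 *	if (ret) {
 *		bdi_put(bdi);
 *		return ret;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */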

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_unregister(bdi);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}

static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
			container_of(ref, struct backing_dev_info, refcnt);

	if (test_bit(WB_registered, &bdi->wb.state))
		bdi_unregister(bdi);
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
	cgwb_bdi_exit(bdi);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &congested->state))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (!test_and_set_bit(bit, &congested->state))
		atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);
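
/*
 * The [2] arrays above are indexed by the @sync argument, which callers pass
 * as BLK_RW_ASYNC (0) or BLK_RW_SYNC (1); nr_wb_congested[] counts how many
 * wb_congested structs currently have the corresponding bit set.
 */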

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then the call simply
 * waits for the next write to complete.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
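
/*
 * Typical use (illustrative): memory reclaim throttles itself with something
 * like congestion_wait(BLK_RW_ASYNC, HZ/10), sleeping up to 100ms or until a
 * congested bdi clears.
 */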

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @pgdat: A pgdat to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If a backing_dev (any backing_dev) is congested and the given @pgdat has
 * experienced recent congestion, this waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or a write to
 * complete.
 *
 * In the absence of pgdat congestion, cond_resched() is called to yield
 * the processor if necessary, but the function otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current pgdat, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
	    !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char kbuf[] = "0\n";

	if (*ppos || *lenp < sizeof(kbuf)) {
		*lenp = 0;
		return 0;
	}

	if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
		return -EFAULT;
	pr_warn_once("%s exported in /proc is scheduled for removal\n",