/* GNU Linux-libre 4.9.304-gnu1: mm/backing-dev.c */

#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;
const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

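/*
 * Dump the writeback state of @bdi for its debugfs "stats" file: count
 * the inodes on each writeback list under wb->list_lock and print them
 * together with the current dirty thresholds (values marked kB are
 * converted from pages).
 */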
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb = &bdi->wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long wb_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
        struct inode *inode;

        nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
        spin_lock(&wb->list_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_io_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_io_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_io_list)
                nr_more_io++;
        list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
                if (inode->i_state & I_DIRTY_TIME)
                        nr_dirty_time++;
        spin_unlock(&wb->list_lock);

        global_dirty_limits(&background_thresh, &dirty_thresh);
        wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:       %10lu kB\n"
                   "BdiReclaimable:     %10lu kB\n"
                   "BdiDirtyThresh:     %10lu kB\n"
                   "DirtyThresh:        %10lu kB\n"
                   "BackgroundThresh:   %10lu kB\n"
                   "BdiDirtied:         %10lu kB\n"
                   "BdiWritten:         %10lu kB\n"
                   "BdiWriteBandwidth:  %10lu kBps\n"
                   "b_dirty:            %10lu\n"
                   "b_io:               %10lu\n"
                   "b_more_io:          %10lu\n"
                   "b_dirty_time:       %10lu\n"
                   "bdi_list:           %10u\n"
                   "state:              %10lx\n",
                   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
                   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
                   K(wb_thresh),
                   K(dirty_thresh),
                   K(background_thresh),
                   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
                   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
                   (unsigned long) K(wb->write_bandwidth),
                   nr_dirty,
                   nr_io,
                   nr_more_io,
                   nr_dirty_time,
                   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

        return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

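/*
 * Store handler for the per-bdi read_ahead_kb sysfs attribute: parse the
 * value as kilobytes and convert it to a page count in bdi->ra_pages.
 */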
static ssize_t read_ahead_kb_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned long read_ahead_kb;
        ssize_t ret;

        ret = kstrtoul(buf, 10, &read_ahead_kb);
        if (ret < 0)
                return ret;

        bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

        return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

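/*
 * BDI_SHOW() generates the sysfs show handler for a bdi attribute,
 * printing @expr for the bdi behind @dev, and declares the device
 * attribute itself.
 */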
#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
}                                                                       \
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_min_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_max_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *page)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);

        return snprintf(page, PAGE_SIZE-1, "%d\n",
                        bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
        &dev_attr_read_ahead_kb.attr,
        &dev_attr_min_ratio.attr,
        &dev_attr_max_ratio.attr,
        &dev_attr_stable_pages_required.attr,
        NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        if (IS_ERR(bdi_class))
                return PTR_ERR(bdi_class);

        bdi_class->dev_groups = bdi_dev_groups;
        bdi_debug_init();
        return 0;
}
postcore_initcall(bdi_class_init);

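/*
 * Allocate the shared bdi_wq workqueue used for all writeback work and
 * initialize noop_backing_dev_info.
 */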
static int __init default_bdi_init(void)
{
        int err;

        bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
                                              WQ_UNBOUND | WQ_SYSFS, 0);
        if (!bdi_wq)
                return -ENOMEM;

        err = bdi_init(&noop_backing_dev_info);

        return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
        unsigned long timeout;

        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
        spin_lock_bh(&wb->work_lock);
        if (test_bit(WB_registered, &wb->state))
                queue_delayed_work(bdi_wq, &wb->dwork, timeout);
        spin_unlock_bh(&wb->work_lock);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW         (100 << (20 - PAGE_SHIFT))

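/*
 * Initialize a bdi_writeback: zero it, seed the bandwidth estimates with
 * INIT_BW, attach the matching wb_congested (taking a reference) and set
 * up the per-cpu completion and stat counters.
 */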
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
                   int blkcg_id, gfp_t gfp)
{
        int i, err;

        memset(wb, 0, sizeof(*wb));

        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
        INIT_LIST_HEAD(&wb->b_dirty_time);
        spin_lock_init(&wb->list_lock);

        wb->bw_time_stamp = jiffies;
        wb->balanced_dirty_ratelimit = INIT_BW;
        wb->dirty_ratelimit = INIT_BW;
        wb->write_bandwidth = INIT_BW;
        wb->avg_write_bandwidth = INIT_BW;

        spin_lock_init(&wb->work_lock);
        INIT_LIST_HEAD(&wb->work_list);
        INIT_DELAYED_WORK(&wb->dwork, wb_workfn);

        wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
        if (!wb->congested)
                return -ENOMEM;

        err = fprop_local_init_percpu(&wb->completions, gfp);
        if (err)
                goto out_put_cong;

        for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
                err = percpu_counter_init(&wb->stat[i], 0, gfp);
                if (err)
                        goto out_destroy_stat;
        }

        return 0;

out_destroy_stat:
        while (i--)
                percpu_counter_destroy(&wb->stat[i]);
        fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
        wb_congested_put(wb->congested);
        return err;
}

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
        /* Make sure nobody queues further work */
        spin_lock_bh(&wb->work_lock);
        if (!test_and_clear_bit(WB_registered, &wb->state)) {
                spin_unlock_bh(&wb->work_lock);
                return;
        }
        spin_unlock_bh(&wb->work_lock);

        /*
         * Drain work list and shutdown the delayed_work.  !WB_registered
         * tells wb_workfn() that @wb is dying and its work_list needs to
         * be drained no matter what.
         */
        mod_delayed_work(bdi_wq, &wb->dwork, 0);
        flush_delayed_work(&wb->dwork);
        WARN_ON(!list_empty(&wb->work_list));
}

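/* undo wb_init(); the wb must already have been shut down */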
static void wb_exit(struct bdi_writeback *wb)
{
        int i;

        WARN_ON(delayed_work_pending(&wb->dwork));

        for (i = 0; i < NR_WB_STAT_ITEMS; i++)
                percpu_counter_destroy(&wb->stat[i]);

        fprop_local_destroy_percpu(&wb->completions);
        wb_congested_put(wb->congested);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 * protected.  cgwb_release_wait is used to wait for the completion of cgwb
 * releases from bdi destruction path.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static DECLARE_WAIT_QUEUE_HEAD(cgwb_release_wait);

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 * The returned wb_congested has its reference count incremented.  Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
        struct bdi_writeback_congested *new_congested = NULL, *congested;
        struct rb_node **node, *parent;
        unsigned long flags;
retry:
        spin_lock_irqsave(&cgwb_lock, flags);

        node = &bdi->cgwb_congested_tree.rb_node;
        parent = NULL;

        while (*node != NULL) {
                parent = *node;
                congested = container_of(parent, struct bdi_writeback_congested,
                                         rb_node);
                if (congested->blkcg_id < blkcg_id)
                        node = &parent->rb_left;
                else if (congested->blkcg_id > blkcg_id)
                        node = &parent->rb_right;
                else
                        goto found;
        }

        if (new_congested) {
                /* !found and storage for new one already allocated, insert */
                congested = new_congested;
                new_congested = NULL;
                rb_link_node(&congested->rb_node, parent, node);
                rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
                goto found;
        }

        spin_unlock_irqrestore(&cgwb_lock, flags);

        /* allocate storage for new one and retry */
        new_congested = kzalloc(sizeof(*new_congested), gfp);
        if (!new_congested)
                return NULL;

        atomic_set(&new_congested->refcnt, 0);
        new_congested->bdi = bdi;
        new_congested->blkcg_id = blkcg_id;
        goto retry;

found:
        atomic_inc(&congested->refcnt);
        spin_unlock_irqrestore(&cgwb_lock, flags);
        kfree(new_congested);
        return congested;
}

/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
        unsigned long flags;

        local_irq_save(flags);
        if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
                local_irq_restore(flags);
                return;
        }

        /* bdi might already have been destroyed leaving @congested unlinked */
        if (congested->bdi) {
                rb_erase(&congested->rb_node,
                         &congested->bdi->cgwb_congested_tree);
                congested->bdi = NULL;
        }

        spin_unlock_irqrestore(&cgwb_lock, flags);
        kfree(congested);
}

static void cgwb_release_workfn(struct work_struct *work)
{
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
                                                release_work);
        struct backing_dev_info *bdi = wb->bdi;

        spin_lock_irq(&cgwb_lock);
        list_del_rcu(&wb->bdi_node);
        spin_unlock_irq(&cgwb_lock);

        wb_shutdown(wb);

        css_put(wb->memcg_css);
        css_put(wb->blkcg_css);

        fprop_local_destroy_percpu(&wb->memcg_completions);
        percpu_ref_exit(&wb->refcnt);
        wb_exit(wb);
        kfree_rcu(wb, rcu);

        if (atomic_dec_and_test(&bdi->usage_cnt))
                wake_up_all(&cgwb_release_wait);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
        struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
                                                refcnt);
        schedule_work(&wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
        lockdep_assert_held(&cgwb_lock);

        WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
        list_del(&wb->memcg_node);
        list_del(&wb->blkcg_node);
        percpu_ref_kill(&wb->refcnt);
}

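/*
 * Create and install the cgwb for @memcg_css on @bdi, keyed by the memcg
 * css ID in bdi->cgwb_tree and linked on the bdi, memcg and blkcg lists.
 * If another instance won the race to insert, the allocation is undone
 * and 0 is returned so that the caller retries the lookup.
 */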
static int cgwb_create(struct backing_dev_info *bdi,
                       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
        struct mem_cgroup *memcg;
        struct cgroup_subsys_state *blkcg_css;
        struct blkcg *blkcg;
        struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
        struct bdi_writeback *wb;
        unsigned long flags;
        int ret = 0;

        memcg = mem_cgroup_from_css(memcg_css);
        blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
        blkcg = css_to_blkcg(blkcg_css);
        memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
        blkcg_cgwb_list = &blkcg->cgwb_list;

        /* look up again under lock and discard on blkcg mismatch */
        spin_lock_irqsave(&cgwb_lock, flags);
        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
        if (wb && wb->blkcg_css != blkcg_css) {
                cgwb_kill(wb);
                wb = NULL;
        }
        spin_unlock_irqrestore(&cgwb_lock, flags);
        if (wb)
                goto out_put;

        /* need to create a new one */
        wb = kmalloc(sizeof(*wb), gfp);
        if (!wb)
                return -ENOMEM;

        ret = wb_init(wb, bdi, blkcg_css->id, gfp);
        if (ret)
                goto err_free;

        ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
        if (ret)
                goto err_wb_exit;

        ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
        if (ret)
                goto err_ref_exit;

        wb->memcg_css = memcg_css;
        wb->blkcg_css = blkcg_css;
        INIT_WORK(&wb->release_work, cgwb_release_workfn);
        set_bit(WB_registered, &wb->state);

        /*
         * The root wb determines the registered state of the whole bdi and
         * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
         * whether they're still online.  Don't link @wb if any is dead.
         * See wb_memcg_offline() and wb_blkcg_offline().
         */
        ret = -ENODEV;
        spin_lock_irqsave(&cgwb_lock, flags);
        if (test_bit(WB_registered, &bdi->wb.state) &&
            blkcg_cgwb_list->next && memcg_cgwb_list->next) {
                /* we might have raced another instance of this function */
                ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
                if (!ret) {
                        atomic_inc(&bdi->usage_cnt);
                        list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
                        list_add(&wb->memcg_node, memcg_cgwb_list);
                        list_add(&wb->blkcg_node, blkcg_cgwb_list);
                        css_get(memcg_css);
                        css_get(blkcg_css);
                }
        }
        spin_unlock_irqrestore(&cgwb_lock, flags);
        if (ret) {
                if (ret == -EEXIST)
                        ret = 0;
                goto err_fprop_exit;
        }
        goto out_put;

err_fprop_exit:
        fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
        percpu_ref_exit(&wb->refcnt);
err_wb_exit:
        wb_exit(wb);
err_free:
        kfree(wb);
out_put:
        css_put(blkcg_css);
        return ret;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
                                    struct cgroup_subsys_state *memcg_css,
                                    gfp_t gfp)
{
        struct bdi_writeback *wb;

        might_sleep_if(gfpflags_allow_blocking(gfp));

        if (!memcg_css->parent)
                return &bdi->wb;

        do {
                rcu_read_lock();
                wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
                if (wb) {
                        struct cgroup_subsys_state *blkcg_css;

                        /* see whether the blkcg association has changed */
                        blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
                                                     &io_cgrp_subsys);
                        if (unlikely(wb->blkcg_css != blkcg_css ||
                                     !wb_tryget(wb)))
                                wb = NULL;
                        css_put(blkcg_css);
                }
                rcu_read_unlock();
        } while (!wb && !cgwb_create(bdi, memcg_css, gfp));

        return wb;
}

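/* set up the cgroup writeback state embedded in @bdi, including its root wb */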
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
        int ret;

        INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
        bdi->cgwb_congested_tree = RB_ROOT;
        atomic_set(&bdi->usage_cnt, 1);
        init_rwsem(&bdi->wb_switch_rwsem);

        ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (!ret) {
                bdi->wb.memcg_css = &root_mem_cgroup->css;
                bdi->wb.blkcg_css = blkcg_root_css;
        }
        return ret;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
{
        struct radix_tree_iter iter;
        struct rb_node *rbn;
        void **slot;

        WARN_ON(test_bit(WB_registered, &bdi->wb.state));

        spin_lock_irq(&cgwb_lock);

        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);

        while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
                struct bdi_writeback_congested *congested =
                        rb_entry(rbn, struct bdi_writeback_congested, rb_node);

                rb_erase(rbn, &bdi->cgwb_congested_tree);
                congested->bdi = NULL;  /* mark @congested unlinked */
        }

        spin_unlock_irq(&cgwb_lock);

        /*
         * All cgwb's and their congested states must be shutdown and
         * released before returning.  Drain the usage counter to wait for
         * all cgwb's and cgwb_congested's ever created on @bdi.
         */
        atomic_dec(&bdi->usage_cnt);
        wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
        LIST_HEAD(to_destroy);
        struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
        struct bdi_writeback *wb, *next;

        spin_lock_irq(&cgwb_lock);
        list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
                cgwb_kill(wb);
        memcg_cgwb_list->next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
        LIST_HEAD(to_destroy);
        struct bdi_writeback *wb, *next;

        spin_lock_irq(&cgwb_lock);
        list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
                cgwb_kill(wb);
        blkcg->cgwb_list.next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);
}

#else   /* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
        int err;

        bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
        if (!bdi->wb_congested)
                return -ENOMEM;

        atomic_set(&bdi->wb_congested->refcnt, 1);

        err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (err) {
                wb_congested_put(bdi->wb_congested);
                return err;
        }
        return 0;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
{
        wb_congested_put(bdi->wb_congested);
}

#endif  /* CONFIG_CGROUP_WRITEBACK */

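/* initialize @bdi to its default state and set up its embedded root wb */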
int bdi_init(struct backing_dev_info *bdi)
{
        int ret;

        bdi->dev = NULL;

        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = FPROP_FRAC_BASE;
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->wb_list);
        init_waitqueue_head(&bdi->wb_waitq);

        ret = cgwb_bdi_init(bdi);

        list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);

        return ret;
}
EXPORT_SYMBOL(bdi_init);

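/*
 * Create the sysfs device for @bdi, register its debugfs directory, mark
 * the root wb registered and add @bdi to the global bdi_list.
 */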
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...)
{
        va_list args;
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                return 0;

        va_start(args, fmt);
        dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
        va_end(args);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        bdi->dev = dev;

        bdi_debug_register(bdi, dev_name(dev));
        set_bit(WB_registered, &bdi->wb.state);

        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        trace_writeback_bdi_register(bdi);
        return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
        return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
{
        int rc;

        rc = bdi_register(bdi, NULL, "%u:%u", MAJOR(owner->devt),
                        MINOR(owner->devt));
        if (rc)
                return rc;
        bdi->owner = owner;
        get_device(owner);
        return 0;
}
EXPORT_SYMBOL(bdi_register_owner);

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi_lock);
        list_del_rcu(&bdi->bdi_list);
        spin_unlock_bh(&bdi_lock);

        synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
        /* make sure nobody finds us on the bdi_list anymore */
        bdi_remove_from_list(bdi);
        wb_shutdown(&bdi->wb);
        cgwb_bdi_destroy(bdi);

        /*
         * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
         * update the global bdi_min_ratio.
         */
        if (bdi->min_ratio)
                bdi_set_min_ratio(bdi, 0);

        if (bdi->dev) {
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }

        if (bdi->owner) {
                put_device(bdi->owner);
                bdi->owner = NULL;
        }
}

void bdi_exit(struct backing_dev_info *bdi)
{
        WARN_ON_ONCE(bdi->dev);
        wb_exit(&bdi->wb);
}

void bdi_destroy(struct backing_dev_info *bdi)
{
        bdi_unregister(bdi);
        bdi_exit(bdi);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
{
        int err;

        bdi->name = name;
        bdi->capabilities = 0;
        err = bdi_init(bdi);
        if (err)
                return err;

        err = bdi_register(bdi, NULL, "%.28s-%ld", name,
                           atomic_long_inc_return(&bdi_seq));
        if (err) {
                bdi_destroy(bdi);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);

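/*
 * Tasks throttled in congestion_wait()/wait_iff_congested() sleep on
 * these per-direction (async/sync) wait queues; nr_wb_congested[] counts
 * how many wbs are currently congested in each direction.
 */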
static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
        wait_queue_head_t *wqh = &congestion_wqh[sync];
        enum wb_congested_state bit;

        bit = sync ? WB_sync_congested : WB_async_congested;
        if (test_and_clear_bit(bit, &congested->state))
                atomic_dec(&nr_wb_congested[sync]);
        smp_mb__after_atomic();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
        enum wb_congested_state bit;

        bit = sync ? WB_sync_congested : WB_async_congested;
        if (!test_and_set_bit(bit, &congested->state))
                atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

        trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(congestion_wait);

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @pgdat: A pgdat to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If a backing_dev (any backing_dev) is congested and the given @pgdat has
 * experienced recent congestion, this waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or a write to
 * complete.
 *
 * In the absence of pgdat congestion, cond_resched() is called to yield
 * the processor if necessary, but the function otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        /*
         * If there is no congestion, or heavy congestion is not being
         * encountered in the current pgdat, yield if necessary instead
         * of sleeping on the congestion queue
         */
        if (atomic_read(&nr_wb_congested[sync]) == 0 ||
            !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
                cond_resched();

                /* In case we scheduled, work out time remaining */
                ret = timeout - (jiffies - start);
                if (ret < 0)
                        ret = 0;

                goto out;
        }

        /* Sleep until uncongested or a write happens */
        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

out:
        trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

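/*
 * Handler for the obsolete pdflush tunables still exported in /proc:
 * always reports "0" and warns once that the entry is scheduled for
 * removal.
 */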
int pdflush_proc_obsolete(struct ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
{
        char kbuf[] = "0\n";

        if (*ppos || *lenp < sizeof(kbuf)) {
                *lenp = 0;
                return 0;
        }

        if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
                return -EFAULT;
        pr_warn_once("%s exported in /proc is scheduled for removal\n",
                     table->procname);

        *lenp = 2;
        *ppos += *lenp;
        return 2;
}