/* mm/backing-dev.c - GNU Linux-libre 4.14.332-gnu1 (releases.git) */

#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;
const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb = &bdi->wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long wb_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
        struct inode *inode;

        nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
        spin_lock(&wb->list_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_io_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_io_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_io_list)
                nr_more_io++;
        list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
                if (inode->i_state & I_DIRTY_TIME)
                        nr_dirty_time++;
        spin_unlock(&wb->list_lock);

        global_dirty_limits(&background_thresh, &dirty_thresh);
        wb_thresh = wb_calc_thresh(wb, dirty_thresh);

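/* K(x) converts pages to kB: shifting by PAGE_SHIFT - 10 multiplies by PAGE_SIZE / 1024 */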
#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:       %10lu kB\n"
                   "BdiReclaimable:     %10lu kB\n"
                   "BdiDirtyThresh:     %10lu kB\n"
                   "DirtyThresh:        %10lu kB\n"
                   "BackgroundThresh:   %10lu kB\n"
                   "BdiDirtied:         %10lu kB\n"
                   "BdiWritten:         %10lu kB\n"
                   "BdiWriteBandwidth:  %10lu kBps\n"
                   "b_dirty:            %10lu\n"
                   "b_io:               %10lu\n"
                   "b_more_io:          %10lu\n"
                   "b_dirty_time:       %10lu\n"
                   "bdi_list:           %10u\n"
                   "state:              %10lx\n",
                   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
                   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
                   K(wb_thresh),
                   K(dirty_thresh),
                   K(background_thresh),
                   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
                   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
                   (unsigned long) K(wb->write_bandwidth),
                   nr_dirty,
                   nr_io,
                   nr_more_io,
                   nr_dirty_time,
                   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

        return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}
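
/*
 * With debugfs mounted in the usual place, the file created above shows
 * up as /sys/kernel/debug/bdi/<dev>/stats, e.g. "8:0" for the first
 * SCSI disk.
 */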

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned long read_ahead_kb;
        ssize_t ret;

        ret = kstrtoul(buf, 10, &read_ahead_kb);
        if (ret < 0)
                return ret;

        bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

        return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
}                                                                       \
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
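
/*
 * For reference, the BDI_SHOW() line above expands (modulo whitespace)
 * to:
 *
 *      static ssize_t read_ahead_kb_show(struct device *dev,
 *                                        struct device_attribute *attr,
 *                                        char *page)
 *      {
 *              struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *              return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *                              (long long)K(bdi->ra_pages));
 *      }
 *      static DEVICE_ATTR_RW(read_ahead_kb);
 *
 * which, paired with read_ahead_kb_store() above, backs the
 * /sys/class/bdi/<dev>/read_ahead_kb attribute.
 */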

static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_min_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_max_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *page)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);

        return snprintf(page, PAGE_SIZE-1, "%d\n",
                        bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
        &dev_attr_read_ahead_kb.attr,
        &dev_attr_min_ratio.attr,
        &dev_attr_max_ratio.attr,
        &dev_attr_stable_pages_required.attr,
        NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        if (IS_ERR(bdi_class))
                return PTR_ERR(bdi_class);

        bdi_class->dev_groups = bdi_dev_groups;
        bdi_debug_init();

        return 0;
}
postcore_initcall(bdi_class_init);
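/*
 * postcore_initcall() runs before the subsys_initcall() used for
 * default_bdi_init() below, so bdi_class is guaranteed to exist by the
 * time the first bdi device is created.
 */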

static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
        int err;

        bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
                                 WQ_SYSFS, 0);
        if (!bdi_wq)
                return -ENOMEM;

        err = bdi_init(&noop_backing_dev_info);

        return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
        unsigned long timeout;

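        /* dirty_writeback_interval is in centiseconds, hence the factor of 10 */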
        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
        spin_lock_bh(&wb->work_lock);
        if (test_bit(WB_registered, &wb->state))
                queue_delayed_work(bdi_wq, &wb->dwork, timeout);
        spin_unlock_bh(&wb->work_lock);
}

/*
 * Initial write bandwidth: 100 MB/s, stored in pages per second
 * ((100 << 20) bytes/s >> PAGE_SHIFT = 100 << (20 - PAGE_SHIFT) pages/s).
 */
#define INIT_BW         (100 << (20 - PAGE_SHIFT))

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
                   int blkcg_id, gfp_t gfp)
{
        int i, err;

        memset(wb, 0, sizeof(*wb));

        if (wb != &bdi->wb)
                bdi_get(bdi);
        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
        INIT_LIST_HEAD(&wb->b_dirty_time);
        spin_lock_init(&wb->list_lock);

        wb->bw_time_stamp = jiffies;
        wb->balanced_dirty_ratelimit = INIT_BW;
        wb->dirty_ratelimit = INIT_BW;
        wb->write_bandwidth = INIT_BW;
        wb->avg_write_bandwidth = INIT_BW;

        spin_lock_init(&wb->work_lock);
        INIT_LIST_HEAD(&wb->work_list);
        INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
        wb->dirty_sleep = jiffies;

        wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
        if (!wb->congested) {
                err = -ENOMEM;
                goto out_put_bdi;
        }

        err = fprop_local_init_percpu(&wb->completions, gfp);
        if (err)
                goto out_put_cong;

        for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
                err = percpu_counter_init(&wb->stat[i], 0, gfp);
                if (err)
                        goto out_destroy_stat;
        }

        return 0;

out_destroy_stat:
        while (i--)
                percpu_counter_destroy(&wb->stat[i]);
        fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
        wb_congested_put(wb->congested);
out_put_bdi:
        if (wb != &bdi->wb)
                bdi_put(bdi);
        return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shut down any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
        /* Make sure nobody queues further work */
        spin_lock_bh(&wb->work_lock);
        if (!test_and_clear_bit(WB_registered, &wb->state)) {
                spin_unlock_bh(&wb->work_lock);
                return;
        }
        spin_unlock_bh(&wb->work_lock);

        cgwb_remove_from_bdi_list(wb);
        /*
         * Drain work list and shutdown the delayed_work.  !WB_registered
         * tells wb_workfn() that @wb is dying and its work_list needs to
         * be drained no matter what.
         */
        mod_delayed_work(bdi_wq, &wb->dwork, 0);
        flush_delayed_work(&wb->dwork);
        WARN_ON(!list_empty(&wb->work_list));
}

static void wb_exit(struct bdi_writeback *wb)
{
        int i;

        WARN_ON(delayed_work_pending(&wb->dwork));

        for (i = 0; i < NR_WB_STAT_ITEMS; i++)
                percpu_counter_destroy(&wb->stat[i]);

        fprop_local_destroy_percpu(&wb->completions);
        wb_congested_put(wb->congested);
        if (wb != &wb->bdi->wb)
                bdi_put(wb->bdi);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 * protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 * The returned wb_congested has its reference count incremented.  Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
        struct bdi_writeback_congested *new_congested = NULL, *congested;
        struct rb_node **node, *parent;
        unsigned long flags;
retry:
        spin_lock_irqsave(&cgwb_lock, flags);

        node = &bdi->cgwb_congested_tree.rb_node;
        parent = NULL;

        while (*node != NULL) {
                parent = *node;
                congested = rb_entry(parent, struct bdi_writeback_congested,
                                     rb_node);
                if (congested->blkcg_id < blkcg_id)
                        node = &parent->rb_left;
                else if (congested->blkcg_id > blkcg_id)
                        node = &parent->rb_right;
                else
                        goto found;
        }

        if (new_congested) {
                /* !found and storage for new one already allocated, insert */
                congested = new_congested;
                new_congested = NULL;
                rb_link_node(&congested->rb_node, parent, node);
                rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
                goto found;
        }

        spin_unlock_irqrestore(&cgwb_lock, flags);

        /* allocate storage for new one and retry */
        new_congested = kzalloc(sizeof(*new_congested), gfp);
        if (!new_congested)
                return NULL;

        atomic_set(&new_congested->refcnt, 0);
        new_congested->__bdi = bdi;
        new_congested->blkcg_id = blkcg_id;
        goto retry;

found:
        atomic_inc(&congested->refcnt);
        spin_unlock_irqrestore(&cgwb_lock, flags);
        kfree(new_congested);
        return congested;
}
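
/*
 * Note the allocate-outside-the-lock pattern above: the rbtree is walked
 * under cgwb_lock, but kzalloc() may sleep depending on @gfp, so on a
 * miss the lock is dropped, storage is allocated, and the walk is retried
 * from scratch.  A loser of the resulting race simply kfree()s its unused
 * node on the way out.
 */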

/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
        unsigned long flags;

        local_irq_save(flags);
        if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
                local_irq_restore(flags);
                return;
        }

        /* bdi might already have been destroyed leaving @congested unlinked */
        if (congested->__bdi) {
                rb_erase(&congested->rb_node,
                         &congested->__bdi->cgwb_congested_tree);
                congested->__bdi = NULL;
        }

        spin_unlock_irqrestore(&cgwb_lock, flags);
        kfree(congested);
}
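
/*
 * atomic_dec_and_lock() above takes cgwb_lock only when the refcount
 * actually drops to zero, so the common-case put stays lock-free.  The
 * explicit local_irq_save() is needed because cgwb_lock is an irq-safe
 * lock elsewhere in this file and atomic_dec_and_lock() itself does not
 * disable interrupts.
 */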

static void cgwb_release_workfn(struct work_struct *work)
{
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
                                                release_work);

        mutex_lock(&wb->bdi->cgwb_release_mutex);
        wb_shutdown(wb);

        css_put(wb->memcg_css);
        css_put(wb->blkcg_css);
        mutex_unlock(&wb->bdi->cgwb_release_mutex);

        fprop_local_destroy_percpu(&wb->memcg_completions);
        percpu_ref_exit(&wb->refcnt);
        wb_exit(wb);
        kfree_rcu(wb, rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
        struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
                                                refcnt);
        queue_work(cgwb_release_wq, &wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
        lockdep_assert_held(&cgwb_lock);

        WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
        list_del(&wb->memcg_node);
        list_del(&wb->blkcg_node);
        percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
        spin_lock_irq(&cgwb_lock);
        list_del_rcu(&wb->bdi_node);
        spin_unlock_irq(&cgwb_lock);
}

static int cgwb_create(struct backing_dev_info *bdi,
                       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
        struct mem_cgroup *memcg;
        struct cgroup_subsys_state *blkcg_css;
        struct blkcg *blkcg;
        struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
        struct bdi_writeback *wb;
        unsigned long flags;
        int ret = 0;

        memcg = mem_cgroup_from_css(memcg_css);
        blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
        blkcg = css_to_blkcg(blkcg_css);
        memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
        blkcg_cgwb_list = &blkcg->cgwb_list;

        /* look up again under lock and discard on blkcg mismatch */
        spin_lock_irqsave(&cgwb_lock, flags);
        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
        if (wb && wb->blkcg_css != blkcg_css) {
                cgwb_kill(wb);
                wb = NULL;
        }
        spin_unlock_irqrestore(&cgwb_lock, flags);
        if (wb)
                goto out_put;

        /* need to create a new one */
        wb = kmalloc(sizeof(*wb), gfp);
        if (!wb) {
                ret = -ENOMEM;
                goto out_put;
        }

        ret = wb_init(wb, bdi, blkcg_css->id, gfp);
        if (ret)
                goto err_free;

        ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
        if (ret)
                goto err_wb_exit;

        ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
        if (ret)
                goto err_ref_exit;

        wb->memcg_css = memcg_css;
        wb->blkcg_css = blkcg_css;
        INIT_WORK(&wb->release_work, cgwb_release_workfn);
        set_bit(WB_registered, &wb->state);

        /*
         * The root wb determines the registered state of the whole bdi and
         * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
         * whether they're still online.  Don't link @wb if any is dead.
         * See wb_memcg_offline() and wb_blkcg_offline().
         */
        ret = -ENODEV;
        spin_lock_irqsave(&cgwb_lock, flags);
        if (test_bit(WB_registered, &bdi->wb.state) &&
            blkcg_cgwb_list->next && memcg_cgwb_list->next) {
                /* we might have raced another instance of this function */
                ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
                if (!ret) {
                        list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
                        list_add(&wb->memcg_node, memcg_cgwb_list);
                        list_add(&wb->blkcg_node, blkcg_cgwb_list);
                        css_get(memcg_css);
                        css_get(blkcg_css);
                }
        }
        spin_unlock_irqrestore(&cgwb_lock, flags);
        if (ret) {
                if (ret == -EEXIST)
                        ret = 0;
                goto err_fprop_exit;
        }
        goto out_put;

err_fprop_exit:
        fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
        percpu_ref_exit(&wb->refcnt);
err_wb_exit:
        wb_exit(wb);
err_free:
        kfree(wb);
out_put:
        css_put(blkcg_css);
        return ret;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
                                    struct cgroup_subsys_state *memcg_css,
                                    gfp_t gfp)
{
        struct bdi_writeback *wb;

        might_sleep_if(gfpflags_allow_blocking(gfp));

        if (!memcg_css->parent)
                return &bdi->wb;

        do {
                rcu_read_lock();
                wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
                if (wb) {
                        struct cgroup_subsys_state *blkcg_css;

                        /* see whether the blkcg association has changed */
                        blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
                                                     &io_cgrp_subsys);
                        if (unlikely(wb->blkcg_css != blkcg_css ||
                                     !wb_tryget(wb)))
                                wb = NULL;
                        css_put(blkcg_css);
                }
                rcu_read_unlock();
        } while (!wb && !cgwb_create(bdi, memcg_css, gfp));

        return wb;
}
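
/*
 * Hedged usage sketch (the real callers live in fs/fs-writeback.c and
 * include/linux/backing-dev.h, e.g. wb_get_create_current()):
 *
 *      memcg_css = task_get_css(current, memory_cgrp_id);
 *      wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *      css_put(memcg_css);
 *      if (wb) {
 *              ... issue writeback against wb ...
 *              wb_put(wb);
 *      }
 */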

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
        int ret;

        INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
        bdi->cgwb_congested_tree = RB_ROOT;
        mutex_init(&bdi->cgwb_release_mutex);
        init_rwsem(&bdi->wb_switch_rwsem);

        ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (!ret) {
                bdi->wb.memcg_css = &root_mem_cgroup->css;
                bdi->wb.blkcg_css = blkcg_root_css;
        }
        return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
        struct radix_tree_iter iter;
        void **slot;
        struct bdi_writeback *wb;

        WARN_ON(test_bit(WB_registered, &bdi->wb.state));

        spin_lock_irq(&cgwb_lock);
        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);
        spin_unlock_irq(&cgwb_lock);

        mutex_lock(&bdi->cgwb_release_mutex);
        spin_lock_irq(&cgwb_lock);
        while (!list_empty(&bdi->wb_list)) {
                wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
                                      bdi_node);
                spin_unlock_irq(&cgwb_lock);
                wb_shutdown(wb);
                spin_lock_irq(&cgwb_lock);
        }
        spin_unlock_irq(&cgwb_lock);
        mutex_unlock(&bdi->cgwb_release_mutex);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
        LIST_HEAD(to_destroy);
        struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
        struct bdi_writeback *wb, *next;

        spin_lock_irq(&cgwb_lock);
        list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
                cgwb_kill(wb);
        memcg_cgwb_list->next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
        LIST_HEAD(to_destroy);
        struct bdi_writeback *wb, *next;

        spin_lock_irq(&cgwb_lock);
        list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
                cgwb_kill(wb);
        blkcg->cgwb_list.next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
        struct rb_node *rbn;

        spin_lock_irq(&cgwb_lock);
        while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
                struct bdi_writeback_congested *congested =
                        rb_entry(rbn, struct bdi_writeback_congested, rb_node);

                rb_erase(rbn, &bdi->cgwb_congested_tree);
                congested->__bdi = NULL;        /* mark @congested unlinked */
        }
        spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
        spin_lock_irq(&cgwb_lock);
        list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
        spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
        /*
         * There can be many concurrent release work items overwhelming
         * system_wq.  Put them in a separate wq and limit concurrency.
         * There's no point in executing many of these in parallel.
         */
        cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
        if (!cgwb_release_wq)
                return -ENOMEM;

        return 0;
}
subsys_initcall(cgwb_init);

#else   /* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
        int err;

        bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
        if (!bdi->wb_congested)
                return -ENOMEM;

        atomic_set(&bdi->wb_congested->refcnt, 1);

        err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (err) {
                wb_congested_put(bdi->wb_congested);
                return err;
        }
        return 0;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
        wb_congested_put(bdi->wb_congested);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
        list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
        list_del_rcu(&wb->bdi_node);
}

#endif  /* CONFIG_CGROUP_WRITEBACK */

static int bdi_init(struct backing_dev_info *bdi)
{
        int ret;

        bdi->dev = NULL;

        kref_init(&bdi->refcnt);
        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = FPROP_FRAC_BASE;
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->wb_list);
        init_waitqueue_head(&bdi->wb_waitq);

        ret = cgwb_bdi_init(bdi);

        return ret;
}

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
{
        struct backing_dev_info *bdi;

        bdi = kmalloc_node(sizeof(struct backing_dev_info),
                           gfp_mask | __GFP_ZERO, node_id);
        if (!bdi)
                return NULL;

        if (bdi_init(bdi)) {
                kfree(bdi);
                return NULL;
        }
        return bdi;
}
EXPORT_SYMBOL(bdi_alloc_node);
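
/*
 * A hedged sketch of the driver-side life cycle (block drivers normally
 * get all of this for free via their request_queue; the device name and
 * id below are made up for illustration):
 *
 *      bdi = bdi_alloc_node(GFP_KERNEL, NUMA_NO_NODE);
 *      if (!bdi)
 *              return -ENOMEM;
 *      err = bdi_register(bdi, "mydev%d", id);
 *      if (err) {
 *              bdi_put(bdi);
 *              return err;
 *      }
 *      ...
 *      bdi_unregister(bdi);
 *      bdi_put(bdi);
 */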

int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                return 0;

        dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        cgwb_bdi_register(bdi);
        bdi->dev = dev;

        bdi_debug_register(bdi, dev_name(dev));
        set_bit(WB_registered, &bdi->wb.state);

        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        trace_writeback_bdi_register(bdi);
        return 0;
}
EXPORT_SYMBOL(bdi_register_va);

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = bdi_register_va(bdi, fmt, args);
        va_end(args);
        return ret;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
{
        int rc;

        rc = bdi_register(bdi, "%u:%u", MAJOR(owner->devt), MINOR(owner->devt));
        if (rc)
                return rc;
        /* Leaking owner reference... */
        WARN_ON(bdi->owner);
        bdi->owner = owner;
        get_device(owner);
        return 0;
}
EXPORT_SYMBOL(bdi_register_owner);

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi_lock);
        list_del_rcu(&bdi->bdi_list);
        spin_unlock_bh(&bdi_lock);

        synchronize_rcu_expedited();
}
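
/*
 * The synchronize_rcu_expedited() above makes unregistration wait for
 * all RCU-protected bdi_list walkers to finish before the bdi is torn
 * down; the expedited variant keeps device-removal latency low.
 */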

void bdi_unregister(struct backing_dev_info *bdi)
{
        /* make sure nobody finds us on the bdi_list anymore */
        bdi_remove_from_list(bdi);
        wb_shutdown(&bdi->wb);
        cgwb_bdi_unregister(bdi);

        /*
         * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
         * update the global bdi_min_ratio.
         */
        if (bdi->min_ratio)
                bdi_set_min_ratio(bdi, 0);

        if (bdi->dev) {
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }

        if (bdi->owner) {
                put_device(bdi->owner);
                bdi->owner = NULL;
        }
}

static void release_bdi(struct kref *ref)
{
        struct backing_dev_info *bdi =
                        container_of(ref, struct backing_dev_info, refcnt);

        if (test_bit(WB_registered, &bdi->wb.state))
                bdi_unregister(bdi);
        WARN_ON_ONCE(bdi->dev);
        wb_exit(&bdi->wb);
        cgwb_bdi_exit(bdi);
        kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
        kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
        wait_queue_head_t *wqh = &congestion_wqh[sync];
        enum wb_congested_state bit;

        bit = sync ? WB_sync_congested : WB_async_congested;
        if (test_and_clear_bit(bit, &congested->state))
                atomic_dec(&nr_wb_congested[sync]);
        smp_mb__after_atomic();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
        enum wb_congested_state bit;

        bit = sync ? WB_sync_congested : WB_async_congested;
        if (!test_and_set_bit(bit, &congested->state))
                atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

        trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(congestion_wait);
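
/*
 * Hedged example: reclaim-side throttling typically looks like
 *
 *      if (too_many_dirty_pages)       // hypothetical condition
 *              congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * i.e. back off for up to 100ms or until some bdi exits async write
 * congestion, whichever comes first.
 */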

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @pgdat: A pgdat to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If any backing_dev is congested and the given @pgdat has experienced
 * recent congestion, this function waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or a write to
 * complete.
 *
 * In the absence of pgdat congestion, this function calls cond_resched()
 * to yield the processor if necessary, but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        /*
         * If there is no congestion, or heavy congestion is not being
         * encountered in the current pgdat, yield if necessary instead
         * of sleeping on the congestion queue
         */
        if (atomic_read(&nr_wb_congested[sync]) == 0 ||
            !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
                cond_resched();

                /* In case we scheduled, work out time remaining */
                ret = timeout - (jiffies - start);
                if (ret < 0)
                        ret = 0;

                goto out;
        }

        /* Sleep until uncongested or a write happens */
        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

out:
        trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

int pdflush_proc_obsolete(struct ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
{
        char kbuf[] = "0\n";

        if (*ppos || *lenp < sizeof(kbuf)) {
                *lenp = 0;
                return 0;
        }

        if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
                return -EFAULT;
        pr_warn_once("%s exported in /proc is scheduled for removal\n",
                     table->procname);

        *lenp = 2;
        *ppos += *lenp;
        return 2;
}