/* GNU Linux-libre 4.4.289-gnu1: include/trace/events/writeback.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

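/*
 * Render an inode->i_state bitmask as a "|"-separated list of flag names
 * in the trace output.
 */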
#define show_inode_state(state)                                 \
        __print_flags(state, "|",                               \
                {I_DIRTY_SYNC,          "I_DIRTY_SYNC"},        \
                {I_DIRTY_DATASYNC,      "I_DIRTY_DATASYNC"},    \
                {I_DIRTY_PAGES,         "I_DIRTY_PAGES"},       \
                {I_NEW,                 "I_NEW"},               \
                {I_WILL_FREE,           "I_WILL_FREE"},         \
                {I_FREEING,             "I_FREEING"},           \
                {I_CLEAR,               "I_CLEAR"},             \
                {I_SYNC,                "I_SYNC"},              \
                {I_DIRTY_TIME,          "I_DIRTY_TIME"},        \
                {I_DIRTY_TIME_EXPIRED,  "I_DIRTY_TIME_EXPIRED"}, \
                {I_REFERENCED,          "I_REFERENCED"}         \
        )

/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b)         TRACE_DEFINE_ENUM(a);
#define EMe(a,b)        TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON                                                  \
        EM( WB_REASON_BACKGROUND,               "background")           \
        EM( WB_REASON_TRY_TO_FREE_PAGES,        "try_to_free_pages")    \
        EM( WB_REASON_SYNC,                     "sync")                 \
        EM( WB_REASON_PERIODIC,                 "periodic")             \
        EM( WB_REASON_LAPTOP_TIMER,             "laptop_timer")         \
        EM( WB_REASON_FREE_MORE_MEM,            "free_more_memory")     \
        EM( WB_REASON_FS_FREE_SPACE,            "fs_free_space")        \
        EMe(WB_REASON_FORKER_THREAD,            "forker_thread")

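/*
 * Expanding WB_WORK_REASON here emits a TRACE_DEFINE_ENUM() for each
 * WB_REASON_* value, so the numeric values are visible to the tracing
 * core and can be resolved by userspace tools.
 */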
WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)         { a, b },
#define EMe(a,b)        { a, b }

struct wb_writeback_work;

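/* A page cache page was dirtied: backing device, inode number, page index. */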
TRACE_EVENT(writeback_dirty_page,

        TP_PROTO(struct page *page, struct address_space *mapping),

        TP_ARGS(page, mapping),

        TP_STRUCT__entry (
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(pgoff_t, index)
        ),

        TP_fast_assign(
                strscpy_pad(__entry->name,
                            bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
                                         NULL), 32);
                __entry->ino = mapping ? mapping->host->i_ino : 0;
                __entry->index = page->index;
        ),

        TP_printk("bdi %s: ino=%lu index=%lu",
                __entry->name,
                __entry->ino,
                __entry->index
        )
);

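/*
 * Events emitted while an inode is marked dirty: @flags are the I_DIRTY_*
 * bits being set, @state is the inode's i_state at that point.
 */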
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags),

        TP_STRUCT__entry (
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(unsigned long, state)
                __field(unsigned long, flags)
        ),

        TP_fast_assign(
                struct backing_dev_info *bdi = inode_to_bdi(inode);

                /* may be called for files on pseudo FSes w/ unregistered bdi */
                strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
                __entry->ino            = inode->i_ino;
                __entry->state          = inode->i_state;
                __entry->flags          = flags;
        ),

        TP_printk("bdi %s: ino=%lu state=%s flags=%s",
                __entry->name,
                __entry->ino,
                show_inode_state(__entry->state),
                show_inode_state(__entry->flags)
        )
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags)
);

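/*
 * Helpers for the cgroup path recorded by several events below.  They are
 * only needed where the tracepoints are instantiated (CREATE_TRACE_POINTS).
 * Without CONFIG_CGROUP_WRITEBACK the path is always the root, "/".
 */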
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
{
        return kernfs_path_len(wb->memcg_css->cgroup->kn) + 1;
}

static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
{
        struct cgroup *cgrp = wb->memcg_css->cgroup;
        char *path;

        path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1);
        WARN_ON_ONCE(path != buf);
}

static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
{
        if (wbc->wb)
                return __trace_wb_cgroup_size(wbc->wb);
        else
                return 2;
}

static inline void __trace_wbc_assign_cgroup(char *buf,
                                             struct writeback_control *wbc)
{
        if (wbc->wb)
                __trace_wb_assign_cgroup(buf, wbc->wb);
        else
                strcpy(buf, "/");
}

#else   /* CONFIG_CGROUP_WRITEBACK */

static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
{
        return 2;
}

static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
{
        strcpy(buf, "/");
}

static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
{
        return 2;
}

static inline void __trace_wbc_assign_cgroup(char *buf,
                                             struct writeback_control *wbc)
{
        strcpy(buf, "/");
}

#endif  /* CONFIG_CGROUP_WRITEBACK */
#endif  /* CREATE_TRACE_POINTS */

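/*
 * Events around writing an inode back: record the writeback_control's
 * sync_mode and the issuing cgroup.
 */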
DECLARE_EVENT_CLASS(writeback_write_inode_template,

        TP_PROTO(struct inode *inode, struct writeback_control *wbc),

        TP_ARGS(inode, wbc),

        TP_STRUCT__entry (
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(int, sync_mode)
                __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
        ),

        TP_fast_assign(
                strscpy_pad(__entry->name,
                            bdi_dev_name(inode_to_bdi(inode)), 32);
                __entry->ino            = inode->i_ino;
                __entry->sync_mode      = wbc->sync_mode;
                __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
        ),

        TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup=%s",
                __entry->name,
                __entry->ino,
                __entry->sync_mode,
                __get_str(cgroup)
        )
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

        TP_PROTO(struct inode *inode, struct writeback_control *wbc),

        TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

        TP_PROTO(struct inode *inode, struct writeback_control *wbc),

        TP_ARGS(inode, wbc)
);

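/*
 * One event per stage in the life of a wb_writeback_work item: queued,
 * picked up for execution, started, completed, and waited for.
 */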
DECLARE_EVENT_CLASS(writeback_work_class,
        TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
        TP_ARGS(wb, work),
        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(long, nr_pages)
                __field(dev_t, sb_dev)
                __field(int, sync_mode)
                __field(int, for_kupdate)
                __field(int, range_cyclic)
                __field(int, for_background)
                __field(int, reason)
                __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
        ),
        TP_fast_assign(
                strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
                __entry->nr_pages = work->nr_pages;
                __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
                __entry->sync_mode = work->sync_mode;
                __entry->for_kupdate = work->for_kupdate;
                __entry->range_cyclic = work->range_cyclic;
                __entry->for_background = work->for_background;
                __entry->reason = work->reason;
                __trace_wb_assign_cgroup(__get_str(cgroup), wb);
        ),
        TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
                  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup=%s",
                  __entry->name,
                  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
                  __entry->nr_pages,
                  __entry->sync_mode,
                  __entry->for_kupdate,
                  __entry->range_cyclic,
                  __entry->for_background,
                  __print_symbolic(__entry->reason, WB_WORK_REASON),
                  __get_str(cgroup)
        )
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
        TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
        TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);

TRACE_EVENT(writeback_pages_written,
        TP_PROTO(long pages_written),
        TP_ARGS(pages_written),
        TP_STRUCT__entry(
                __field(long,           pages)
        ),
        TP_fast_assign(
                __entry->pages          = pages_written;
        ),
        TP_printk("%ld", __entry->pages)
);

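/* Simple per-bdi_writeback events carrying only the device name and cgroup. */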
DECLARE_EVENT_CLASS(writeback_class,
        TP_PROTO(struct bdi_writeback *wb),
        TP_ARGS(wb),
        TP_STRUCT__entry(
                __array(char, name, 32)
                __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
        ),
        TP_fast_assign(
                strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
                __trace_wb_assign_cgroup(__get_str(cgroup), wb);
        ),
        TP_printk("bdi %s: cgroup=%s",
                  __entry->name,
                  __get_str(cgroup)
        )
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
        TP_PROTO(struct bdi_writeback *wb), \
        TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);

TRACE_EVENT(writeback_bdi_register,
        TP_PROTO(struct backing_dev_info *bdi),
        TP_ARGS(bdi),
        TP_STRUCT__entry(
                __array(char, name, 32)
        ),
        TP_fast_assign(
                strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
        ),
        TP_printk("bdi %s",
                __entry->name
        )
);

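/* Snapshot of a writeback_control; currently used only by wbc_writepage. */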
DECLARE_EVENT_CLASS(wbc_class,
        TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
        TP_ARGS(wbc, bdi),
        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(long, nr_to_write)
                __field(long, pages_skipped)
                __field(int, sync_mode)
                __field(int, for_kupdate)
                __field(int, for_background)
                __field(int, for_reclaim)
                __field(int, range_cyclic)
                __field(long, range_start)
                __field(long, range_end)
                __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
        ),

        TP_fast_assign(
                strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
                __entry->nr_to_write    = wbc->nr_to_write;
                __entry->pages_skipped  = wbc->pages_skipped;
                __entry->sync_mode      = wbc->sync_mode;
                __entry->for_kupdate    = wbc->for_kupdate;
                __entry->for_background = wbc->for_background;
                __entry->for_reclaim    = wbc->for_reclaim;
                __entry->range_cyclic   = wbc->range_cyclic;
                __entry->range_start    = (long)wbc->range_start;
                __entry->range_end      = (long)wbc->range_end;
                __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
        ),

        TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
                "bgrd=%d reclm=%d cyclic=%d "
                "start=0x%lx end=0x%lx cgroup=%s",
                __entry->name,
                __entry->nr_to_write,
                __entry->pages_skipped,
                __entry->sync_mode,
                __entry->for_kupdate,
                __entry->for_background,
                __entry->for_reclaim,
                __entry->range_cyclic,
                __entry->range_start,
                __entry->range_end,
                __get_str(cgroup)
        )
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
        TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
        TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

TRACE_EVENT(writeback_queue_io,
        TP_PROTO(struct bdi_writeback *wb,
                 struct wb_writeback_work *work,
                 unsigned long dirtied_before,
                 int moved),
        TP_ARGS(wb, work, dirtied_before, moved),
        TP_STRUCT__entry(
                __array(char,           name, 32)
                __field(unsigned long,  older)
                __field(long,           age)
                __field(int,            moved)
                __field(int,            reason)
                __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
        ),
        TP_fast_assign(
                strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
                __entry->older  = dirtied_before;
                __entry->age    = (jiffies - dirtied_before) * 1000 / HZ;
                __entry->moved  = moved;
                __entry->reason = work->reason;
                __trace_wb_assign_cgroup(__get_str(cgroup), wb);
        ),
        TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup=%s",
                __entry->name,
                __entry->older, /* dirtied_before in jiffies */
                __entry->age,   /* dirtied_before in relative milliseconds */
                __entry->moved,
                __print_symbolic(__entry->reason, WB_WORK_REASON),
                __get_str(cgroup)
        )
);

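/*
 * Global dirty page accounting sampled against the background and dirty
 * thresholds.
 */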
TRACE_EVENT(global_dirty_state,

        TP_PROTO(unsigned long background_thresh,
                 unsigned long dirty_thresh
        ),

        TP_ARGS(background_thresh,
                dirty_thresh
        ),

        TP_STRUCT__entry(
                __field(unsigned long,  nr_dirty)
                __field(unsigned long,  nr_writeback)
                __field(unsigned long,  nr_unstable)
                __field(unsigned long,  background_thresh)
                __field(unsigned long,  dirty_thresh)
                __field(unsigned long,  dirty_limit)
                __field(unsigned long,  nr_dirtied)
                __field(unsigned long,  nr_written)
        ),

        TP_fast_assign(
                __entry->nr_dirty       = global_page_state(NR_FILE_DIRTY);
                __entry->nr_writeback   = global_page_state(NR_WRITEBACK);
                __entry->nr_unstable    = global_page_state(NR_UNSTABLE_NFS);
                __entry->nr_dirtied     = global_page_state(NR_DIRTIED);
                __entry->nr_written     = global_page_state(NR_WRITTEN);
                __entry->background_thresh = background_thresh;
                __entry->dirty_thresh   = dirty_thresh;
                __entry->dirty_limit    = global_wb_domain.dirty_limit;
        ),

        TP_printk("dirty=%lu writeback=%lu unstable=%lu "
                  "bg_thresh=%lu thresh=%lu limit=%lu "
                  "dirtied=%lu written=%lu",
                  __entry->nr_dirty,
                  __entry->nr_writeback,
                  __entry->nr_unstable,
                  __entry->background_thresh,
                  __entry->dirty_thresh,
                  __entry->dirty_limit,
                  __entry->nr_dirtied,
                  __entry->nr_written
        )
);

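/* Convert a page count to KiB: PAGE_SHIFT - 10 == log2(PAGE_SIZE / 1024). */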
#define KBps(x)                 ((x) << (PAGE_SHIFT - 10))

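/*
 * Dirty ratelimit update for a bdi_writeback; bandwidth and ratelimit
 * figures are converted to KiB via KBps().
 */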
TRACE_EVENT(bdi_dirty_ratelimit,

        TP_PROTO(struct bdi_writeback *wb,
                 unsigned long dirty_rate,
                 unsigned long task_ratelimit),

        TP_ARGS(wb, dirty_rate, task_ratelimit),

        TP_STRUCT__entry(
                __array(char,           bdi, 32)
                __field(unsigned long,  write_bw)
                __field(unsigned long,  avg_write_bw)
                __field(unsigned long,  dirty_rate)
                __field(unsigned long,  dirty_ratelimit)
                __field(unsigned long,  task_ratelimit)
                __field(unsigned long,  balanced_dirty_ratelimit)
                __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
        ),

        TP_fast_assign(
                strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
                __entry->write_bw       = KBps(wb->write_bandwidth);
                __entry->avg_write_bw   = KBps(wb->avg_write_bandwidth);
                __entry->dirty_rate     = KBps(dirty_rate);
                __entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
                __entry->task_ratelimit = KBps(task_ratelimit);
                __entry->balanced_dirty_ratelimit =
                                        KBps(wb->balanced_dirty_ratelimit);
                __trace_wb_assign_cgroup(__get_str(cgroup), wb);
        ),

        TP_printk("bdi %s: "
                  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
                  "dirty_ratelimit=%lu task_ratelimit=%lu "
                  "balanced_dirty_ratelimit=%lu cgroup=%s",
                  __entry->bdi,
                  __entry->write_bw,            /* write bandwidth */
                  __entry->avg_write_bw,        /* avg write bandwidth */
                  __entry->dirty_rate,          /* bdi dirty rate */
                  __entry->dirty_ratelimit,     /* base ratelimit */
                  __entry->task_ratelimit, /* ratelimit with position control */
                  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
                  __get_str(cgroup)
        )
);

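/*
 * Dirty throttling state for one balance_dirty_pages() pause period;
 * paused, pause, period and think times are reported in milliseconds.
 */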
TRACE_EVENT(balance_dirty_pages,

        TP_PROTO(struct bdi_writeback *wb,
                 unsigned long thresh,
                 unsigned long bg_thresh,
                 unsigned long dirty,
                 unsigned long bdi_thresh,
                 unsigned long bdi_dirty,
                 unsigned long dirty_ratelimit,
                 unsigned long task_ratelimit,
                 unsigned long dirtied,
                 unsigned long period,
                 long pause,
                 unsigned long start_time),

        TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
                dirty_ratelimit, task_ratelimit,
                dirtied, period, pause, start_time),

        TP_STRUCT__entry(
                __array(         char,  bdi, 32)
                __field(unsigned long,  limit)
                __field(unsigned long,  setpoint)
                __field(unsigned long,  dirty)
                __field(unsigned long,  bdi_setpoint)
                __field(unsigned long,  bdi_dirty)
                __field(unsigned long,  dirty_ratelimit)
                __field(unsigned long,  task_ratelimit)
                __field(unsigned int,   dirtied)
                __field(unsigned int,   dirtied_pause)
                __field(unsigned long,  paused)
                __field(         long,  pause)
                __field(unsigned long,  period)
                __field(         long,  think)
                __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
        ),

        TP_fast_assign(
                unsigned long freerun = (thresh + bg_thresh) / 2;
                strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);

                __entry->limit          = global_wb_domain.dirty_limit;
                __entry->setpoint       = (global_wb_domain.dirty_limit +
                                                freerun) / 2;
                __entry->dirty          = dirty;
                __entry->bdi_setpoint   = __entry->setpoint *
                                                bdi_thresh / (thresh + 1);
                __entry->bdi_dirty      = bdi_dirty;
                __entry->dirty_ratelimit = KBps(dirty_ratelimit);
                __entry->task_ratelimit = KBps(task_ratelimit);
                __entry->dirtied        = dirtied;
                __entry->dirtied_pause  = current->nr_dirtied_pause;
                __entry->think          = current->dirty_paused_when == 0 ? 0 :
                         (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
                __entry->period         = period * 1000 / HZ;
                __entry->pause          = pause * 1000 / HZ;
                __entry->paused         = (jiffies - start_time) * 1000 / HZ;
                __trace_wb_assign_cgroup(__get_str(cgroup), wb);
        ),


        TP_printk("bdi %s: "
                  "limit=%lu setpoint=%lu dirty=%lu "
                  "bdi_setpoint=%lu bdi_dirty=%lu "
                  "dirty_ratelimit=%lu task_ratelimit=%lu "
                  "dirtied=%u dirtied_pause=%u "
                  "paused=%lu pause=%ld period=%lu think=%ld cgroup=%s",
                  __entry->bdi,
                  __entry->limit,
                  __entry->setpoint,
                  __entry->dirty,
                  __entry->bdi_setpoint,
                  __entry->bdi_dirty,
                  __entry->dirty_ratelimit,
                  __entry->task_ratelimit,
                  __entry->dirtied,
                  __entry->dirtied_pause,
                  __entry->paused,      /* ms */
                  __entry->pause,       /* ms */
                  __entry->period,      /* ms */
                  __entry->think,       /* ms */
                  __get_str(cgroup)
          )
);

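/*
 * An inode already under writeback (I_SYNC) was skipped and requeued to be
 * retried on a later pass.
 */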
TRACE_EVENT(writeback_sb_inodes_requeue,

        TP_PROTO(struct inode *inode),
        TP_ARGS(inode),

        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(unsigned long, state)
                __field(unsigned long, dirtied_when)
                __dynamic_array(char, cgroup,
                                __trace_wb_cgroup_size(inode_to_wb(inode)))
        ),

        TP_fast_assign(
                strscpy_pad(__entry->name,
                            bdi_dev_name(inode_to_bdi(inode)), 32);
                __entry->ino            = inode->i_ino;
                __entry->state          = inode->i_state;
                __entry->dirtied_when   = inode->dirtied_when;
                __trace_wb_assign_cgroup(__get_str(cgroup), inode_to_wb(inode));
        ),

        TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup=%s",
                  __entry->name,
                  __entry->ino,
                  show_inode_state(__entry->state),
                  __entry->dirtied_when,
                  (jiffies - __entry->dirtied_when) / HZ,
                  __get_str(cgroup)
        )
);

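/*
 * How long a task asked to wait for congestion to clear versus how long it
 * actually slept, both in microseconds.
 */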
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

        TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

        TP_ARGS(usec_timeout, usec_delayed),

        TP_STRUCT__entry(
                __field(        unsigned int,   usec_timeout    )
                __field(        unsigned int,   usec_delayed    )
        ),

        TP_fast_assign(
                __entry->usec_timeout   = usec_timeout;
                __entry->usec_delayed   = usec_delayed;
        ),

        TP_printk("usec_timeout=%u usec_delayed=%u",
                        __entry->usec_timeout,
                        __entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

        TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

        TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

        TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

        TP_ARGS(usec_timeout, usec_delayed)
);

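/*
 * Writeback of a single inode; "wrote" is the part of the nr_to_write
 * budget that was consumed (nr_to_write - wbc->nr_to_write).
 */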
DECLARE_EVENT_CLASS(writeback_single_inode_template,

        TP_PROTO(struct inode *inode,
                 struct writeback_control *wbc,
                 unsigned long nr_to_write
        ),

        TP_ARGS(inode, wbc, nr_to_write),

        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(unsigned long, state)
                __field(unsigned long, dirtied_when)
                __field(unsigned long, writeback_index)
                __field(long, nr_to_write)
                __field(unsigned long, wrote)
                __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
        ),

        TP_fast_assign(
                strscpy_pad(__entry->name,
                            bdi_dev_name(inode_to_bdi(inode)), 32);
                __entry->ino            = inode->i_ino;
                __entry->state          = inode->i_state;
                __entry->dirtied_when   = inode->dirtied_when;
                __entry->writeback_index = inode->i_mapping->writeback_index;
                __entry->nr_to_write    = nr_to_write;
                __entry->wrote          = nr_to_write - wbc->nr_to_write;
                __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
        ),

        TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
                  "index=%lu to_write=%ld wrote=%lu cgroup=%s",
                  __entry->name,
                  __entry->ino,
                  show_inode_state(__entry->state),
                  __entry->dirtied_when,
                  (jiffies - __entry->dirtied_when) / HZ,
                  __entry->writeback_index,
                  __entry->nr_to_write,
                  __entry->wrote,
                  __get_str(cgroup)
        )
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
        TP_PROTO(struct inode *inode,
                 struct writeback_control *wbc,
                 unsigned long nr_to_write),
        TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
        TP_PROTO(struct inode *inode,
                 struct writeback_control *wbc,
                 unsigned long nr_to_write),
        TP_ARGS(inode, wbc, nr_to_write)
);

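/*
 * Lazytime handling of inodes dirtied only for timestamp updates
 * (I_DIRTY_TIME).
 */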
DECLARE_EVENT_CLASS(writeback_lazytime_template,
        TP_PROTO(struct inode *inode),

        TP_ARGS(inode),

        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
                __field(unsigned long,  ino                     )
                __field(unsigned long,  state                   )
                __field(        __u16, mode                     )
                __field(unsigned long, dirtied_when             )
        ),

        TP_fast_assign(
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
                __entry->state  = inode->i_state;
                __entry->mode   = inode->i_mode;
                __entry->dirtied_when = inode->dirtied_when;
        ),

        TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino, __entry->dirtied_when,
                  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime,
        TP_PROTO(struct inode *inode),

        TP_ARGS(inode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput,
        TP_PROTO(struct inode *inode),

        TP_ARGS(inode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue,

        TP_PROTO(struct inode *inode),

        TP_ARGS(inode)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>