/* GNU Linux-libre 4.9.308-gnu1: include/trace/events/writeback.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#define show_inode_state(state)                                 \
        __print_flags(state, "|",                               \
                {I_DIRTY_SYNC,          "I_DIRTY_SYNC"},        \
                {I_DIRTY_DATASYNC,      "I_DIRTY_DATASYNC"},    \
                {I_DIRTY_PAGES,         "I_DIRTY_PAGES"},       \
                {I_NEW,                 "I_NEW"},               \
                {I_WILL_FREE,           "I_WILL_FREE"},         \
                {I_FREEING,             "I_FREEING"},           \
                {I_CLEAR,               "I_CLEAR"},             \
                {I_SYNC,                "I_SYNC"},              \
                {I_DIRTY_TIME,          "I_DIRTY_TIME"},        \
                {I_DIRTY_TIME_EXPIRED,  "I_DIRTY_TIME_EXPIRED"}, \
                {I_REFERENCED,          "I_REFERENCED"}         \
        )

/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b)         TRACE_DEFINE_ENUM(a);
#define EMe(a,b)        TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON                                                  \
        EM( WB_REASON_BACKGROUND,               "background")           \
        EM( WB_REASON_TRY_TO_FREE_PAGES,        "try_to_free_pages")    \
        EM( WB_REASON_SYNC,                     "sync")                 \
        EM( WB_REASON_PERIODIC,                 "periodic")             \
        EM( WB_REASON_LAPTOP_TIMER,             "laptop_timer")         \
        EM( WB_REASON_FREE_MORE_MEM,            "free_more_memory")     \
        EM( WB_REASON_FS_FREE_SPACE,            "fs_free_space")        \
        EMe(WB_REASON_FORKER_THREAD,            "forker_thread")

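/*
 * Expanding WB_WORK_REASON here emits one TRACE_DEFINE_ENUM() per
 * WB_REASON_* value, so user-space trace parsers can resolve the names
 * printed by __print_symbolic() in the events below.
 */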
WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)         { a, b },
#define EMe(a,b)        { a, b }

struct wb_writeback_work;

TRACE_EVENT(writeback_dirty_page,

        TP_PROTO(struct page *page, struct address_space *mapping),

        TP_ARGS(page, mapping),

        TP_STRUCT__entry (
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(pgoff_t, index)
        ),

        TP_fast_assign(
                strscpy_pad(__entry->name,
                            bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
                                         NULL), 32);
                __entry->ino = mapping ? mapping->host->i_ino : 0;
                __entry->index = page->index;
        ),

        TP_printk("bdi %s: ino=%lu index=%lu",
                __entry->name,
                __entry->ino,
                __entry->index
        )
);

DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags),

        TP_STRUCT__entry (
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(unsigned long, state)
                __field(unsigned long, flags)
        ),

        TP_fast_assign(
                struct backing_dev_info *bdi = inode_to_bdi(inode);

                /* may be called for files on pseudo FSes w/ unregistered bdi */
                strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
                __entry->ino            = inode->i_ino;
                __entry->state          = inode->i_state;
                __entry->flags          = flags;
        ),

        TP_printk("bdi %s: ino=%lu state=%s flags=%s",
                __entry->name,
                __entry->ino,
                show_inode_state(__entry->state),
                show_inode_state(__entry->flags)
        )
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags)
);

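/*
 * The helpers below are only needed while the tracepoints are being
 * instantiated (CREATE_TRACE_POINTS).  With CONFIG_CGROUP_WRITEBACK they
 * report the kernfs inode number of the memory cgroup that owns the
 * bdi_writeback; otherwise, or when no wb is attached to the
 * writeback_control, they report -1U.
 */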
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
        return wb->memcg_css->cgroup->kn->ino;
}

static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
        if (wbc->wb)
                return __trace_wb_assign_cgroup(wbc->wb);
        else
                return -1U;
}
#else   /* CONFIG_CGROUP_WRITEBACK */

static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
        return -1U;
}

static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
        return -1U;
}

#endif  /* CONFIG_CGROUP_WRITEBACK */
#endif  /* CREATE_TRACE_POINTS */

DECLARE_EVENT_CLASS(writeback_write_inode_template,

        TP_PROTO(struct inode *inode, struct writeback_control *wbc),

        TP_ARGS(inode, wbc),

        TP_STRUCT__entry (
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(int, sync_mode)
                __field(unsigned int, cgroup_ino)
        ),

        TP_fast_assign(
                strscpy_pad(__entry->name,
                            bdi_dev_name(inode_to_bdi(inode)), 32);
                __entry->ino            = inode->i_ino;
                __entry->sync_mode      = wbc->sync_mode;
                __entry->cgroup_ino     = __trace_wbc_assign_cgroup(wbc);
        ),

        TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
                __entry->name,
                __entry->ino,
                __entry->sync_mode,
                __entry->cgroup_ino
        )
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

        TP_PROTO(struct inode *inode, struct writeback_control *wbc),

        TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

        TP_PROTO(struct inode *inode, struct writeback_control *wbc),

        TP_ARGS(inode, wbc)
);

DECLARE_EVENT_CLASS(writeback_work_class,
        TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
        TP_ARGS(wb, work),
        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(long, nr_pages)
                __field(dev_t, sb_dev)
                __field(int, sync_mode)
                __field(int, for_kupdate)
                __field(int, range_cyclic)
                __field(int, for_background)
                __field(int, reason)
                __field(unsigned int, cgroup_ino)
        ),
        TP_fast_assign(
                strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
                __entry->nr_pages = work->nr_pages;
                __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
                __entry->sync_mode = work->sync_mode;
                __entry->for_kupdate = work->for_kupdate;
                __entry->range_cyclic = work->range_cyclic;
                __entry->for_background = work->for_background;
                __entry->reason = work->reason;
                __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
        ),
        TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
                  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
                  __entry->name,
                  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
                  __entry->nr_pages,
                  __entry->sync_mode,
                  __entry->for_kupdate,
                  __entry->range_cyclic,
                  __entry->for_background,
                  __print_symbolic(__entry->reason, WB_WORK_REASON),
                  __entry->cgroup_ino
        )
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
        TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
        TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);

TRACE_EVENT(writeback_pages_written,
        TP_PROTO(long pages_written),
        TP_ARGS(pages_written),
        TP_STRUCT__entry(
                __field(long,           pages)
        ),
        TP_fast_assign(
                __entry->pages          = pages_written;
        ),
        TP_printk("%ld", __entry->pages)
);

DECLARE_EVENT_CLASS(writeback_class,
        TP_PROTO(struct bdi_writeback *wb),
        TP_ARGS(wb),
        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(unsigned int, cgroup_ino)
        ),
        TP_fast_assign(
                strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
                __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
        ),
        TP_printk("bdi %s: cgroup_ino=%u",
                  __entry->name,
                  __entry->cgroup_ino
        )
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
        TP_PROTO(struct bdi_writeback *wb), \
        TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);

TRACE_EVENT(writeback_bdi_register,
        TP_PROTO(struct backing_dev_info *bdi),
        TP_ARGS(bdi),
        TP_STRUCT__entry(
                __array(char, name, 32)
        ),
        TP_fast_assign(
                strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
        ),
        TP_printk("bdi %s",
                __entry->name
        )
);

DECLARE_EVENT_CLASS(wbc_class,
        TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
        TP_ARGS(wbc, bdi),
        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(long, nr_to_write)
                __field(long, pages_skipped)
                __field(int, sync_mode)
                __field(int, for_kupdate)
                __field(int, for_background)
                __field(int, for_reclaim)
                __field(int, range_cyclic)
                __field(long, range_start)
                __field(long, range_end)
                __field(unsigned int, cgroup_ino)
        ),

        TP_fast_assign(
                strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
                __entry->nr_to_write    = wbc->nr_to_write;
                __entry->pages_skipped  = wbc->pages_skipped;
                __entry->sync_mode      = wbc->sync_mode;
                __entry->for_kupdate    = wbc->for_kupdate;
                __entry->for_background = wbc->for_background;
                __entry->for_reclaim    = wbc->for_reclaim;
                __entry->range_cyclic   = wbc->range_cyclic;
                __entry->range_start    = (long)wbc->range_start;
                __entry->range_end      = (long)wbc->range_end;
                __entry->cgroup_ino     = __trace_wbc_assign_cgroup(wbc);
        ),

        TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
                "bgrd=%d reclm=%d cyclic=%d "
                "start=0x%lx end=0x%lx cgroup_ino=%u",
                __entry->name,
                __entry->nr_to_write,
                __entry->pages_skipped,
                __entry->sync_mode,
                __entry->for_kupdate,
                __entry->for_background,
                __entry->for_reclaim,
                __entry->range_cyclic,
                __entry->range_start,
                __entry->range_end,
                __entry->cgroup_ino
        )
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
        TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
        TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

TRACE_EVENT(writeback_queue_io,
        TP_PROTO(struct bdi_writeback *wb,
                 struct wb_writeback_work *work,
                 unsigned long dirtied_before,
                 int moved),
        TP_ARGS(wb, work, dirtied_before, moved),
        TP_STRUCT__entry(
                __array(char,           name, 32)
                __field(unsigned long,  older)
                __field(long,           age)
                __field(int,            moved)
                __field(int,            reason)
                __field(unsigned int,   cgroup_ino)
        ),
        TP_fast_assign(
                strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
                __entry->older  = dirtied_before;
                __entry->age    = (jiffies - dirtied_before) * 1000 / HZ;
                __entry->moved  = moved;
                __entry->reason = work->reason;
                __entry->cgroup_ino     = __trace_wb_assign_cgroup(wb);
        ),
        TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
                __entry->name,
                __entry->older, /* dirtied_before in jiffies */
                __entry->age,   /* dirtied_before in relative milliseconds */
                __entry->moved,
                __print_symbolic(__entry->reason, WB_WORK_REASON),
                __entry->cgroup_ino
        )
);

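/*
 * Snapshot of global dirty page accounting: the node-wide NR_FILE_DIRTY,
 * NR_WRITEBACK and NR_UNSTABLE_NFS counters, the background and dirty
 * thresholds passed in by the caller, and the instantaneous dirty limit
 * tracked in global_wb_domain.
 */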
TRACE_EVENT(global_dirty_state,

        TP_PROTO(unsigned long background_thresh,
                 unsigned long dirty_thresh
        ),

        TP_ARGS(background_thresh,
                dirty_thresh
        ),

        TP_STRUCT__entry(
                __field(unsigned long,  nr_dirty)
                __field(unsigned long,  nr_writeback)
                __field(unsigned long,  nr_unstable)
                __field(unsigned long,  background_thresh)
                __field(unsigned long,  dirty_thresh)
                __field(unsigned long,  dirty_limit)
                __field(unsigned long,  nr_dirtied)
                __field(unsigned long,  nr_written)
        ),

        TP_fast_assign(
                __entry->nr_dirty       = global_node_page_state(NR_FILE_DIRTY);
                __entry->nr_writeback   = global_node_page_state(NR_WRITEBACK);
                __entry->nr_unstable    = global_node_page_state(NR_UNSTABLE_NFS);
                __entry->nr_dirtied     = global_node_page_state(NR_DIRTIED);
                __entry->nr_written     = global_node_page_state(NR_WRITTEN);
                __entry->background_thresh = background_thresh;
                __entry->dirty_thresh   = dirty_thresh;
                __entry->dirty_limit    = global_wb_domain.dirty_limit;
        ),

        TP_printk("dirty=%lu writeback=%lu unstable=%lu "
                  "bg_thresh=%lu thresh=%lu limit=%lu "
                  "dirtied=%lu written=%lu",
                  __entry->nr_dirty,
                  __entry->nr_writeback,
                  __entry->nr_unstable,
                  __entry->background_thresh,
                  __entry->dirty_thresh,
                  __entry->dirty_limit,
                  __entry->nr_dirtied,
                  __entry->nr_written
        )
);

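/*
 * Convert a page count (or a pages/s rate) to KB (or KB/s):
 * x << (PAGE_SHIFT - 10), i.e. a multiply by 4 with 4 KiB pages.
 */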
#define KBps(x)                 ((x) << (PAGE_SHIFT - 10))

TRACE_EVENT(bdi_dirty_ratelimit,

        TP_PROTO(struct bdi_writeback *wb,
                 unsigned long dirty_rate,
                 unsigned long task_ratelimit),

        TP_ARGS(wb, dirty_rate, task_ratelimit),

        TP_STRUCT__entry(
                __array(char,           bdi, 32)
                __field(unsigned long,  write_bw)
                __field(unsigned long,  avg_write_bw)
                __field(unsigned long,  dirty_rate)
                __field(unsigned long,  dirty_ratelimit)
                __field(unsigned long,  task_ratelimit)
                __field(unsigned long,  balanced_dirty_ratelimit)
                __field(unsigned int,   cgroup_ino)
        ),

        TP_fast_assign(
                strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
                __entry->write_bw       = KBps(wb->write_bandwidth);
                __entry->avg_write_bw   = KBps(wb->avg_write_bandwidth);
                __entry->dirty_rate     = KBps(dirty_rate);
                __entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
                __entry->task_ratelimit = KBps(task_ratelimit);
                __entry->balanced_dirty_ratelimit =
                                        KBps(wb->balanced_dirty_ratelimit);
                __entry->cgroup_ino     = __trace_wb_assign_cgroup(wb);
        ),

        TP_printk("bdi %s: "
                  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
                  "dirty_ratelimit=%lu task_ratelimit=%lu "
                  "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
                  __entry->bdi,
                  __entry->write_bw,            /* write bandwidth */
                  __entry->avg_write_bw,        /* avg write bandwidth */
                  __entry->dirty_rate,          /* bdi dirty rate */
                  __entry->dirty_ratelimit,     /* base ratelimit */
                  __entry->task_ratelimit, /* ratelimit with position control */
                  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
                  __entry->cgroup_ino
        )
);

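/*
 * Snapshot of the dirty throttling control loop.  setpoint sits halfway
 * between the global dirty limit and the freerun threshold
 * ((thresh + bg_thresh) / 2); period, pause, paused and think are all
 * reported in milliseconds.
 */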
TRACE_EVENT(balance_dirty_pages,

        TP_PROTO(struct bdi_writeback *wb,
                 unsigned long thresh,
                 unsigned long bg_thresh,
                 unsigned long dirty,
                 unsigned long bdi_thresh,
                 unsigned long bdi_dirty,
                 unsigned long dirty_ratelimit,
                 unsigned long task_ratelimit,
                 unsigned long dirtied,
                 unsigned long period,
                 long pause,
                 unsigned long start_time),

        TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
                dirty_ratelimit, task_ratelimit,
                dirtied, period, pause, start_time),

        TP_STRUCT__entry(
                __array(         char,  bdi, 32)
                __field(unsigned long,  limit)
                __field(unsigned long,  setpoint)
                __field(unsigned long,  dirty)
                __field(unsigned long,  bdi_setpoint)
                __field(unsigned long,  bdi_dirty)
                __field(unsigned long,  dirty_ratelimit)
                __field(unsigned long,  task_ratelimit)
                __field(unsigned int,   dirtied)
                __field(unsigned int,   dirtied_pause)
                __field(unsigned long,  paused)
                __field(         long,  pause)
                __field(unsigned long,  period)
                __field(         long,  think)
                __field(unsigned int,   cgroup_ino)
        ),

        TP_fast_assign(
                unsigned long freerun = (thresh + bg_thresh) / 2;
                strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);

                __entry->limit          = global_wb_domain.dirty_limit;
                __entry->setpoint       = (global_wb_domain.dirty_limit +
                                                freerun) / 2;
                __entry->dirty          = dirty;
                __entry->bdi_setpoint   = __entry->setpoint *
                                                bdi_thresh / (thresh + 1);
                __entry->bdi_dirty      = bdi_dirty;
                __entry->dirty_ratelimit = KBps(dirty_ratelimit);
                __entry->task_ratelimit = KBps(task_ratelimit);
                __entry->dirtied        = dirtied;
                __entry->dirtied_pause  = current->nr_dirtied_pause;
                __entry->think          = current->dirty_paused_when == 0 ? 0 :
                         (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
                __entry->period         = period * 1000 / HZ;
                __entry->pause          = pause * 1000 / HZ;
                __entry->paused         = (jiffies - start_time) * 1000 / HZ;
                __entry->cgroup_ino     = __trace_wb_assign_cgroup(wb);
        ),


        TP_printk("bdi %s: "
                  "limit=%lu setpoint=%lu dirty=%lu "
                  "bdi_setpoint=%lu bdi_dirty=%lu "
                  "dirty_ratelimit=%lu task_ratelimit=%lu "
                  "dirtied=%u dirtied_pause=%u "
                  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
                  __entry->bdi,
                  __entry->limit,
                  __entry->setpoint,
                  __entry->dirty,
                  __entry->bdi_setpoint,
                  __entry->bdi_dirty,
                  __entry->dirty_ratelimit,
                  __entry->task_ratelimit,
                  __entry->dirtied,
                  __entry->dirtied_pause,
                  __entry->paused,      /* ms */
                  __entry->pause,       /* ms */
                  __entry->period,      /* ms */
                  __entry->think,       /* ms */
                  __entry->cgroup_ino
          )
);

TRACE_EVENT(writeback_sb_inodes_requeue,

        TP_PROTO(struct inode *inode),
        TP_ARGS(inode),

        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(unsigned long, state)
                __field(unsigned long, dirtied_when)
                __field(unsigned int, cgroup_ino)
        ),

        TP_fast_assign(
                strscpy_pad(__entry->name,
                            bdi_dev_name(inode_to_bdi(inode)), 32);
                __entry->ino            = inode->i_ino;
                __entry->state          = inode->i_state;
                __entry->dirtied_when   = inode->dirtied_when;
                __entry->cgroup_ino     = __trace_wb_assign_cgroup(inode_to_wb(inode));
        ),

        TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
                  __entry->name,
                  __entry->ino,
                  show_inode_state(__entry->state),
                  __entry->dirtied_when,
                  (jiffies - __entry->dirtied_when) / HZ,
                  __entry->cgroup_ino
        )
);

DECLARE_EVENT_CLASS(writeback_congest_waited_template,

        TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

        TP_ARGS(usec_timeout, usec_delayed),

        TP_STRUCT__entry(
                __field(        unsigned int,   usec_timeout    )
                __field(        unsigned int,   usec_delayed    )
        ),

        TP_fast_assign(
                __entry->usec_timeout   = usec_timeout;
                __entry->usec_delayed   = usec_delayed;
        ),

        TP_printk("usec_timeout=%u usec_delayed=%u",
                        __entry->usec_timeout,
                        __entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

        TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

        TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

        TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

        TP_ARGS(usec_timeout, usec_delayed)
);

DECLARE_EVENT_CLASS(writeback_single_inode_template,

        TP_PROTO(struct inode *inode,
                 struct writeback_control *wbc,
                 unsigned long nr_to_write
        ),

        TP_ARGS(inode, wbc, nr_to_write),

        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(unsigned long, state)
                __field(unsigned long, dirtied_when)
                __field(unsigned long, writeback_index)
                __field(long, nr_to_write)
                __field(unsigned long, wrote)
                __field(unsigned int, cgroup_ino)
        ),

        TP_fast_assign(
                strscpy_pad(__entry->name,
                            bdi_dev_name(inode_to_bdi(inode)), 32);
                __entry->ino            = inode->i_ino;
                __entry->state          = inode->i_state;
                __entry->dirtied_when   = inode->dirtied_when;
                __entry->writeback_index = inode->i_mapping->writeback_index;
                __entry->nr_to_write    = nr_to_write;
                __entry->wrote          = nr_to_write - wbc->nr_to_write;
                __entry->cgroup_ino     = __trace_wbc_assign_cgroup(wbc);
        ),

        TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
                  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
                  __entry->name,
                  __entry->ino,
                  show_inode_state(__entry->state),
                  __entry->dirtied_when,
                  (jiffies - __entry->dirtied_when) / HZ,
                  __entry->writeback_index,
                  __entry->nr_to_write,
                  __entry->wrote,
                  __entry->cgroup_ino
        )
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
        TP_PROTO(struct inode *inode,
                 struct writeback_control *wbc,
                 unsigned long nr_to_write),
        TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
        TP_PROTO(struct inode *inode,
                 struct writeback_control *wbc,
                 unsigned long nr_to_write),
        TP_ARGS(inode, wbc, nr_to_write)
);

DECLARE_EVENT_CLASS(writeback_inode_template,
        TP_PROTO(struct inode *inode),

        TP_ARGS(inode),

        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
                __field(unsigned long,  ino                     )
                __field(unsigned long,  state                   )
                __field(        __u16, mode                     )
                __field(unsigned long, dirtied_when             )
        ),

        TP_fast_assign(
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
                __entry->state  = inode->i_state;
                __entry->mode   = inode->i_mode;
                __entry->dirtied_when = inode->dirtied_when;
        ),

        TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino, __entry->dirtied_when,
                  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
        TP_PROTO(struct inode *inode),

        TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
        TP_PROTO(struct inode *inode),

        TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

        TP_PROTO(struct inode *inode),

        TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
        TP_PROTO(struct inode *inode),
        TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
        TP_PROTO(struct inode *inode),
        TP_ARGS(inode)
);

#endif /* _TRACE_WRITEBACK_H */

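/*
 * trace/define_trace.h re-reads this header (hence the
 * TRACE_HEADER_MULTI_READ guard above) and, when CREATE_TRACE_POINTS is
 * defined by the including .c file, generates the actual tracepoint
 * definitions from the TRACE_EVENT()/DEFINE_EVENT() declarations.
 */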
/* This part must be outside protection */
#include <trace/define_trace.h>