1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM writeback
4
5 #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_WRITEBACK_H
7
8 #include <linux/tracepoint.h>
9 #include <linux/backing-dev.h>
10 #include <linux/writeback.h>
11
/* Decode an inode ->i_state bitmask into a "|"-separated flag-name string. */
#define show_inode_state(state)				\
	__print_flags(state, "|",			\
		{I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \
		{I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \
		{I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \
		{I_NEW, "I_NEW"}, \
		{I_WILL_FREE, "I_WILL_FREE"}, \
		{I_FREEING, "I_FREEING"}, \
		{I_CLEAR, "I_CLEAR"}, \
		{I_SYNC, "I_SYNC"}, \
		{I_DIRTY_TIME, "I_DIRTY_TIME"}, \
		{I_REFERENCED, "I_REFERENCED"}, \
		{I_LINKABLE, "I_LINKABLE"}, \
		{I_WB_SWITCH, "I_WB_SWITCH"}, \
		{I_OVL_INUSE, "I_OVL_INUSE"}, \
		{I_CREATING, "I_CREATING"}, \
		{I_DONTCACHE, "I_DONTCACHE"}, \
		{I_SYNC_QUEUED, "I_SYNC_QUEUED"}, \
		{I_PINNING_NETFS_WB, "I_PINNING_NETFS_WB"}, \
		{I_LRU_ISOLATING, "I_LRU_ISOLATING"} \
	)
33
/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b)		TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

/*
 * Writeback work reasons.  EMe() marks the final entry; the two macros
 * differ only by the trailing comma once they are redefined below to
 * build the __print_symbolic() table.
 */
#define WB_WORK_REASON						\
	EM( WB_REASON_BACKGROUND,	"background")		\
	EM( WB_REASON_VMSCAN,		"vmscan")		\
	EM( WB_REASON_SYNC,		"sync")			\
	EM( WB_REASON_PERIODIC,		"periodic")		\
	EM( WB_REASON_FS_FREE_SPACE,	"fs_free_space")	\
	EM( WB_REASON_FORKER_THREAD,	"forker_thread")	\
	EMe(WB_REASON_FOREIGN_FLUSH,	"foreign_flush")

/* Emit one TRACE_DEFINE_ENUM() per reason (EM/EMe currently expand to that). */
WB_WORK_REASON
50
51 /*
52 * Now redefine the EM() and EMe() macros to map the enums to the strings
53 * that will be printed in the output.
54 */
55 #undef EM
56 #undef EMe
57 #define EM(a,b) { a, b },
58 #define EMe(a,b) { a, b }
59
60 struct wb_writeback_work;
61
/*
 * Template for folio-level writeback events: records the bdi device name,
 * the owning inode number and the folio's index in the mapping.
 */
DECLARE_EVENT_CLASS(writeback_folio_template,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		/* mapping may be NULL; NULL is then passed on to bdi_dev_name() */
		strscpy_pad(__entry->name,
			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
					 NULL), 32);
		/* ino falls back to 0 when there is no mapping/host inode */
		__entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
		__entry->index = folio->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->index
	)
);

/* A folio was dirtied against @mapping. */
DEFINE_EVENT(writeback_folio_template, writeback_dirty_folio,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping)
);

/* Someone is waiting for writeback on this folio to complete. */
DEFINE_EVENT(writeback_folio_template, folio_wait_writeback,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping)
);
102
/*
 * Template for inode-dirtying events: records the bdi name, inode number,
 * the inode's current i_state and the dirty @flags being applied.  Both
 * state and flags share the I_* bit definitions, so show_inode_state()
 * decodes each of them.
 */
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode_state_read_once(inode);
		__entry->flags = flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		(unsigned long)__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
154
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

/* Resolve the inode number of the cgroup that @wb writes back for. */
static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	struct cgroup *cgrp = wb->memcg_css->cgroup;

	return cgroup_ino(cgrp);
}

/*
 * Same, but via a writeback_control.  Falls back to 1 when the wbc is
 * not attached to a wb — matching the !CONFIG_CGROUP_WRITEBACK stubs.
 */
static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return wbc->wb ? __trace_wb_assign_cgroup(wbc->wb) : 1;
}

#else	/* CONFIG_CGROUP_WRITEBACK */

/* Without cgroup writeback there is only one context: report ino 1. */
static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return 1;
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return 1;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */
184
#ifdef CONFIG_CGROUP_WRITEBACK
/*
 * Snapshot of an inode's foreign-writeback history: the cgroup currently
 * charged via @wbc plus an opaque @history bitmask (printed in hex).
 */
TRACE_EVENT(inode_foreign_history,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
		 unsigned int history),

	TP_ARGS(inode, wbc, history),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(ino_t, cgroup_ino)
		__field(unsigned int, history)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
		__entry->history = history;
	),

	TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->cgroup_ino,
		__entry->history
	)
);
214
/*
 * @count inodes were queued to switch from @old_wb to @new_wb.  Both wbs
 * belong to the same bdi (the name is taken from old_wb->bdi).
 */
TRACE_EVENT(inode_switch_wbs_queue,

	TP_PROTO(struct bdi_writeback *old_wb, struct bdi_writeback *new_wb,
		 unsigned int count),

	TP_ARGS(old_wb, new_wb, count),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, old_cgroup_ino)
		__field(ino_t, new_cgroup_ino)
		__field(unsigned int, count)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
		__entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
		__entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
		__entry->count = count;
	),

	TP_printk("bdi %s: old_cgroup_ino=%lu new_cgroup_ino=%lu count=%u",
		__entry->name,
		(unsigned long)__entry->old_cgroup_ino,
		(unsigned long)__entry->new_cgroup_ino,
		__entry->count
	)
);
243
/* A single inode is being switched from @old_wb to @new_wb. */
TRACE_EVENT(inode_switch_wbs,

	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
		 struct bdi_writeback *new_wb),

	TP_ARGS(inode, old_wb, new_wb),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(ino_t, old_cgroup_ino)
		__field(ino_t, new_cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
		__entry->ino = inode->i_ino;
		__entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
		__entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
	),

	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->old_cgroup_ino,
		(unsigned long)__entry->new_cgroup_ino
	)
);
272
/*
 * A folio owned by one cgroup was dirtied against a wb belonging to a
 * different cgroup; records both sides so foreign writes can be tracked.
 */
TRACE_EVENT(track_foreign_dirty,

	TP_PROTO(struct folio *folio, struct bdi_writeback *wb),

	TP_ARGS(folio, wb),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(u64, bdi_id)
		__field(ino_t, ino)
		__field(unsigned int, memcg_id)
		__field(ino_t, cgroup_ino)
		__field(ino_t, page_cgroup_ino)
	),

	TP_fast_assign(
		/* folio_mapping() may return NULL; ino then reads as 0 */
		struct address_space *mapping = folio_mapping(folio);
		struct inode *inode = mapping ? mapping->host : NULL;

		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->bdi_id = wb->bdi->id;
		__entry->ino = inode ? inode->i_ino : 0;
		__entry->memcg_id = wb->memcg_css->id;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
		__entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
	),

	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
		__entry->name,
		__entry->bdi_id,
		(unsigned long)__entry->ino,
		__entry->memcg_id,
		(unsigned long)__entry->cgroup_ino,
		(unsigned long)__entry->page_cgroup_ino
	)
);
309
/*
 * @wb is starting a flush on behalf of a foreign (bdi, memcg) pair
 * identified by @frn_bdi_id / @frn_memcg_id.
 */
TRACE_EVENT(flush_foreign,

	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
		 unsigned int frn_memcg_id),

	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
		__field(unsigned int, frn_bdi_id)
		__field(unsigned int, frn_memcg_id)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
		__entry->frn_bdi_id = frn_bdi_id;
		__entry->frn_memcg_id = frn_memcg_id;
	),

	TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
		__entry->name,
		(unsigned long)__entry->cgroup_ino,
		__entry->frn_bdi_id,
		__entry->frn_memcg_id
	)
);
#endif	/* CONFIG_CGROUP_WRITEBACK */
339
/*
 * Template for ->write_inode() events: records bdi name, inode number,
 * the wbc sync mode and the cgroup doing the writeback.
 */
DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(int, sync_mode)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->sync_mode = wbc->sync_mode;
		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->sync_mode,
		(unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
382
/*
 * Template for wb_writeback_work lifecycle events (queue/exec/start/
 * written/wait below).  Captures the work parameters plus the cgroup.
 */
DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->nr_pages = work->nr_pages;
		/* sb_dev is 0 when the work is not pinned to a superblock */
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background = work->for_background;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  /* reason decoded via the WB_WORK_REASON table above */
		  __print_symbolic(__entry->reason, WB_WORK_REASON),
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
430
/* Number of pages written by one pass of the writeback worker. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long, pages)
	),
	TP_fast_assign(
		__entry->pages = pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
442
/* Minimal per-wb template: just the bdi name and owning cgroup. */
DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_wake_background);
465
/* A backing_dev_info was registered; records only its device name. */
TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);
479
480 DECLARE_EVENT_CLASS(wbc_class,
481 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
482 TP_ARGS(wbc, bdi),
483 TP_STRUCT__entry(
484 __array(char, name, 32)
485 __field(long, nr_to_write)
486 __field(long, pages_skipped)
487 __field(int, sync_mode)
488 __field(int, for_kupdate)
489 __field(int, for_background)
490 __field(int, range_cyclic)
491 __field(long, range_start)
492 __field(long, range_end)
493 __field(ino_t, cgroup_ino)
494 ),
495
496 TP_fast_assign(
497 strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
498 __entry->nr_to_write = wbc->nr_to_write;
499 __entry->pages_skipped = wbc->pages_skipped;
500 __entry->sync_mode = wbc->sync_mode;
501 __entry->for_kupdate = wbc->for_kupdate;
502 __entry->for_background = wbc->for_background;
503 __entry->range_cyclic = wbc->range_cyclic;
504 __entry->range_start = (long)wbc->range_start;
505 __entry->range_end = (long)wbc->range_end;
506 __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
507 ),
508
509 TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d bgrd=%d "
510 "cyclic=%d start=0x%lx end=0x%lx cgroup_ino=%lu",
511 __entry->name,
512 __entry->nr_to_write,
513 __entry->pages_skipped,
514 __entry->sync_mode,
515 __entry->for_kupdate,
516 __entry->for_background,
517 __entry->range_cyclic,
518 __entry->range_start,
519 __entry->range_end,
520 (unsigned long)__entry->cgroup_ino
521 )
522 )
523
524 #define DEFINE_WBC_EVENT(name) \
525 DEFINE_EVENT(wbc_class, name, \
526 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
527 TP_ARGS(wbc, bdi))
528 DEFINE_WBC_EVENT(wbc_writepage);
529
/*
 * Expired inodes were moved onto the IO list: @moved inodes dirtied
 * before @dirtied_before were enqueued for @work.
 */
TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 unsigned long dirtied_before,
		 int moved),
	TP_ARGS(wb, work, dirtied_before, moved),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, older)
		__field(long, age)
		__field(int, moved)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->older = dirtied_before;
		/* age of the cutoff relative to now, in milliseconds */
		__entry->age = (jiffies - dirtied_before) * 1000 / HZ;
		__entry->moved = moved;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
		__entry->name,
		__entry->older,	/* dirtied_before in jiffies */
		__entry->age,	/* dirtied_before in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		(unsigned long)__entry->cgroup_ino
	)
);
561
/*
 * Snapshot of global dirty-page accounting: node counters plus the
 * current background/dirty thresholds and the domain dirty limit.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long, nr_dirty)
		__field(unsigned long, nr_writeback)
		__field(unsigned long, background_thresh)
		__field(unsigned long, dirty_thresh)
		__field(unsigned long, dirty_limit)
		__field(unsigned long, nr_dirtied)
		__field(unsigned long, nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty = global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback = global_node_page_state(NR_WRITEBACK);
		__entry->nr_dirtied = global_node_page_state(NR_DIRTIED);
		__entry->nr_written = global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh = dirty_thresh;
		__entry->dirty_limit = global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
604
605 #define KBps(x) ((x) << (PAGE_SHIFT - 10))
606
/*
 * Dirty-ratelimit update for @wb; all bandwidth/rate fields are reported
 * in KB or KB/s via KBps().
 */
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, write_bw)
		__field(unsigned long, avg_write_bw)
		__field(unsigned long, dirty_rate)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned long, balanced_dirty_ratelimit)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
		__entry->write_bw = KBps(wb->write_bandwidth);
		__entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate = KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  (unsigned long)__entry->cgroup_ino
	)
);
652
/*
 * One iteration of the dirty-page throttling loop.  Captures the global
 * and per-wb setpoints/limits plus the pause bookkeeping; all jiffies
 * durations are converted to milliseconds.
 */
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 struct dirty_throttle_control *dtc,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, dtc,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	wb_setpoint)
		__field(unsigned long,	wb_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		/* freerun: midpoint of the bg and fg thresholds */
		unsigned long freerun = (dtc->thresh + dtc->bg_thresh) / 2;
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);

		__entry->limit		= dtc->limit;
		/* global setpoint: midway between freerun and limit */
		__entry->setpoint	= (dtc->limit + freerun) / 2;
		__entry->dirty		= dtc->dirty;
		/* per-wb setpoint scaled by the wb's share of the threshold */
		__entry->wb_setpoint	= __entry->setpoint *
						dtc->wb_thresh / (dtc->thresh + 1);
		__entry->wb_dirty	= dtc->wb_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		/* "think" time: ms since the task last paused, 0 if never */
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "wb_setpoint=%lu wb_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->wb_setpoint,
		  __entry->wb_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  (unsigned long)__entry->cgroup_ino
	)
);
732
/*
 * An inode was requeued during per-sb writeback; records its state,
 * when it was dirtied, and its age (derived at print time from jiffies).
 */
TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode_state_read_once(inode);
		__entry->dirtied_when = inode->dirtied_when;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  (unsigned long)__entry->cgroup_ino
	)
);
764
/*
 * Template bracketing writeback of a single inode: records the page
 * budget handed in (@nr_to_write) and, on completion, how much of it
 * was consumed ("wrote" = budget minus what is left in the wbc).
 */
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode_state_read_once(inode);
		__entry->dirtied_when = inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write = nr_to_write;
		/* pages written so far = original budget - remaining budget */
		__entry->wrote = nr_to_write - wbc->nr_to_write;
		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote,
		  (unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
824
/*
 * Minimal inode template: device, inode number, state, mode and when it
 * was dirtied.  Used for lazytime, enqueue and writeback-list events.
 */
DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode_state_read_once(inode);
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long)__entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);
878
879 #endif /* _TRACE_WRITEBACK_H */
880
881 /* This part must be outside protection */
882 #include <trace/define_trace.h>
883