1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM bcachefs
4 
5 #if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
6 
7 #include <linux/tracepoint.h>
8 
/* Emit the three fields of a struct bpos (inode:offset:snapshot) into a
 * TP_STRUCT__entry block, prefixed with 'name'. */
#define TRACE_BPOS_entries(name)				\
	__field(u64,			name##_inode	)	\
	__field(u64,			name##_offset	)	\
	__field(u32,			name##_snapshot	)

/* Copy a struct bpos 'src' into the __entry fields declared with
 * TRACE_BPOS_entries(dst). */
#define TRACE_BPOS_assign(dst, src)				\
	__entry->dst##_inode		= (src).inode;		\
	__entry->dst##_offset		= (src).offset;		\
	__entry->dst##_snapshot		= (src).snapshot
18 
/* Event class: records a single btree position (inode:offset:snapshot). */
DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);
33 
/* Event class: device number plus a caller-formatted string; the string is
 * built by the tracepoint caller, keeping the event definition generic. */
DECLARE_EVENT_CLASS(fs_str,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__assign_str(str);
	),

	TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
);
50 
/* Event class: like fs_str, but taken in btree transaction context — also
 * records the transaction's function name and the caller's IP. */
DECLARE_EVENT_CLASS(trans_str,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__assign_str(str);
	),

	TP_printk("%d,%d %s %pS %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
);
73 
/* Event class: trans_str without the caller IP, for call sites where the
 * caller address isn't meaningful or available. */
DECLARE_EVENT_CLASS(trans_str_nocaller,
	TP_PROTO(struct btree_trans *trans, const char *str),
	TP_ARGS(trans, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(str);
	),

	TP_printk("%d,%d %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, __get_str(str))
);
94 
/* Event class: identifies a btree node (btree id, level, key position) from
 * contexts that have a bch_fs but no btree_trans. */
DECLARE_EVENT_CLASS(btree_node_nofs,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
119 
/* Event class: btree node identity plus the owning transaction's function
 * name; used for node lifecycle events (alloc/free/split/merge/...). */
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %s %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
146 
/* Event class: records only the filesystem's device number. */
DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);
161 
/* Event class: device number plus the transaction's function name. */
DECLARE_EVENT_CLASS(btree_trans,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
);
178 
/* Event class: records a bio's device, start sector, length in sectors, and
 * rwbs flag string (blk_fill_rwbs). dev is 0 for bios without a bdev. */
DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);
201 
202 /* disk_accounting.c */
203 
/* Fired when an accounting key is inserted into the in-memory accounting
 * table; records the new table size and the key's description. */
TRACE_EVENT(accounting_mem_insert,
	TP_PROTO(struct bch_fs *c, const char *acc),
	TP_ARGS(c, acc),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned,	new_nr			)
		__string(acc,		acc			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->new_nr		= c->accounting.k.nr;
		__assign_str(acc);
	),

	TP_printk("%d,%d entries %u added %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->new_nr,
		  __get_str(acc))
);
225 
226 /* fs.c: */
/* Fired on sync_fs(); 'wait' is the syncfs wait flag passed in by the VFS. */
TRACE_EVENT(bch2_sync_fs,
	TP_PROTO(struct super_block *sb, int wait),

	TP_ARGS(sb, wait),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	int,	wait			)

	),

	TP_fast_assign(
		__entry->dev	= sb->s_dev;
		__entry->wait	= wait;
	),

	TP_printk("dev %d,%d wait %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait)
);
247 
248 /* fs-io.c: */
/* Fired on fsync(); records inode, parent inode (via the dentry), and the
 * datasync flag. */
TRACE_EVENT(bch2_fsync,
	TP_PROTO(struct file *file, int datasync),

	TP_ARGS(file, datasync),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(	ino_t,	parent			)
		__field(	int,	datasync		)
	),

	TP_fast_assign(
		struct dentry *dentry = file->f_path.dentry;

		__entry->dev		= dentry->d_sb->s_dev;
		__entry->ino		= d_inode(dentry)->i_ino;
		__entry->parent		= d_inode(dentry->d_parent)->i_ino;
		__entry->datasync	= datasync;
	),

	TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long) __entry->ino,
		  (unsigned long) __entry->parent, __entry->datasync)
);
275 
276 /* super-io.c: */
/* Fired on a superblock write; 'ip' is the caller requesting the write. */
TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev	)
		__field(unsigned long,	ip	)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->ip		= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);
295 
296 /* io.c: */
297 
/* Read triggered a promote (rewrite to a faster/target device). */
DEFINE_EVENT(bio, io_read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
302 
/* Read was not promoted; records the error-string reason for skipping. */
TRACE_EVENT(io_read_nopromote,
	TP_PROTO(struct bch_fs *c, int ret),
	TP_ARGS(c, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__array(char,		ret, 32		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%d,%d ret %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ret)
);
321 
/* Additional read-path bio events: bounce-buffered read, split read, retried
 * read, and a retry that raced with extent reuse. */
DEFINE_EVENT(bio, io_read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, io_read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, io_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, io_read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
341 
342 /* ec.c */
343 
/* Erasure-coding stripe creation completed; records stripe index and result. */
TRACE_EVENT(stripe_create,
	TP_PROTO(struct bch_fs *c, u64 idx, int ret),
	TP_ARGS(c, idx, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		idx			)
		__field(int,		ret			)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->idx			= idx;
		__entry->ret			= ret;
	),

	TP_printk("%d,%d idx %llu ret %i",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->idx,
		  __entry->ret)
);
365 
366 /* Journal */
367 
/* Journal events: journal out of space, journal entry full, entry closed,
 * and journal write submission. */
DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(fs_str, journal_entry_full,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, journal_entry_close,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
387 
/* Journal reclaim pass starting; snapshots the dirty/total counts for the
 * btree cache and btree key cache plus the reclaim targets. */
TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(bool,		direct			)
		__field(bool,		kicked			)
		__field(u64,		min_nr			)
		__field(u64,		min_key_cache		)
		__field(u64,		btree_cache_dirty	)
		__field(u64,		btree_cache_total	)
		__field(u64,		btree_key_cache_dirty	)
		__field(u64,		btree_key_cache_total	)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);
432 
/* Journal reclaim pass finished; records how many keys were flushed. */
TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		nr_flushed		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);
451 
452 /* bset.c: */
453 
/* A bpos could not be packed into the btree node's packed-key format. */
DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);
458 
459 /* Btree cache: */
460 
/* Btree cache shrinker scan: requested count, freeable count, and result. */
TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long,	nr_to_scan		)
		__field(long,	can_free		)
		__field(long,	ret			)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);
480 
/* Btree cache events: node reaped by the shrinker, and the cannibalize-lock
 * lifecycle (lock fail / lock / cannibalize / unlock). */
DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);
505 
506 /* Btree */
507 
/* Btree node read from disk. */
DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
512 
513 TRACE_EVENT(btree_node_write,
514 	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
515 	TP_ARGS(b, bytes, sectors),
516 
517 	TP_STRUCT__entry(
518 		__field(enum btree_node_type,	type)
519 		__field(unsigned,	bytes			)
520 		__field(unsigned,	sectors			)
521 	),
522 
523 	TP_fast_assign(
524 		__entry->type	= btree_node_type(b);
525 		__entry->bytes	= bytes;
526 		__entry->sectors = sectors;
527 	),
528 
529 	TP_printk("bkey type %u bytes %u sectors %u",
530 		  __entry->type , __entry->bytes, __entry->sectors)
531 );
532 
/* Btree node allocated / freed. */
DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
542 
/* Failed to get a btree node reserve; records the requesting transaction,
 * how many nodes were required, and the error as a string. */
TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required,
		 int ret),
	TP_ARGS(trans_fn, caller_ip, required, ret),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(size_t,			required	)
		__array(char,			ret, 32		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->required	= required;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%s %pS required %zu ret %s",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->required,
		  __entry->ret)
);
570 
/* Btree node update events: compact, merge, split, rewrite, set-root. */
DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
595 
596 TRACE_EVENT(btree_path_relock_fail,
597 	TP_PROTO(struct btree_trans *trans,
598 		 unsigned long caller_ip,
599 		 struct btree_path *path,
600 		 unsigned level),
601 	TP_ARGS(trans, caller_ip, path, level),
602 
603 	TP_STRUCT__entry(
604 		__array(char,			trans_fn, 32	)
605 		__field(unsigned long,		caller_ip	)
606 		__field(u8,			btree_id	)
607 		__field(u8,			level		)
608 		__field(u8,			path_idx)
609 		TRACE_BPOS_entries(pos)
610 		__array(char,			node, 24	)
611 		__field(u8,			self_read_count	)
612 		__field(u8,			self_intent_count)
613 		__field(u8,			read_count	)
614 		__field(u8,			intent_count	)
615 		__field(u32,			iter_lock_seq	)
616 		__field(u32,			node_lock_seq	)
617 	),
618 
619 	TP_fast_assign(
620 		struct btree *b = btree_path_node(path, level);
621 		struct six_lock_count c;
622 
623 		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
624 		__entry->caller_ip		= caller_ip;
625 		__entry->btree_id		= path->btree_id;
626 		__entry->level			= level;
627 		__entry->path_idx		= path - trans->paths;
628 		TRACE_BPOS_assign(pos, path->pos);
629 
630 		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
631 		__entry->self_read_count	= c.n[SIX_LOCK_read];
632 		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
633 
634 		if (IS_ERR(b)) {
635 			strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
636 		} else {
637 			c = six_lock_counts(&path->l[level].b->c.lock);
638 			__entry->read_count	= c.n[SIX_LOCK_read];
639 			__entry->intent_count	= c.n[SIX_LOCK_intent];
640 			scnprintf(__entry->node, sizeof(__entry->node), "%px", &b->c);
641 		}
642 		__entry->iter_lock_seq		= path->l[level].lock_seq;
643 		__entry->node_lock_seq		= is_btree_node(path, level)
644 			? six_lock_seq(&path->l[level].b->c.lock)
645 			: 0;
646 	),
647 
648 	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
649 		  __entry->trans_fn,
650 		  (void *) __entry->caller_ip,
651 		  __entry->path_idx,
652 		  bch2_btree_id_str(__entry->btree_id),
653 		  __entry->pos_inode,
654 		  __entry->pos_offset,
655 		  __entry->pos_snapshot,
656 		  __entry->level,
657 		  __entry->node,
658 		  __entry->self_read_count,
659 		  __entry->self_intent_count,
660 		  __entry->read_count,
661 		  __entry->intent_count,
662 		  __entry->iter_lock_seq,
663 		  __entry->node_lock_seq)
664 );
665 
666 TRACE_EVENT(btree_path_upgrade_fail,
667 	TP_PROTO(struct btree_trans *trans,
668 		 unsigned long caller_ip,
669 		 struct btree_path *path,
670 		 unsigned level),
671 	TP_ARGS(trans, caller_ip, path, level),
672 
673 	TP_STRUCT__entry(
674 		__array(char,			trans_fn, 32	)
675 		__field(unsigned long,		caller_ip	)
676 		__field(u8,			btree_id	)
677 		__field(u8,			level		)
678 		__field(u8,			path_idx)
679 		TRACE_BPOS_entries(pos)
680 		__field(u8,			locked		)
681 		__field(u8,			self_read_count	)
682 		__field(u8,			self_intent_count)
683 		__field(u8,			read_count	)
684 		__field(u8,			intent_count	)
685 		__field(u32,			iter_lock_seq	)
686 		__field(u32,			node_lock_seq	)
687 	),
688 
689 	TP_fast_assign(
690 		struct six_lock_count c;
691 
692 		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
693 		__entry->caller_ip		= caller_ip;
694 		__entry->btree_id		= path->btree_id;
695 		__entry->level			= level;
696 		__entry->path_idx		= path - trans->paths;
697 		TRACE_BPOS_assign(pos, path->pos);
698 		__entry->locked			= btree_node_locked(path, level);
699 
700 		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
701 		__entry->self_read_count	= c.n[SIX_LOCK_read];
702 		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
703 		c = six_lock_counts(&path->l[level].b->c.lock);
704 		__entry->read_count		= c.n[SIX_LOCK_read];
705 		__entry->intent_count		= c.n[SIX_LOCK_intent];
706 		__entry->iter_lock_seq		= path->l[level].lock_seq;
707 		__entry->node_lock_seq		= is_btree_node(path, level)
708 			? six_lock_seq(&path->l[level].b->c.lock)
709 			: 0;
710 	),
711 
712 	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
713 		  __entry->trans_fn,
714 		  (void *) __entry->caller_ip,
715 		  __entry->path_idx,
716 		  bch2_btree_id_str(__entry->btree_id),
717 		  __entry->pos_inode,
718 		  __entry->pos_offset,
719 		  __entry->pos_snapshot,
720 		  __entry->level,
721 		  __entry->locked,
722 		  __entry->self_read_count,
723 		  __entry->self_intent_count,
724 		  __entry->read_count,
725 		  __entry->intent_count,
726 		  __entry->iter_lock_seq,
727 		  __entry->node_lock_seq)
728 );
729 
730 /* Garbage collection */
731 
/* Generation GC start/end markers. */
DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Allocator */

/* Bucket allocation success / failure, details formatted by the caller. */
DEFINE_EVENT(fs_str, bucket_alloc,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, bucket_alloc_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
753 
754 DECLARE_EVENT_CLASS(discard_buckets_class,
755 	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
756 		 u64 need_journal_commit, u64 discarded, const char *err),
757 	TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
758 
759 	TP_STRUCT__entry(
760 		__field(dev_t,		dev			)
761 		__field(u64,		seen			)
762 		__field(u64,		open			)
763 		__field(u64,		need_journal_commit	)
764 		__field(u64,		discarded		)
765 		__array(char,		err,	16		)
766 	),
767 
768 	TP_fast_assign(
769 		__entry->dev			= c->dev;
770 		__entry->seen			= seen;
771 		__entry->open			= open;
772 		__entry->need_journal_commit	= need_journal_commit;
773 		__entry->discarded		= discarded;
774 		strscpy(__entry->err, err, sizeof(__entry->err));
775 	),
776 
777 	TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
778 		  MAJOR(__entry->dev), MINOR(__entry->dev),
779 		  __entry->seen,
780 		  __entry->open,
781 		  __entry->need_journal_commit,
782 		  __entry->discarded,
783 		  __entry->err)
784 );
785 
/* Discard pass events: normal path and fast path. */
DEFINE_EVENT(discard_buckets_class, discard_buckets,
	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
		 u64 need_journal_commit, u64 discarded, const char *err),
	TP_ARGS(c, seen, open, need_journal_commit, discarded, err)
);

DEFINE_EVENT(discard_buckets_class, discard_buckets_fast,
	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
		 u64 need_journal_commit, u64 discarded, const char *err),
	TP_ARGS(c, seen, open, need_journal_commit, discarded, err)
);
797 
/* A cached-data bucket was invalidated; records member device index, bucket
 * number, and how many cached sectors were dropped. */
TRACE_EVENT(bucket_invalidate,
	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
	TP_ARGS(c, dev, bucket, sectors),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u32,		sectors			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= dev;
		__entry->sectors	= sectors;
		__entry->bucket		= bucket;
	),

	TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket,
		  __entry->sectors)
);
821 
822 /* Moving IO */
823 
/* Data-move pipeline events: move queued, read issued, write issued, move
 * finished, and the various failure points. */
DEFINE_EVENT(fs_str, io_move,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_read,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_write,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_finish,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_write_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_start_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
858 
859 TRACE_EVENT(move_data,
860 	TP_PROTO(struct bch_fs *c,
861 		 struct bch_move_stats *stats),
862 	TP_ARGS(c, stats),
863 
864 	TP_STRUCT__entry(
865 		__field(dev_t,		dev		)
866 		__field(u64,		keys_moved	)
867 		__field(u64,		keys_raced	)
868 		__field(u64,		sectors_seen	)
869 		__field(u64,		sectors_moved	)
870 		__field(u64,		sectors_raced	)
871 	),
872 
873 	TP_fast_assign(
874 		__entry->dev		= c->dev;
875 		__entry->keys_moved	= atomic64_read(&stats->keys_moved);
876 		__entry->keys_raced	= atomic64_read(&stats->keys_raced);
877 		__entry->sectors_seen	= atomic64_read(&stats->sectors_seen);
878 		__entry->sectors_moved	= atomic64_read(&stats->sectors_moved);
879 		__entry->sectors_raced	= atomic64_read(&stats->sectors_raced);
880 	),
881 
882 	TP_printk("%d,%d keys moved %llu raced %llu"
883 		  "sectors seen %llu moved %llu raced %llu",
884 		  MAJOR(__entry->dev), MINOR(__entry->dev),
885 		  __entry->keys_moved,
886 		  __entry->keys_raced,
887 		  __entry->sectors_seen,
888 		  __entry->sectors_moved,
889 		  __entry->sectors_raced)
890 );
891 
/* Copygc pass summary: buckets evacuated, sectors seen and moved. */
TRACE_EVENT(copygc,
	TP_PROTO(struct bch_fs *c,
		 u64 buckets,
		 u64 sectors_seen,
		 u64 sectors_moved),
	TP_ARGS(c, buckets, sectors_seen, sectors_moved),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		buckets			)
		__field(u64,		sectors_seen		)
		__field(u64,		sectors_moved		)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->buckets		= buckets;
		__entry->sectors_seen		= sectors_seen;
		__entry->sectors_moved		= sectors_moved;
	),

	TP_printk("%d,%d buckets %llu sectors seen %llu moved %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->buckets,
		  __entry->sectors_seen,
		  __entry->sectors_moved)
);
919 
/* Copygc decided to wait; records how many sectors it's waiting for and the
 * threshold it's waiting until. */
TRACE_EVENT(copygc_wait,
	TP_PROTO(struct bch_fs *c,
		 u64 wait_amount, u64 until),
	TP_ARGS(c, wait_amount, until),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		wait_amount		)
		__field(u64,		until			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->wait_amount	= wait_amount;
		__entry->until		= until;
	),

	TP_printk("%d,%u waiting for %llu sectors until %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait_amount, __entry->until)
);
941 
942 /* btree transactions: */
943 
/* Event class: transaction function name plus caller IP; base class for
 * transaction commit/restart events with no extra payload. */
DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
961 
/* Transaction committed / restart injected by fault injection. */
DEFINE_EVENT(transaction_event,	transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event,	trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
973 
/* Transaction restarted after racing with a node split; records the node's
 * fill state to help diagnose the race. */
TRACE_EVENT(trans_restart_split_race,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			level		)
		__field(u16,			written		)
		__field(u16,			blocks		)
		__field(u16,			u64s_remaining	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->level		= b->c.level;
		__entry->written	= b->written;
		__entry->blocks		= btree_blocks(trans->c);
		__entry->u64s_remaining	= bch2_btree_keys_u64s_remaining(b);
	),

	TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->level,
		  __entry->written, __entry->blocks,
		  __entry->u64s_remaining)
);
1004 
/* Transaction blocked waiting on journal reclaim; snapshots key cache
 * key/dirty counts and the must-wait threshold. */
TRACE_EVENT(trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)

		__field(unsigned long,		key_cache_nr_keys	)
		__field(unsigned long,		key_cache_nr_dirty	)
		__field(long,			must_wait		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->key_cache_nr_keys	= atomic_long_read(&trans->c->btree_key_cache.nr_keys);
		__entry->key_cache_nr_dirty	= atomic_long_read(&trans->c->btree_key_cache.nr_dirty);
		__entry->must_wait		= __bch2_btree_key_cache_must_wait(trans->c);
	),

	TP_printk("%s %pS key cache keys %lu dirty %lu must_wait %li",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->key_cache_nr_keys,
		  __entry->key_cache_nr_dirty,
		  __entry->must_wait)
);
1033 
/* Transaction restarted because a journal pre-reservation couldn't be
 * obtained; records the reservation flags. */
TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		flags		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->flags			= flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->flags)
);
1056 
/* Misc transaction restart events: fault injection, traverse-all, key cache
 * race, and too-many-iterators (with a dump of the paths in use). */
DEFINE_EVENT(transaction_event,	trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event,	trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event,	trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(trans_str, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 const char *paths),
	TP_ARGS(trans, caller_ip, paths)
);
1081 
/* Event class: transaction restart attributable to a specific btree path;
 * records the path's btree id and position. */
DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1110 
/* Restart because a btree node was reused out from under us, or split. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1124 
/*
 * trans_restart_upgrade - restart during a lock upgrade on a path.
 * Records the old/new locks_want, the level the failure occurred at
 * (f->l), and the path's vs. the node's lock sequence number at that
 * level (node_seq is 0 when f->b is an error pointer or NULL).
 */
TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want,
		 struct get_locks_fail *f),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			old_locks_want	)
		__field(u8,			new_locks_want	)
		__field(u8,			level		)
		__field(u32,			path_seq	)
		__field(u32,			node_seq	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= new_locks_want;
		__entry->level			= f->l;
		__entry->path_seq		= path->l[f->l].lock_seq;
		__entry->node_seq		= IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  __entry->level,
		  __entry->path_seq,
		  __entry->node_seq)
);
1171 
/* Relock restart with a caller-formatted description string. */
DEFINE_EVENT(trans_str,	trans_restart_relock,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str)
);
1176 
/* Relock-related restarts tagged with the path that failed to relock. */

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_after_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1197 
/* Key-cache lock upgrade restart; no per-path payload. */
DEFINE_EVENT(transaction_event,	trans_restart_key_cache_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1203 
/* More per-path restarts (transaction_restart_iter instances). */

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_traverse,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1238 
/*
 * Deadlock-avoidance restarts.  The first carries a caller-formatted
 * description of the lock cycle (no caller IP in this class).
 */
DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 const char *cycle),
	TP_ARGS(trans, cycle)
);

DEFINE_EVENT(transaction_event,	trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1250 
/* Would-deadlock restart on the write path; records only trans->fn. */
TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);
1265 
/*
 * Restart after the transaction's preallocated memory was reallocated;
 * records the new size in bytes.
 */
TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned long,		bytes		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->bytes		= bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->bytes)
);
1289 
/*
 * Restart after a key-cache key was reallocated; records the old and
 * new key sizes in u64s.
 */
TRACE_EVENT(trans_restart_key_cache_key_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_u64s,
		 unsigned new_u64s),
	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		/* NOTE(review): other events store btree_id as u8 — confirm the wider enum field here is intentional */
		__field(enum btree_id,		btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u32,			old_u64s	)
		__field(u32,			new_u64s	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;

		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->old_u64s	= old_u64s;
		__entry->new_u64s	= new_u64s;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_u64s,
		  __entry->new_u64s)
);
1327 
/* Restart triggered by a write buffer flush; no extra payload. */
DEFINE_EVENT(transaction_event,	trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1333 
/*
 * path_downgrade - a path's locks_want was lowered; records the old
 * value from the caller and the new value read back from the path.
 */
TRACE_EVENT(path_downgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want),
	TP_ARGS(trans, caller_ip, path, old_locks_want),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		old_locks_want	)
		__field(unsigned,		new_locks_want	)
		__field(unsigned,		btree		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= path->locks_want;
		__entry->btree			= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  bch2_btree_id_str(__entry->btree),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1369 
/* key_cache_fill - records trans->fn and a caller-formatted key string. */
TRACE_EVENT(key_cache_fill,
	TP_PROTO(struct btree_trans *trans, const char *key),
	TP_ARGS(trans, key),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32	)
		__string(key,		key			)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(key);
	),

	TP_printk("%s %s", __entry->trans_fn, __get_str(key))
);
1386 
/*
 * write_buffer_flush - flush statistics: entries flushed (nr) out of
 * buffer size, plus skipped and fast-path counts.  The trans argument
 * is accepted but not recorded.
 */
TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		skipped		)
		__field(size_t,		fast		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->skipped = skipped;
		__entry->fast	= fast;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);
1408 
/* write_buffer_flush_sync - records trans->fn and the caller IP. */
TRACE_EVENT(write_buffer_flush_sync,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
1425 
/*
 * write_buffer_flush_slowpath - how many of the flushed entries took
 * the slow path, out of the total.  trans is accepted but not recorded.
 */
TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
	TP_ARGS(trans, slowpath, total),

	TP_STRUCT__entry(
		__field(size_t,		slowpath	)
		__field(size_t,		total		)
	),

	TP_fast_assign(
		__entry->slowpath	= slowpath;
		__entry->total		= total;
	),

	TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
);
1442 
1443 TRACE_EVENT(write_buffer_maybe_flush,
1444 	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *key),
1445 	TP_ARGS(trans, caller_ip, key),
1446 
1447 	TP_STRUCT__entry(
1448 		__array(char,			trans_fn, 32	)
1449 		__field(unsigned long,		caller_ip	)
1450 		__string(key,			key		)
1451 	),
1452 
1453 	TP_fast_assign(
1454 		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1455 		__assign_str(key);
1456 	),
1457 
1458 	TP_printk("%s %pS %s", __entry->trans_fn, (void *) __entry->caller_ip, __get_str(key))
1459 );
1460 
/* fs_str instances: device number plus a caller-formatted string. */

DEFINE_EVENT(fs_str, rebalance_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, data_update,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
1470 
/*
 * error_downcast - records a bcachefs-private error code being mapped
 * to a standard errno, plus the instruction pointer where it happened.
 * All three are stored as strings so the event needs no symbol lookup
 * at read time.
 */
TRACE_EVENT(error_downcast,
	TP_PROTO(int bch_err, int std_err, unsigned long ip),
	TP_ARGS(bch_err, std_err, ip),

	TP_STRUCT__entry(
		__array(char,		bch_err, 32		)
		__array(char,		std_err, 32		)
		__array(char,		ip, 32			)
	),

	TP_fast_assign(
		strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err));
		strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err));
		snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
	),

	TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
);
1489 
1490 #ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS
1491 
/*
 * update_by_path - records an update entry being queued on a path:
 * the path's index within trans->paths, its btree/position, whether
 * this is an overwrite, and the update's index out of nr_updates.
 */
TRACE_EVENT(update_by_path,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path,
		 struct btree_insert_entry *i, bool overwrite),
	TP_ARGS(trans, path, i, overwrite),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(btree_path_idx_t,	path_idx	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u8,			overwrite	)
		__field(btree_path_idx_t,	update_idx	)
		__field(btree_path_idx_t,	nr_updates	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->path_idx		= path - trans->paths;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->overwrite		= overwrite;
		__entry->update_idx		= i - trans->updates;
		__entry->nr_updates		= trans->nr_updates;
	),

	TP_printk("%s path %3u btree %s pos %llu:%llu:%u overwrite %u update %u/%u",
		  __entry->trans_fn,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->overwrite,
		  __entry->update_idx,
		  __entry->nr_updates)
);
1528 
/*
 * btree_path_lock - records taking a lock on a (possibly key-cache)
 * node: btree/level, the node's address formatted as a string, and
 * its six-lock sequence number.
 */
TRACE_EVENT(btree_path_lock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_bkey_cached_common *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			level		)
		__array(char,			node, 24	)
		__field(u32,			lock_seq	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= b->btree_id;
		__entry->level			= b->level;

		/* %px: raw pointer, deliberately unhashed for debugging */
		scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
		__entry->lock_seq		= six_lock_seq(&b->lock);
	),

	TP_printk("%s %pS\nbtree %s level %u node %s lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->level,
		  __entry->node,
		  __entry->lock_seq)
);
1562 
/*
 * btree_path_ev - event class for low-level path refcount events:
 * records the path index, its refcount, btree id and position.
 */
DECLARE_EVENT_CLASS(btree_path_ev,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(u16,			idx		)
		__field(u8,			ref		)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx			= path - trans->paths;
		__entry->ref			= path->ref;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("path %3u ref %u btree %s pos %llu:%llu:%u",
		  __entry->idx, __entry->ref,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1588 
/* btree_path_ev instances for get/put/should_be_locked. */

DEFINE_EVENT(btree_path_ev, btree_path_get_ll,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_ev, btree_path_put_ll,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_ev, btree_path_should_be_locked,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);
1603 
/*
 * btree_path_alloc - a new path slot was allocated: records its index,
 * locks_want, btree id and position.
 */
TRACE_EVENT(btree_path_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			locks_want	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx			= path - trans->paths;
		__entry->locks_want		= path->locks_want;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("path %3u btree %s locks_want %u pos %llu:%llu:%u",
		  __entry->idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->locks_want,
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1630 
/*
 * btree_path_get - a path was taken (ref incremented) and is being
 * repositioned: records its old position and the requested new one.
 */
TRACE_EVENT(btree_path_get,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos),
	TP_ARGS(trans, path, new_pos),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		__field(u8,			locks_want	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(old_pos)
		TRACE_BPOS_entries(new_pos)
	),

	TP_fast_assign(
		__entry->idx			= path - trans->paths;
		__entry->ref			= path->ref;
		__entry->preserve		= path->preserve;
		__entry->locks_want		= path->locks_want;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(old_pos, path->pos);
		TRACE_BPOS_assign(new_pos, *new_pos);
	),

	TP_printk("    path %3u ref %u preserve %u btree %s locks_want %u pos %llu:%llu:%u -> %llu:%llu:%u",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->locks_want,
		  __entry->old_pos_inode,
		  __entry->old_pos_offset,
		  __entry->old_pos_snapshot,
		  __entry->new_pos_inode,
		  __entry->new_pos_offset,
		  __entry->new_pos_snapshot)
);
1668 
/*
 * btree_path_clone - event class for duplicating a path: records the
 * source path's index/state and the index of the new copy.
 */
DECLARE_EVENT_CLASS(btree_path_clone,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			new_idx		)
		__field(u8,			btree_id	)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx			= path - trans->paths;
		__entry->new_idx		= new - trans->paths;
		__entry->btree_id		= path->btree_id;
		__entry->ref			= path->ref;
		__entry->preserve		= path->preserve;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("  path %3u ref %u preserve %u btree %s %llu:%llu:%u -> %u",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->new_idx)
);
1701 
/* btree_path_clone instances. */

DEFINE_EVENT(btree_path_clone, btree_path_clone,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new)
);

DEFINE_EVENT(btree_path_clone, btree_path_save_pos,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new)
);
1711 
1712 DECLARE_EVENT_CLASS(btree_path_traverse,
1713 	TP_PROTO(struct btree_trans *trans,
1714 		 struct btree_path *path),
1715 	TP_ARGS(trans, path),
1716 
1717 	TP_STRUCT__entry(
1718 		__array(char,			trans_fn, 32	)
1719 		__field(btree_path_idx_t,	idx		)
1720 		__field(u8,			ref		)
1721 		__field(u8,			preserve	)
1722 		__field(u8,			should_be_locked )
1723 		__field(u8,			btree_id	)
1724 		__field(u8,			level		)
1725 		TRACE_BPOS_entries(pos)
1726 		__field(u8,			locks_want	)
1727 		__field(u8,			nodes_locked	)
1728 		__array(char,			node0, 24	)
1729 		__array(char,			node1, 24	)
1730 		__array(char,			node2, 24	)
1731 		__array(char,			node3, 24	)
1732 	),
1733 
1734 	TP_fast_assign(
1735 		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1736 
1737 		__entry->idx			= path - trans->paths;
1738 		__entry->ref			= path->ref;
1739 		__entry->preserve		= path->preserve;
1740 		__entry->btree_id		= path->btree_id;
1741 		__entry->level			= path->level;
1742 		TRACE_BPOS_assign(pos, path->pos);
1743 
1744 		__entry->locks_want		= path->locks_want;
1745 		__entry->nodes_locked		= path->nodes_locked;
1746 		struct btree *b = path->l[0].b;
1747 		if (IS_ERR(b))
1748 			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1749 		else
1750 			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
1751 		b = path->l[1].b;
1752 		if (IS_ERR(b))
1753 			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1754 		else
1755 			scnprintf(__entry->node1, sizeof(__entry->node0), "%px", &b->c);
1756 		b = path->l[2].b;
1757 		if (IS_ERR(b))
1758 			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1759 		else
1760 			scnprintf(__entry->node2, sizeof(__entry->node0), "%px", &b->c);
1761 		b = path->l[3].b;
1762 		if (IS_ERR(b))
1763 			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1764 		else
1765 			scnprintf(__entry->node3, sizeof(__entry->node0), "%px", &b->c);
1766 	),
1767 
1768 	TP_printk("%s\npath %3u ref %u preserve %u btree %s %llu:%llu:%u level %u locks_want %u\n"
1769 		  "locks %u %u %u %u node %s %s %s %s",
1770 		  __entry->trans_fn,
1771 		  __entry->idx,
1772 		  __entry->ref,
1773 		  __entry->preserve,
1774 		  bch2_btree_id_str(__entry->btree_id),
1775 		  __entry->pos_inode,
1776 		  __entry->pos_offset,
1777 		  __entry->pos_snapshot,
1778 		  __entry->level,
1779 		  __entry->locks_want,
1780 		  (__entry->nodes_locked >> 6) & 3,
1781 		  (__entry->nodes_locked >> 4) & 3,
1782 		  (__entry->nodes_locked >> 2) & 3,
1783 		  (__entry->nodes_locked >> 0) & 3,
1784 		  __entry->node3,
1785 		  __entry->node2,
1786 		  __entry->node1,
1787 		  __entry->node0)
1788 );
1789 
/* btree_path_traverse instances, fired before and after traversal. */

DEFINE_EVENT(btree_path_traverse, btree_path_traverse_start,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_traverse, btree_path_traverse_end,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);
1800 
1801 TRACE_EVENT(btree_path_set_pos,
1802 	TP_PROTO(struct btree_trans *trans,
1803 		 struct btree_path *path,
1804 		 struct bpos *new_pos),
1805 	TP_ARGS(trans, path, new_pos),
1806 
1807 	TP_STRUCT__entry(
1808 		__field(btree_path_idx_t,	idx		)
1809 		__field(u8,			ref		)
1810 		__field(u8,			preserve	)
1811 		__field(u8,			btree_id	)
1812 		TRACE_BPOS_entries(old_pos)
1813 		TRACE_BPOS_entries(new_pos)
1814 		__field(u8,			locks_want	)
1815 		__field(u8,			nodes_locked	)
1816 		__array(char,			node0, 24	)
1817 		__array(char,			node1, 24	)
1818 		__array(char,			node2, 24	)
1819 		__array(char,			node3, 24	)
1820 	),
1821 
1822 	TP_fast_assign(
1823 		__entry->idx			= path - trans->paths;
1824 		__entry->ref			= path->ref;
1825 		__entry->preserve		= path->preserve;
1826 		__entry->btree_id		= path->btree_id;
1827 		TRACE_BPOS_assign(old_pos, path->pos);
1828 		TRACE_BPOS_assign(new_pos, *new_pos);
1829 
1830 		__entry->nodes_locked		= path->nodes_locked;
1831 		struct btree *b = path->l[0].b;
1832 		if (IS_ERR(b))
1833 			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1834 		else
1835 			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
1836 		b = path->l[1].b;
1837 		if (IS_ERR(b))
1838 			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1839 		else
1840 			scnprintf(__entry->node1, sizeof(__entry->node0), "%px", &b->c);
1841 		b = path->l[2].b;
1842 		if (IS_ERR(b))
1843 			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1844 		else
1845 			scnprintf(__entry->node2, sizeof(__entry->node0), "%px", &b->c);
1846 		b = path->l[3].b;
1847 		if (IS_ERR(b))
1848 			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1849 		else
1850 			scnprintf(__entry->node3, sizeof(__entry->node0), "%px", &b->c);
1851 	),
1852 
1853 	TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
1854 		  "locks %u %u %u %u node %s %s %s %s",
1855 		  __entry->idx,
1856 		  __entry->ref,
1857 		  __entry->preserve,
1858 		  bch2_btree_id_str(__entry->btree_id),
1859 		  __entry->old_pos_inode,
1860 		  __entry->old_pos_offset,
1861 		  __entry->old_pos_snapshot,
1862 		  __entry->new_pos_inode,
1863 		  __entry->new_pos_offset,
1864 		  __entry->new_pos_snapshot,
1865 		  (__entry->nodes_locked >> 6) & 3,
1866 		  (__entry->nodes_locked >> 4) & 3,
1867 		  (__entry->nodes_locked >> 2) & 3,
1868 		  (__entry->nodes_locked >> 0) & 3,
1869 		  __entry->node3,
1870 		  __entry->node2,
1871 		  __entry->node1,
1872 		  __entry->node0)
1873 );
1874 
/*
 * btree_path_free - a path slot is being freed: records its preserve /
 * should_be_locked flags and, if a duplicate path exists, the dup's
 * index and whether the dup holds a node lock at its level.
 */
TRACE_EVENT(btree_path_free,
	TP_PROTO(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup),
	TP_ARGS(trans, path, dup),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			preserve	)
		__field(u8,			should_be_locked)
		/* NOTE(review): s8 truncates dup indices > 127 — confirm path count bound */
		__field(s8,			dup		)
		__field(u8,			dup_locked	)
	),

	TP_fast_assign(
		__entry->idx			= path;
		__entry->preserve		= trans->paths[path].preserve;
		__entry->should_be_locked	= trans->paths[path].should_be_locked;
		__entry->dup			= dup ? dup - trans->paths  : -1;
		__entry->dup_locked		= dup ? btree_node_locked(dup, dup->level) : 0;
	),

	TP_printk("   path %3u %c %c dup %2i locked %u", __entry->idx,
		  __entry->preserve ? 'P' : ' ',
		  __entry->should_be_locked ? 'S' : ' ',
		  __entry->dup,
		  __entry->dup_locked)
);
1901 
/* btree_path_free_trans_begin - path freed at transaction begin; index only. */
TRACE_EVENT(btree_path_free_trans_begin,
	TP_PROTO(btree_path_idx_t path),
	TP_ARGS(path),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
	),

	TP_fast_assign(
		__entry->idx			= path;
	),

	TP_printk("   path %3u", __entry->idx)
);
1916 
1917 #else /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
1918 #ifndef _TRACE_BCACHEFS_H
1919 
/*
 * No-op stubs so callers compile unchanged when
 * CONFIG_BCACHEFS_PATH_TRACEPOINTS is not set.
 */
static inline void trace_update_by_path(struct btree_trans *trans, struct btree_path *path,
					struct btree_insert_entry *i, bool overwrite) {}
static inline void trace_btree_path_lock(struct btree_trans *trans, unsigned long caller_ip, struct btree_bkey_cached_common *b) {}
static inline void trace_btree_path_get_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_put_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_should_be_locked(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_alloc(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_get(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_clone(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}
static inline void trace_btree_path_free_trans_begin(btree_path_idx_t path) {}
1935 
1936 #endif
1937 #endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
1938 
1939 #define _TRACE_BCACHEFS_H
1940 #endif /* _TRACE_BCACHEFS_H */
1941 
1942 /* This part must be outside protection */
1943 #undef TRACE_INCLUDE_PATH
1944 #define TRACE_INCLUDE_PATH ../../fs/bcachefs
1945 
1946 #undef TRACE_INCLUDE_FILE
1947 #define TRACE_INCLUDE_FILE trace
1948 
1949 #include <trace/define_trace.h>
1950