/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Tracepoints for Thunderbolt/USB4 networking driver
 *
 * Copyright (C) 2023, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM thunderbolt_net

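/*
 * Like other tracepoint headers this file is included more than once:
 * define_trace.h sets TRACE_HEADER_MULTI_READ and re-reads it to expand
 * the event definitions below, which is why a plain include guard is not
 * enough here.
 */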
#if !defined(__TRACE_THUNDERBOLT_NET_H) || defined(TRACE_HEADER_MULTI_READ)
#define __TRACE_THUNDERBOLT_NET_H

#include <linux/dma-direction.h>
#include <linux/skbuff.h>
#include <linux/tracepoint.h>

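/*
 * Map enum dma_data_direction values to strings so TP_printk() below can
 * print the direction symbolically via __print_symbolic().
 */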
#define DMA_DATA_DIRECTION_NAMES			\
	{ DMA_BIDIRECTIONAL, "DMA_BIDIRECTIONAL" },	\
	{ DMA_TO_DEVICE, "DMA_TO_DEVICE" },		\
	{ DMA_FROM_DEVICE, "DMA_FROM_DEVICE" },		\
	{ DMA_NONE, "DMA_NONE" }

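/*
 * Frame buffer life cycle events: each one records the ring index, the
 * buffer's page pointer and DMA address, and the mapping direction.
 * The generated trace_tbnet_*_frame() helpers are meant to be called
 * from the driver's buffer allocation/free paths, roughly like this
 * (illustrative sketch, not part of this header):
 *
 *	trace_tbnet_alloc_rx_frame(index, page, dma_addr, DMA_FROM_DEVICE);
 */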
DECLARE_EVENT_CLASS(tbnet_frame,
	TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
		 enum dma_data_direction dir),
	TP_ARGS(index, page, phys, dir),
	TP_STRUCT__entry(
		__field(unsigned int, index)
		__field(const void *, page)
		__field(dma_addr_t, phys)
		__field(enum dma_data_direction, dir)
	),
	TP_fast_assign(
		__entry->index = index;
		__entry->page = page;
		__entry->phys = phys;
		__entry->dir = dir;
	),
	TP_printk("index=%u page=%p phys=%pad dir=%s",
		  __entry->index, __entry->page, &__entry->phys,
		  __print_symbolic(__entry->dir, DMA_DATA_DIRECTION_NAMES))
);

DEFINE_EVENT(tbnet_frame, tbnet_alloc_rx_frame,
	TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
		 enum dma_data_direction dir),
	TP_ARGS(index, page, phys, dir)
);

DEFINE_EVENT(tbnet_frame, tbnet_alloc_tx_frame,
	TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
		 enum dma_data_direction dir),
	TP_ARGS(index, page, phys, dir)
);

DEFINE_EVENT(tbnet_frame, tbnet_free_frame,
	TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
		 enum dma_data_direction dir),
	TP_ARGS(index, page, phys, dir)
);

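/*
 * ThunderboltIP frame header events: size, id, index and count arrive in
 * little-endian on-wire order and are converted to CPU byte order before
 * being stored. A call site might look like this (illustrative sketch;
 * the field names assume the driver's on-wire frame header layout):
 *
 *	trace_tbnet_rx_ip_frame(hdr->frame_size, hdr->frame_id,
 *				hdr->frame_index, hdr->frame_count);
 */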
DECLARE_EVENT_CLASS(tbnet_ip_frame,
	TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
	TP_ARGS(size, id, index, count),
	TP_STRUCT__entry(
		__field(u32, size)
		__field(u16, id)
		__field(u16, index)
		__field(u32, count)
	),
	TP_fast_assign(
		__entry->size = le32_to_cpu(size);
		__entry->id = le16_to_cpu(id);
		__entry->index = le16_to_cpu(index);
		__entry->count = le32_to_cpu(count);
	),
	TP_printk("id=%u size=%u index=%u count=%u",
		  __entry->id, __entry->size, __entry->index, __entry->count)
);

DEFINE_EVENT(tbnet_ip_frame, tbnet_rx_ip_frame,
	TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
	TP_ARGS(size, id, index, count)
);

DEFINE_EVENT(tbnet_ip_frame, tbnet_invalid_rx_ip_frame,
	TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
	TP_ARGS(size, id, index, count)
);

DEFINE_EVENT(tbnet_ip_frame, tbnet_tx_ip_frame,
	TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
	TP_ARGS(size, id, index, count)
);

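/*
 * sk_buff level events: record the skb pointer together with its total
 * length, paged data length and number of fragments at the time of the
 * call. A typical (illustrative) invocation from the Rx or Tx path:
 *
 *	trace_tbnet_rx_skb(skb);
 */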
DECLARE_EVENT_CLASS(tbnet_skb,
	TP_PROTO(const struct sk_buff *skb),
	TP_ARGS(skb),
	TP_STRUCT__entry(
		__field(const void *, addr)
		__field(unsigned int, len)
		__field(unsigned int, data_len)
		__field(unsigned int, nr_frags)
	),
	TP_fast_assign(
		__entry->addr = skb;
		__entry->len = skb->len;
		__entry->data_len = skb->data_len;
		__entry->nr_frags = skb_shinfo(skb)->nr_frags;
	),
	TP_printk("skb=%p len=%u data_len=%u nr_frags=%u",
		  __entry->addr, __entry->len, __entry->data_len,
		  __entry->nr_frags)
);

DEFINE_EVENT(tbnet_skb, tbnet_rx_skb,
	TP_PROTO(const struct sk_buff *skb),
	TP_ARGS(skb)
);

DEFINE_EVENT(tbnet_skb, tbnet_tx_skb,
	TP_PROTO(const struct sk_buff *skb),
	TP_ARGS(skb)
);

DEFINE_EVENT(tbnet_skb, tbnet_consume_skb,
	TP_PROTO(const struct sk_buff *skb),
	TP_ARGS(skb)
);

#endif /* __TRACE_THUNDERBOLT_NET_H */

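/*
 * This header lives outside include/trace/events/, so point define_trace.h
 * at the current directory and tell it the file is called "trace.h".
 */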
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

#include <trace/define_trace.h>