/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mptcp

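/*
 * Like all trace event headers, this file may be included more than once
 * (see TRACE_HEADER_MULTI_READ), hence the non-standard include guard below.
 */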
#if !defined(_TRACE_MPTCP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MPTCP_H

#include <linux/tracepoint.h>

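/* Printable names for the mapping status values reported by the
 * subflow_check_data_avail event below; keep in sync with
 * enum mapping_status in net/mptcp/subflow.c.
 */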
#define show_mapping_status(status)					\
	__print_symbolic(status,					\
		{ 0, "MAPPING_OK" },					\
		{ 1, "MAPPING_INVALID" },				\
		{ 2, "MAPPING_EMPTY" },					\
		{ 3, "MAPPING_DATA_FIN" },				\
		{ 4, "MAPPING_DUMMY" })

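/* Snapshot of the per-subflow state consulted by the packet scheduler when
 * selecting a subflow for transmission: active/backup status, stream memory
 * availability, TCP send window, pacing rate and the derived drain-time ratio.
 */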
TRACE_EVENT(mptcp_subflow_get_send,

	TP_PROTO(struct mptcp_subflow_context *subflow),

	TP_ARGS(subflow),

	TP_STRUCT__entry(
		__field(bool, active)
		__field(bool, free)
		__field(u32, snd_wnd)
		__field(u32, pace)
		__field(u8, backup)
		__field(u64, ratio)
	),

	TP_fast_assign(
		struct sock *ssk;

		__entry->active = mptcp_subflow_active(subflow);
		__entry->backup = subflow->backup || subflow->request_bkup;

		if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
			__entry->free = sk_stream_memory_free(subflow->tcp_sock);
		else
			__entry->free = 0;

		ssk = mptcp_subflow_tcp_sock(subflow);
		if (ssk && sk_fullsock(ssk)) {
			__entry->snd_wnd = tcp_sk(ssk)->snd_wnd;
			__entry->pace = READ_ONCE(ssk->sk_pacing_rate);
		} else {
			__entry->snd_wnd = 0;
			__entry->pace = 0;
		}

		/* queued bytes / pacing rate: estimated time needed to drain
		 * the subflow write queue, in 32.32 fixed point
		 */
		if (ssk && sk_fullsock(ssk) && __entry->pace)
			__entry->ratio = div_u64((u64)ssk->sk_wmem_queued << 32, __entry->pace);
		else
			__entry->ratio = 0;
	),

	TP_printk("active=%d free=%d snd_wnd=%u pace=%u backup=%u ratio=%llu",
		  __entry->active, __entry->free,
		  __entry->snd_wnd, __entry->pace,
		  __entry->backup, __entry->ratio)
);

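/* Event class dumping the DSS-related fields carried in struct mptcp_ext for
 * a single skb: data sequence/ack values, mapping length, checksum and the
 * assorted option flags.
 */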
DECLARE_EVENT_CLASS(mptcp_dump_mpext,

	TP_PROTO(struct mptcp_ext *mpext),

	TP_ARGS(mpext),

	TP_STRUCT__entry(
		__field(u64, data_ack)
		__field(u64, data_seq)
		__field(u32, subflow_seq)
		__field(u16, data_len)
		__field(u16, csum)
		__field(u8, use_map)
		__field(u8, dsn64)
		__field(u8, data_fin)
		__field(u8, use_ack)
		__field(u8, ack64)
		__field(u8, mpc_map)
		__field(u8, frozen)
		__field(u8, reset_transient)
		__field(u8, reset_reason)
		__field(u8, csum_reqd)
		__field(u8, infinite_map)
	),

	TP_fast_assign(
		__entry->data_ack = mpext->ack64 ? mpext->data_ack : mpext->data_ack32;
		__entry->data_seq = mpext->data_seq;
		__entry->subflow_seq = mpext->subflow_seq;
		__entry->data_len = mpext->data_len;
		__entry->csum = (__force u16)mpext->csum;
		__entry->use_map = mpext->use_map;
		__entry->dsn64 = mpext->dsn64;
		__entry->data_fin = mpext->data_fin;
		__entry->use_ack = mpext->use_ack;
		__entry->ack64 = mpext->ack64;
		__entry->mpc_map = mpext->mpc_map;
		__entry->frozen = mpext->frozen;
		__entry->reset_transient = mpext->reset_transient;
		__entry->reset_reason = mpext->reset_reason;
		__entry->csum_reqd = mpext->csum_reqd;
		__entry->infinite_map = mpext->infinite_map;
	),

	TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u csum=%x use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u csum_reqd=%u infinite_map=%u",
		  __entry->data_ack, __entry->data_seq,
		  __entry->subflow_seq, __entry->data_len,
		  __entry->csum, __entry->use_map,
		  __entry->dsn64, __entry->data_fin,
		  __entry->use_ack, __entry->ack64,
		  __entry->mpc_map, __entry->frozen,
		  __entry->reset_transient, __entry->reset_reason,
		  __entry->csum_reqd, __entry->infinite_map)
);

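/* Two instances of the class above: mptcp_sendmsg_frag fires on the transmit
 * path once the DSS mapping for an skb has been set up, get_mapping_status on
 * the receive path when the incoming mapping is parsed.
 */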
DEFINE_EVENT(mptcp_dump_mpext, mptcp_sendmsg_frag,
	TP_PROTO(struct mptcp_ext *mpext),
	TP_ARGS(mpext));

DEFINE_EVENT(mptcp_dump_mpext, get_mapping_status,
	TP_PROTO(struct mptcp_ext *mpext),
	TP_ARGS(mpext));

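/* Progress of the MPTCP-level state when a DATA_ACK is processed: the acked
 * sequence number, old vs. new snd_una and the newly computed vs. current
 * window end.
 */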
TRACE_EVENT(ack_update_msk,

	TP_PROTO(u64 data_ack, u64 old_snd_una,
		 u64 new_snd_una, u64 new_wnd_end,
		 u64 msk_wnd_end),

	TP_ARGS(data_ack, old_snd_una,
		new_snd_una, new_wnd_end,
		msk_wnd_end),

	TP_STRUCT__entry(
		__field(u64, data_ack)
		__field(u64, old_snd_una)
		__field(u64, new_snd_una)
		__field(u64, new_wnd_end)
		__field(u64, msk_wnd_end)
	),

	TP_fast_assign(
		__entry->data_ack = data_ack;
		__entry->old_snd_una = old_snd_una;
		__entry->new_snd_una = new_snd_una;
		__entry->new_wnd_end = new_wnd_end;
		__entry->msk_wnd_end = msk_wnd_end;
	),

	TP_printk("data_ack=%llu old_snd_una=%llu new_snd_una=%llu new_wnd_end=%llu msk_wnd_end=%llu",
		  __entry->data_ack, __entry->old_snd_una,
		  __entry->new_snd_una, __entry->new_wnd_end,
		  __entry->msk_wnd_end)
);

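/* Mapping status computed for the skb at the head of the subflow receive
 * queue (see show_mapping_status above).
 */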
TRACE_EVENT(subflow_check_data_avail,

	TP_PROTO(__u8 status, struct sk_buff *skb),

	TP_ARGS(status, skb),

	TP_STRUCT__entry(
		__field(u8, status)
		__field(const void *, skb)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->skb = skb;
	),

	TP_printk("mapping_status=%s, skb=%p",
		  show_mapping_status(__entry->status),
		  __entry->skb)
);

#endif /* _TRACE_MPTCP_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
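
/*
 * Example usage, assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *   # enable all events defined in this file
 *   echo 1 > /sys/kernel/tracing/events/mptcp/enable
 *
 *   # read the formatted trace output
 *   cat /sys/kernel/tracing/trace_pipe
 */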