// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

/* WARNING: This implementation is not necessarily the same as the
 * kernel's tcp_dctcp.c. Its main purpose is to test the kernel BPF
 * logic.
 */

#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#ifndef EBUSY
#define EBUSY 16
#endif
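
/* Local copy of the kernel's min_not_zero(): return the smaller of the two
 * values, treating zero as "no value".
 */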
#define min_not_zero(x, y) ({			\
	typeof(x) __x = (x);			\
	typeof(y) __y = (y);			\
	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })

char _license[] SEC("license") = "GPL";

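/* Variables shared with the userspace part of the test: fallback_cc is
 * filled in by the loader before the program runs; cc_res, tcp_cdg_res,
 * stg_result and ebusy_cnt report results back to it.
 */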
volatile const char fallback_cc[TCP_CA_NAME_MAX];
const char bpf_dctcp[] = "bpf_dctcp";
const char tcp_cdg[] = "cdg";
char cc_res[TCP_CA_NAME_MAX];
int tcp_cdg_res = 0;
int stg_result = 0;
int ebusy_cnt = 0;

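/* Socket storage used to pass a per-socket value into bpf_dctcp_init(),
 * which copies it into stg_result and then deletes the entry.
 */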
struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, int);
} sk_stg_map SEC(".maps");

#define DCTCP_MAX_ALPHA	1024U

struct bpf_dctcp {
	__u32 old_delivered;
	__u32 old_delivered_ce;
	__u32 prior_rcv_nxt;
	__u32 dctcp_alpha;
	__u32 next_seq;
	__u32 ce_state;
	__u32 loss_cwnd;
};

static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */
static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA;

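/* Start a new alpha-measurement window: remember snd_nxt and the current
 * delivered/delivered_ce counters so the next window can compute the
 * fraction of CE-marked packets.
 */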
static void dctcp_reset(const struct tcp_sock *tp, struct bpf_dctcp *ca)
{
	ca->next_seq = tp->snd_nxt;

	ca->old_delivered = tp->delivered;
	ca->old_delivered_ce = tp->delivered_ce;
}

SEC("struct_ops")
void BPF_PROG(bpf_dctcp_init, struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bpf_dctcp *ca = inet_csk_ca(sk);
	int *stg;

	if (!(tp->ecn_flags & TCP_ECN_OK) && fallback_cc[0]) {
		/* Switch to fallback */
		if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
				   (void *)fallback_cc, sizeof(fallback_cc)) == -EBUSY)
			ebusy_cnt++;

		/* Switch back to myself.  The recursively-called
		 * bpf_dctcp_init() will get -EBUSY for all
		 * bpf_setsockopt(TCP_CONGESTION), except the last "cdg" one.
		 */
		if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
				   (void *)bpf_dctcp, sizeof(bpf_dctcp)) == -EBUSY)
			ebusy_cnt++;

		/* Switch back to fallback */
		if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
				   (void *)fallback_cc, sizeof(fallback_cc)) == -EBUSY)
			ebusy_cnt++;

		/* Expecting -ENOTSUPP for tcp_cdg_res */
		tcp_cdg_res = bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
					     (void *)tcp_cdg, sizeof(tcp_cdg));
		bpf_getsockopt(sk, SOL_TCP, TCP_CONGESTION,
			       (void *)cc_res, sizeof(cc_res));
		return;
	}

	ca->prior_rcv_nxt = tp->rcv_nxt;
	ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
	ca->loss_cwnd = 0;
	ca->ce_state = 0;

	stg = bpf_sk_storage_get(&sk_stg_map, (void *)tp, NULL, 0);
	if (stg) {
		stg_result = *stg;
		bpf_sk_storage_delete(&sk_stg_map, (void *)tp);
	}
	dctcp_reset(tp, ca);
}

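/* DCTCP cwnd reduction on ECN: cwnd = cwnd * (1 - alpha / 2).  Since alpha
 * is scaled by 2^10 (DCTCP_MAX_ALPHA), alpha / 2 becomes the shift by 11
 * below.
 */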
SEC("struct_ops")
__u32 BPF_PROG(bpf_dctcp_ssthresh, struct sock *sk)
{
	struct bpf_dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->loss_cwnd = tp->snd_cwnd;
	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}

SEC("struct_ops")
void BPF_PROG(bpf_dctcp_update_alpha, struct sock *sk, __u32 flags)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bpf_dctcp *ca = inet_csk_ca(sk);

	/* Expired RTT */
	if (!before(tp->snd_una, ca->next_seq)) {
		__u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
		__u32 alpha = ca->dctcp_alpha;

		/* alpha = (1 - g) * alpha + g * F */

		alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
		if (delivered_ce) {
			__u32 delivered = tp->delivered - ca->old_delivered;

			/* If dctcp_shift_g == 1, a 32bit value would overflow
			 * after 8 M packets.
			 */
			delivered_ce <<= (10 - dctcp_shift_g);
			delivered_ce /= max(1U, delivered);

			alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
		}
		ca->dctcp_alpha = alpha;
		dctcp_reset(tp, ca);
	}
}

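/* On packet loss, remember the current cwnd and halve ssthresh like
 * standard TCP, instead of using the gentler alpha-based reduction.
 */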
static void dctcp_react_to_loss(struct sock *sk)
{
	struct bpf_dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->loss_cwnd = tp->snd_cwnd;
	tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
}

SEC("struct_ops")
void BPF_PROG(bpf_dctcp_state, struct sock *sk, __u8 new_state)
{
	if (new_state == TCP_CA_Recovery &&
	    new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))
		dctcp_react_to_loss(sk);
	/* We handle RTO in bpf_dctcp_cwnd_event to ensure that we perform only
	 * one loss-adjustment per RTT.
	 */
}

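/* Track the receive-side CE state in ecn_flags: while the last packet was
 * CE-marked, keep demanding CWR (i.e. keep setting ECE on outgoing ACKs).
 */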
static void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (ce_state == 1)
		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
	else
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

/* Minimal DCTCP CE state machine:
 *
 * S:	0 <- last pkt was non-CE
 *	1 <- last pkt was CE
 */
static void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
				 __u32 *prior_rcv_nxt, __u32 *ce_state)
{
	__u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;

	if (*ce_state != new_ce_state) {
		/* CE state has changed, force an immediate ACK to
		 * reflect the new CE state. If an ACK was delayed,
		 * send that first to reflect the prior CE state.
		 */
		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
			dctcp_ece_ack_cwr(sk, *ce_state);
			bpf_tcp_send_ack(sk, *prior_rcv_nxt);
		}
		inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
	}
	*prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
	*ce_state = new_ce_state;
	dctcp_ece_ack_cwr(sk, new_ce_state);
}

SEC("struct_ops")
void BPF_PROG(bpf_dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
{
	struct bpf_dctcp *ca = inet_csk_ca(sk);

	switch (ev) {
	case CA_EVENT_ECN_IS_CE:
	case CA_EVENT_ECN_NO_CE:
		dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
		break;
	case CA_EVENT_LOSS:
		dctcp_react_to_loss(sk);
		break;
	default:
		/* Don't care for the rest. */
		break;
	}
}

SEC("struct_ops")
__u32 BPF_PROG(bpf_dctcp_cwnd_undo, struct sock *sk)
{
	const struct bpf_dctcp *ca = inet_csk_ca(sk);

	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}

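/* Congestion avoidance is plain Reno; call the kernel's helper as a kfunc. */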
extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;

SEC("struct_ops")
void BPF_PROG(bpf_dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
	tcp_reno_cong_avoid(sk, ack, acked);
}

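/* A second registration under a distinct name; it is loaded together with
 * "bpf_dctcp" below but is never set on the test connection (hence "nouse").
 */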
SEC(".struct_ops")
struct tcp_congestion_ops dctcp_nouse = {
	.init		= (void *)bpf_dctcp_init,
	.set_state	= (void *)bpf_dctcp_state,
	.flags		= TCP_CONG_NEEDS_ECN,
	.name		= "bpf_dctcp_nouse",
};

SEC(".struct_ops")
struct tcp_congestion_ops dctcp = {
	.init		= (void *)bpf_dctcp_init,
	.in_ack_event	= (void *)bpf_dctcp_update_alpha,
	.cwnd_event	= (void *)bpf_dctcp_cwnd_event,
	.ssthresh	= (void *)bpf_dctcp_ssthresh,
	.cong_avoid	= (void *)bpf_dctcp_cong_avoid,
	.undo_cwnd	= (void *)bpf_dctcp_cwnd_undo,
	.set_state	= (void *)bpf_dctcp_state,
	.flags		= TCP_CONG_NEEDS_ECN,
	.name		= "bpf_dctcp",
};