xref: /linux/tools/testing/selftests/bpf/prog_tests/test_xsk.c (revision a55f7f5f29b32c2c53cc291899cf9b0c25a07f7c)
// SPDX-License-Identifier: GPL-2.0
#include <bpf/bpf.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/if_link.h>
#include <linux/mman.h>
#include <linux/netdev.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

#include "network_helpers.h"
#include "test_xsk.h"
#include "xsk_xdp_common.h"
#include "xsk_xdp_progs.skel.h"

#define DEFAULT_BATCH_SIZE		64
#define MIN_PKT_SIZE			64
#define MAX_ETH_JUMBO_SIZE		9000
#define MAX_INTERFACES			2
#define MAX_TEARDOWN_ITER		10
#define MAX_TX_BUDGET_DEFAULT		32
#define PKT_DUMP_NB_TO_PRINT		16
/* Just to align the data in the packet */
#define PKT_HDR_SIZE			(sizeof(struct ethhdr) + 2)
#define POLL_TMOUT			1000
#define THREAD_TMOUT			3
#define UMEM_HEADROOM_TEST_SIZE		128
#define XSK_DESC__INVALID_OPTION	(0xffff)
#define XSK_UMEM__INVALID_FRAME_SIZE	(MAX_ETH_JUMBO_SIZE + 1)
#define XSK_UMEM__LARGE_FRAME_SIZE	(3 * 1024)
#define XSK_UMEM__MAX_FRAME_SIZE	(4 * 1024)

static const u8 g_mac[ETH_ALEN] = {0x55, 0x44, 0x33, 0x22, 0x11, 0x00};

bool opt_verbose;
pthread_barrier_t barr;
pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER;

int pkts_in_flight;

/* The payload is a stream of 32-bit words. Each word carries the packet
 * sequence number in the upper 16 bits and an intra-packet data sequence
 * number in the lower 16 bits. Both are numbered from 0, so the 5th word
 * of the 3rd packet contains the value (2 << 16) | 4.
 */
static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size)
{
	u32 *ptr = (u32 *)dest, i;

	start /= sizeof(*ptr);
	size /= sizeof(*ptr);
	for (i = 0; i < size; i++)
		ptr[i] = htonl(pkt_nb << 16 | (i + start));
}

static void gen_eth_hdr(struct xsk_socket_info *xsk, struct ethhdr *eth_hdr)
{
	memcpy(eth_hdr->h_dest, xsk->dst_mac, ETH_ALEN);
	memcpy(eth_hdr->h_source, xsk->src_mac, ETH_ALEN);
	eth_hdr->h_proto = htons(ETH_P_LOOPBACK);
}

static bool is_umem_valid(struct ifobject *ifobj)
{
	return !!ifobj->umem->umem;
}

static u32 mode_to_xdp_flags(enum test_mode mode)
{
	return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
}

static u64 umem_size(struct xsk_umem_info *umem)
{
	return umem->num_frames * umem->frame_size;
}

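/* Create a UMEM for @ifobj on top of @buffer. Fill and completion ring
 * sizes fall back to the libbpf defaults unless the test overrides them.
 * For the receiver of a shared UMEM, the buffer allocator is started in
 * the second half of the area so Tx and Rx do not hand out the same
 * frames.
 */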
int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer,
		       u64 size)
{
	struct xsk_umem_config cfg = {
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = umem->frame_size,
		.frame_headroom = umem->frame_headroom,
		.flags = XSK_UMEM__DEFAULT_FLAGS
	};
	int ret;

	if (umem->fill_size)
		cfg.fill_size = umem->fill_size;

	if (umem->comp_size)
		cfg.comp_size = umem->comp_size;

	if (umem->unaligned_mode)
		cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;

	ret = xsk_umem__create(&umem->umem, buffer, size,
			       &umem->fq, &umem->cq, &cfg);
	if (ret)
		return ret;

	umem->buffer = buffer;
	if (ifobj->shared_umem && ifobj->rx_on) {
		umem->base_addr = umem_size(umem);
		umem->next_buffer = umem_size(umem);
	}

	return 0;
}

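/* Trivial bump allocator: hand out UMEM buffers one frame_size at a time
 * and wrap around to base_addr once the end of the area is reached.
 */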
static u64 umem_alloc_buffer(struct xsk_umem_info *umem)
{
	u64 addr;

	addr = umem->next_buffer;
	umem->next_buffer += umem->frame_size;
	if (umem->next_buffer >= umem->base_addr + umem_size(umem))
		umem->next_buffer = umem->base_addr;

	return addr;
}

static void umem_reset_alloc(struct xsk_umem_info *umem)
{
	umem->next_buffer = 0;
}

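/* Opt the socket in to busy polling: prefer busy poll over interrupts,
 * busy-poll for up to 20 usecs per call and process at most one batch
 * worth of packets per poll cycle.
 */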
static int enable_busy_poll(struct xsk_socket_info *xsk)
{
	int sock_opt;

	sock_opt = 1;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		return -errno;

	sock_opt = 20;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		return -errno;

	sock_opt = xsk->batch_size;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		return -errno;

	return 0;
}

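/* Bind an AF_XDP socket to the interface. Ring sizes are taken from the
 * UMEM overrides when set, and multi-buffer support (XDP_USE_SG) is
 * requested whenever the MTU exceeds a single standard Ethernet frame.
 */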
int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
			 struct ifobject *ifobject, bool shared)
{
	struct xsk_socket_config cfg = {};
	struct xsk_ring_cons *rxr;
	struct xsk_ring_prod *txr;

	xsk->umem = umem;
	cfg.rx_size = xsk->rxqsize;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	cfg.bind_flags = ifobject->bind_flags;
	if (shared)
		cfg.bind_flags |= XDP_SHARED_UMEM;
	if (ifobject->mtu > MAX_ETH_PKT_SIZE)
		cfg.bind_flags |= XDP_USE_SG;
	if (umem->comp_size)
		cfg.tx_size = umem->comp_size;
	if (umem->fill_size)
		cfg.rx_size = umem->fill_size;

	txr = ifobject->tx_on ? &xsk->tx : NULL;
	rxr = ifobject->rx_on ? &xsk->rx : NULL;
	return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);
}

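/* Apply the requested HW ring sizes via ethtool, retrying for a while
 * since the device may transiently return EBUSY after a reconfiguration.
 */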
static int set_ring_size(struct ifobject *ifobj)
{
	int ret;
	u32 ctr = 0;

	while (ctr++ < SOCK_RECONF_CTR) {
		ret = set_hw_ring_size(ifobj->ifname, &ifobj->ring);
		if (!ret)
			break;

		/* Retry if it fails */
		if (ctr >= SOCK_RECONF_CTR || errno != EBUSY)
			return -errno;

		usleep(USLEEP_MAX);
	}

	return ret;
}

int hw_ring_size_reset(struct ifobject *ifobj)
{
	ifobj->ring.tx_pending = ifobj->set_ring.default_tx;
	ifobj->ring.rx_pending = ifobj->set_ring.default_rx;
	return set_ring_size(ifobj);
}

static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
			     struct ifobject *ifobj_rx)
{
	u32 i, j;

	for (i = 0; i < MAX_INTERFACES; i++) {
		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;

		ifobj->xsk = &ifobj->xsk_arr[0];
		ifobj->use_poll = false;
		ifobj->use_fill_ring = true;
		ifobj->release_rx = true;
		ifobj->validation_func = NULL;
		ifobj->use_metadata = false;

		if (i == 0) {
			ifobj->rx_on = false;
			ifobj->tx_on = true;
		} else {
			ifobj->rx_on = true;
			ifobj->tx_on = false;
		}

		memset(ifobj->umem, 0, sizeof(*ifobj->umem));
		ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
		ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;

		for (j = 0; j < MAX_SOCKETS; j++) {
			memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
			ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
			ifobj->xsk_arr[j].batch_size = DEFAULT_BATCH_SIZE;
			if (i == 0)
				ifobj->xsk_arr[j].pkt_stream = test->tx_pkt_stream_default;
			else
				ifobj->xsk_arr[j].pkt_stream = test->rx_pkt_stream_default;

			memcpy(ifobj->xsk_arr[j].src_mac, g_mac, ETH_ALEN);
			memcpy(ifobj->xsk_arr[j].dst_mac, g_mac, ETH_ALEN);
			ifobj->xsk_arr[j].src_mac[5] += ((j * 2) + 0);
			ifobj->xsk_arr[j].dst_mac[5] += ((j * 2) + 1);
		}
	}

	if (ifobj_tx->hw_ring_size_supp)
		hw_ring_size_reset(ifobj_tx);

	test->ifobj_tx = ifobj_tx;
	test->ifobj_rx = ifobj_rx;
	test->current_step = 0;
	test->total_steps = 1;
	test->nb_sockets = 1;
	test->fail = false;
	test->set_ring = false;
	test->adjust_tail = false;
	test->adjust_tail_support = false;
	test->mtu = MAX_ETH_PKT_SIZE;
	test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog;
	test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk;
	test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog;
	test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk;
}

void test_init(struct test_spec *test, struct ifobject *ifobj_tx,
	       struct ifobject *ifobj_rx, enum test_mode mode,
	       const struct test_spec *test_to_run)
{
	struct pkt_stream *tx_pkt_stream;
	struct pkt_stream *rx_pkt_stream;
	u32 i;

	tx_pkt_stream = test->tx_pkt_stream_default;
	rx_pkt_stream = test->rx_pkt_stream_default;
	memset(test, 0, sizeof(*test));
	test->tx_pkt_stream_default = tx_pkt_stream;
	test->rx_pkt_stream_default = rx_pkt_stream;

	for (i = 0; i < MAX_INTERFACES; i++) {
		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;

		ifobj->bind_flags = XDP_USE_NEED_WAKEUP;
		if (mode == TEST_MODE_ZC)
			ifobj->bind_flags |= XDP_ZEROCOPY;
		else
			ifobj->bind_flags |= XDP_COPY;
	}

	memcpy(test->name, test_to_run->name, MAX_TEST_NAME_SIZE);
	test->test_func = test_to_run->test_func;
	test->mode = mode;
	__test_spec_init(test, ifobj_tx, ifobj_rx);
}

static void test_spec_reset(struct test_spec *test)
{
	__test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
}

static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
				   struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx,
				   struct bpf_map *xskmap_tx)
{
	test->xdp_prog_rx = xdp_prog_rx;
	test->xdp_prog_tx = xdp_prog_tx;
	test->xskmap_rx = xskmap_rx;
	test->xskmap_tx = xskmap_tx;
}

static int test_spec_set_mtu(struct test_spec *test, int mtu)
{
	int err;

	if (test->ifobj_rx->mtu != mtu) {
		err = xsk_set_mtu(test->ifobj_rx->ifindex, mtu);
		if (err)
			return err;
		test->ifobj_rx->mtu = mtu;
	}
	if (test->ifobj_tx->mtu != mtu) {
		err = xsk_set_mtu(test->ifobj_tx->ifindex, mtu);
		if (err)
			return err;
		test->ifobj_tx->mtu = mtu;
	}

	return 0;
}

void pkt_stream_reset(struct pkt_stream *pkt_stream)
{
	if (pkt_stream) {
		pkt_stream->current_pkt_nb = 0;
		pkt_stream->nb_rx_pkts = 0;
	}
}

static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream)
{
	if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts)
		return NULL;

	return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
}

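/* Return the next packet that is expected to arrive on the Rx side,
 * skipping entries marked invalid. @pkts_sent counts every entry that
 * was advanced over, valid or not, so the pacing logic stays accurate.
 */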
static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
{
	while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
		(*pkts_sent)++;
		if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid)
			return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
		pkt_stream->current_pkt_nb++;
	}
	return NULL;
}

void pkt_stream_delete(struct pkt_stream *pkt_stream)
{
	free(pkt_stream->pkts);
	free(pkt_stream);
}

void pkt_stream_restore_default(struct test_spec *test)
{
	struct pkt_stream *tx_pkt_stream = test->ifobj_tx->xsk->pkt_stream;
	struct pkt_stream *rx_pkt_stream = test->ifobj_rx->xsk->pkt_stream;

	if (tx_pkt_stream != test->tx_pkt_stream_default) {
		pkt_stream_delete(test->ifobj_tx->xsk->pkt_stream);
		test->ifobj_tx->xsk->pkt_stream = test->tx_pkt_stream_default;
	}

	if (rx_pkt_stream != test->rx_pkt_stream_default) {
		pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);
		test->ifobj_rx->xsk->pkt_stream = test->rx_pkt_stream_default;
	}
}

static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
{
	struct pkt_stream *pkt_stream;

	pkt_stream = calloc(1, sizeof(*pkt_stream));
	if (!pkt_stream)
		return NULL;

	pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts));
	if (!pkt_stream->pkts) {
		free(pkt_stream);
		return NULL;
	}

	pkt_stream->nb_pkts = nb_pkts;
	return pkt_stream;
}

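/* Number of frames a packet occupies. In normal mode this is derived
 * from the packet length; in verbatim mode the descriptors are replayed
 * as-is, so walk the stream until the XDP_PKT_CONTD chain ends.
 */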
static u32 pkt_nb_frags(u32 frame_size, struct pkt_stream *pkt_stream, struct pkt *pkt)
{
	u32 nb_frags = 1, next_frag;

	if (!pkt)
		return 1;

	if (!pkt_stream->verbatim) {
		if (!pkt->valid || !pkt->len)
			return 1;
		return ceil_u32(pkt->len, frame_size);
	}

	/* Search for the end of the packet in verbatim mode */
	if (!pkt_continues(pkt->options) || !pkt->valid)
		return nb_frags;

	next_frag = pkt_stream->current_pkt_nb;
	pkt++;
	while (next_frag++ < pkt_stream->nb_pkts) {
		nb_frags++;
		if (!pkt_continues(pkt->options) || !pkt->valid)
			break;
		pkt++;
	}
	return nb_frags;
}

static bool set_pkt_valid(int offset, u32 len)
{
	return len <= MAX_ETH_JUMBO_SIZE;
}

static void pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
{
	pkt->offset = offset;
	pkt->len = len;
	pkt->valid = set_pkt_valid(offset, len);
}

static void pkt_stream_pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
{
	bool prev_pkt_valid = pkt->valid;

	pkt_set(pkt_stream, pkt, offset, len);
	pkt_stream->nb_valid_entries += pkt->valid - prev_pkt_valid;
}

static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
{
	return ceil_u32(len, umem->frame_size) * umem->frame_size;
}

static struct pkt_stream *__pkt_stream_generate(u32 nb_pkts, u32 pkt_len, u32 nb_start, u32 nb_off)
{
	struct pkt_stream *pkt_stream;
	u32 i;

	pkt_stream = __pkt_stream_alloc(nb_pkts);
	if (!pkt_stream)
		return NULL;

	pkt_stream->nb_pkts = nb_pkts;
	pkt_stream->max_pkt_len = pkt_len;
	for (i = 0; i < nb_pkts; i++) {
		struct pkt *pkt = &pkt_stream->pkts[i];

		pkt_stream_pkt_set(pkt_stream, pkt, 0, pkt_len);
		pkt->pkt_nb = nb_start + i * nb_off;
	}

	return pkt_stream;
}

struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len)
{
	return __pkt_stream_generate(nb_pkts, pkt_len, 0, 1);
}

static struct pkt_stream *pkt_stream_clone(struct pkt_stream *pkt_stream)
{
	return pkt_stream_generate(pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
}

static int pkt_stream_replace_ifobject(struct ifobject *ifobj, u32 nb_pkts, u32 pkt_len)
{
	ifobj->xsk->pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);

	if (!ifobj->xsk->pkt_stream)
		return -ENOMEM;

	return 0;
}

static int pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
{
	int ret;

	ret = pkt_stream_replace_ifobject(test->ifobj_tx, nb_pkts, pkt_len);
	if (ret)
		return ret;

	return pkt_stream_replace_ifobject(test->ifobj_rx, nb_pkts, pkt_len);
}

static int __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
				     int offset)
{
	struct pkt_stream *pkt_stream;
	u32 i;

	pkt_stream = pkt_stream_clone(ifobj->xsk->pkt_stream);
	if (!pkt_stream)
		return -ENOMEM;

	for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2)
		pkt_stream_pkt_set(pkt_stream, &pkt_stream->pkts[i], offset, pkt_len);

	ifobj->xsk->pkt_stream = pkt_stream;

	return 0;
}

static int pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
{
	int ret = __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);

	if (ret)
		return ret;

	return __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
}

static int pkt_stream_receive_half(struct test_spec *test)
{
	struct pkt_stream *pkt_stream = test->ifobj_tx->xsk->pkt_stream;
	u32 i;

	if (test->ifobj_rx->xsk->pkt_stream != test->rx_pkt_stream_default)
		/* The packet stream has already been replaced, so release that
		 * one here. The stream created below is freed by
		 * pkt_stream_restore_default() at the end of the test.
		 */
		pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);

	test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(pkt_stream->nb_pkts,
							      pkt_stream->pkts[0].len);
	if (!test->ifobj_rx->xsk->pkt_stream)
		return -ENOMEM;

	pkt_stream = test->ifobj_rx->xsk->pkt_stream;
	for (i = 1; i < pkt_stream->nb_pkts; i += 2)
		pkt_stream->pkts[i].valid = false;

	pkt_stream->nb_valid_entries /= 2;

	return 0;
}

static int pkt_stream_even_odd_sequence(struct test_spec *test)
{
	struct pkt_stream *pkt_stream;
	u32 i;

	for (i = 0; i < test->nb_sockets; i++) {
		pkt_stream = test->ifobj_tx->xsk_arr[i].pkt_stream;
		pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
						   pkt_stream->pkts[0].len, i, 2);
		if (!pkt_stream)
			return -ENOMEM;
		test->ifobj_tx->xsk_arr[i].pkt_stream = pkt_stream;

		pkt_stream = test->ifobj_rx->xsk_arr[i].pkt_stream;
		pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
						   pkt_stream->pkts[0].len, i, 2);
		if (!pkt_stream)
			return -ENOMEM;
		test->ifobj_rx->xsk_arr[i].pkt_stream = pkt_stream;
	}

	return 0;
}

static void release_even_odd_sequence(struct test_spec *test)
{
	struct pkt_stream *later_free_tx = test->ifobj_tx->xsk->pkt_stream;
	struct pkt_stream *later_free_rx = test->ifobj_rx->xsk->pkt_stream;
	int i;

	for (i = 0; i < test->nb_sockets; i++) {
		/* later_free_{rx/tx} will be freed by restore_default() */
		if (test->ifobj_tx->xsk_arr[i].pkt_stream != later_free_tx)
			pkt_stream_delete(test->ifobj_tx->xsk_arr[i].pkt_stream);
		if (test->ifobj_rx->xsk_arr[i].pkt_stream != later_free_rx)
			pkt_stream_delete(test->ifobj_rx->xsk_arr[i].pkt_stream);
	}
}

static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem)
{
	if (!pkt->valid)
		return pkt->offset;
	return pkt->offset + umem_alloc_buffer(umem);
}

static void pkt_stream_cancel(struct pkt_stream *pkt_stream)
{
	pkt_stream->current_pkt_nb--;
}

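/* Write an Ethernet header (first frag only) followed by the sequence
 * number payload into the UMEM at @addr. @bytes_written tells how much
 * of the packet has already been generated in earlier frags.
 */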
static void pkt_generate(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, u64 addr, u32 len,
			 u32 pkt_nb, u32 bytes_written)
{
	void *data = xsk_umem__get_data(umem->buffer, addr);

	if (len < MIN_PKT_SIZE)
		return;

	if (!bytes_written) {
		gen_eth_hdr(xsk, data);

		len -= PKT_HDR_SIZE;
		data += PKT_HDR_SIZE;
	} else {
		bytes_written -= PKT_HDR_SIZE;
	}

	write_payload(data, pkt_nb, bytes_written, len);
}

static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, struct pkt *frames,
						       u32 nb_frames, bool verbatim)
{
	u32 i, len = 0, pkt_nb = 0, payload = 0;
	struct pkt_stream *pkt_stream;

	pkt_stream = __pkt_stream_alloc(nb_frames);
	if (!pkt_stream)
		return NULL;

	for (i = 0; i < nb_frames; i++) {
		struct pkt *pkt = &pkt_stream->pkts[pkt_nb];
		struct pkt *frame = &frames[i];

		pkt->offset = frame->offset;
		if (verbatim) {
			*pkt = *frame;
			pkt->pkt_nb = payload;
			if (!frame->valid || !pkt_continues(frame->options))
				payload++;
		} else {
			if (frame->valid)
				len += frame->len;
			if (frame->valid && pkt_continues(frame->options))
				continue;

			pkt->pkt_nb = pkt_nb;
			pkt->len = len;
			pkt->valid = frame->valid;
			pkt->options = 0;

			len = 0;
		}

		print_verbose("offset: %d len: %u valid: %u options: %u pkt_nb: %u\n",
			      pkt->offset, pkt->len, pkt->valid, pkt->options, pkt->pkt_nb);

		if (pkt->valid && pkt->len > pkt_stream->max_pkt_len)
			pkt_stream->max_pkt_len = pkt->len;

		if (pkt->valid)
			pkt_stream->nb_valid_entries++;

		pkt_nb++;
	}

	pkt_stream->nb_pkts = pkt_nb;
	pkt_stream->verbatim = verbatim;
	return pkt_stream;
}

static int pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
{
	struct pkt_stream *pkt_stream;

	pkt_stream = __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts, true);
	if (!pkt_stream)
		return -ENOMEM;
	test->ifobj_tx->xsk->pkt_stream = pkt_stream;

	pkt_stream = __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts, false);
	if (!pkt_stream)
		return -ENOMEM;
	test->ifobj_rx->xsk->pkt_stream = pkt_stream;

	return 0;
}

static void pkt_print_data(u32 *data, u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		u32 seqnum, pkt_nb;

		seqnum = ntohl(*data) & 0xffff;
		pkt_nb = ntohl(*data) >> 16;
		ksft_print_msg("%u:%u ", pkt_nb, seqnum);
		data++;
	}
}

static void pkt_dump(void *pkt, u32 len, bool eth_header)
{
	struct ethhdr *ethhdr = pkt;
	u32 i, *data;

	if (eth_header) {
		/* Extract L2 frame */
		ksft_print_msg("DEBUG>> L2: dst mac: ");
		for (i = 0; i < ETH_ALEN; i++)
			ksft_print_msg("%02X", ethhdr->h_dest[i]);

		ksft_print_msg("\nDEBUG>> L2: src mac: ");
		for (i = 0; i < ETH_ALEN; i++)
			ksft_print_msg("%02X", ethhdr->h_source[i]);

		data = pkt + PKT_HDR_SIZE;
	} else {
		data = pkt;
	}

	/* Extract L5 frame */
	ksft_print_msg("\nDEBUG>> L5: seqnum: ");
	pkt_print_data(data, PKT_DUMP_NB_TO_PRINT);
	ksft_print_msg("....");
	if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) {
		ksft_print_msg("\n.... ");
		pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT,
			       PKT_DUMP_NB_TO_PRINT);
	}
	ksft_print_msg("\n---------------------------------------\n");
}

static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr)
{
	u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
	u32 offset = addr % umem->frame_size, expected_offset;
	int pkt_offset = pkt->valid ? pkt->offset : 0;

	if (!umem->unaligned_mode)
		pkt_offset = 0;

	expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;

	if (offset == expected_offset)
		return true;

	ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
	return false;
}

static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr)
{
	void *data = xsk_umem__get_data(buffer, addr);
	struct xdp_info *meta = data - sizeof(struct xdp_info);

	if (meta->count != pkt->pkt_nb) {
		ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%llu]\n",
			       __func__, pkt->pkt_nb,
			       (unsigned long long)meta->count);
		return false;
	}

	return true;
}

static int is_adjust_tail_supported(struct xsk_xdp_progs *skel_rx, bool *supported)
{
	struct bpf_map *data_map;
	int adjust_value = 0;
	int key = 0;
	int ret;

	data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
	if (!data_map || !bpf_map__is_internal(data_map)) {
		ksft_print_msg("Error: could not find bss section of XDP program\n");
		return -EINVAL;
	}

	ret = bpf_map_lookup_elem(bpf_map__fd(data_map), &key, &adjust_value);
	if (ret) {
		ksft_print_msg("Error: bpf_map_lookup_elem failed with error %d\n", ret);
		return ret;
	}

	/* The XDP program sets the 'adjust_value' variable to -EOPNOTSUPP if
	 * the adjust_tail helper is not supported. Skip the adjust_tail test
	 * case in this scenario.
	 */
	*supported = adjust_value != -EOPNOTSUPP;

	return 0;
}

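/* Validate a single Rx fragment: it must lie inside the UMEM, respect
 * frame boundaries in aligned mode, and carry the expected packet number
 * and sequence numbers at both the start and the end of its payload.
 */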
static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 expected_pkt_nb,
			  u32 bytes_processed)
{
	u32 seqnum, pkt_nb, *pkt_data, words_to_end, expected_seqnum;
	void *data = xsk_umem__get_data(umem->buffer, addr);

	addr -= umem->base_addr;

	if (addr >= umem->num_frames * umem->frame_size ||
	    addr + len > umem->num_frames * umem->frame_size) {
		ksft_print_msg("Frag invalid addr: %llx len: %u\n",
			       (unsigned long long)addr, len);
		return false;
	}
	if (!umem->unaligned_mode && addr % umem->frame_size + len > umem->frame_size) {
		ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n",
			       (unsigned long long)addr, len);
		return false;
	}

	pkt_data = data;
	if (!bytes_processed) {
		pkt_data += PKT_HDR_SIZE / sizeof(*pkt_data);
		len -= PKT_HDR_SIZE;
	} else {
		bytes_processed -= PKT_HDR_SIZE;
	}

	expected_seqnum = bytes_processed / sizeof(*pkt_data);
	seqnum = ntohl(*pkt_data) & 0xffff;
	pkt_nb = ntohl(*pkt_data) >> 16;

	if (expected_pkt_nb != pkt_nb) {
		ksft_print_msg("[%s] expected pkt_nb [%u], got pkt_nb [%u]\n",
			       __func__, expected_pkt_nb, pkt_nb);
		goto error;
	}
	if (expected_seqnum != seqnum) {
		ksft_print_msg("[%s] expected seqnum at start [%u], got seqnum [%u]\n",
			       __func__, expected_seqnum, seqnum);
		goto error;
	}

	words_to_end = len / sizeof(*pkt_data) - 1;
	pkt_data += words_to_end;
	seqnum = ntohl(*pkt_data) & 0xffff;
	expected_seqnum += words_to_end;
	if (expected_seqnum != seqnum) {
		ksft_print_msg("[%s] expected seqnum at end [%u], got seqnum [%u]\n",
			       __func__, expected_seqnum, seqnum);
		goto error;
	}

	return true;

error:
	pkt_dump(data, len, !bytes_processed);
	return false;
}

static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
{
	if (pkt->len != len) {
		ksft_print_msg("[%s] expected packet length [%d], got length [%d]\n",
			       __func__, pkt->len, len);
		pkt_dump(xsk_umem__get_data(buffer, addr), len, true);
		return false;
	}

	return true;
}

static u32 load_value(u32 *counter)
{
	return __atomic_load_n(counter, __ATOMIC_ACQUIRE);
}

static bool kick_tx_with_check(struct xsk_socket_info *xsk, int *ret)
{
	u32 max_budget = MAX_TX_BUDGET_DEFAULT;
	u32 cons, ready_to_send;
	int delta;

	cons = load_value(xsk->tx.consumer);
	ready_to_send = load_value(xsk->tx.producer) - cons;
	*ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);

	delta = load_value(xsk->tx.consumer) - cons;
	/* By default, the xsk should consume exactly @max_budget descs in one
	 * send when the max budget limit is hit in the while loop of
	 * __xsk_generic_xmit(). Make sure that the number of descs to be sent
	 * is larger than @max_budget, or else tx.consumer gets updated in
	 * xskq_cons_peek_desc() in time, which hides the issue we try to
	 * verify.
	 */
	if (ready_to_send > max_budget && delta != max_budget)
		return false;

	return true;
}

int kick_tx(struct xsk_socket_info *xsk)
{
	int ret;

	if (xsk->check_consumer) {
		if (!kick_tx_with_check(xsk, &ret))
			return TEST_FAILURE;
	} else {
		ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	}
	if (ret >= 0)
		return TEST_PASS;
	if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
		usleep(100);
		return TEST_PASS;
	}
	return TEST_FAILURE;
}

int kick_rx(struct xsk_socket_info *xsk)
{
	int ret;

	ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
	if (ret < 0)
		return TEST_FAILURE;

	return TEST_PASS;
}

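/* Reap up to @batch_size completions from the completion ring, kicking
 * Tx first if the kernel asked for a wakeup. Seeing more completions
 * than there are outstanding descriptors is treated as a failure.
 */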
static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
{
	unsigned int rcvd;
	u32 idx;
	int ret;

	if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
		ret = kick_tx(xsk);
		if (ret)
			return TEST_FAILURE;
	}

	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
	if (rcvd) {
		if (rcvd > xsk->outstanding_tx) {
			u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);

			ksft_print_msg("[%s] Too many packets completed\n", __func__);
			ksft_print_msg("Last completion address: %llx\n",
				       (unsigned long long)addr);
			return TEST_FAILURE;
		}

		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
	}

	return TEST_PASS;
}

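/* Receive one batch of descriptors on @xsk and validate every fragment
 * against the expected packet stream. Fill ring buffers are recycled as
 * the frags are processed; a packet that is only partially received by
 * the end of the batch is rolled back and retried on the next call.
 */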
static int __receive_pkts(struct test_spec *test, struct xsk_socket_info *xsk)
{
	u32 frags_processed = 0, nb_frags = 0, pkt_len = 0;
	u32 idx_rx = 0, idx_fq = 0, rcvd, pkts_sent = 0;
	struct pkt_stream *pkt_stream = xsk->pkt_stream;
	struct ifobject *ifobj = test->ifobj_rx;
	struct xsk_umem_info *umem = xsk->umem;
	struct pollfd fds = { };
	struct pkt *pkt;
	u64 first_addr = 0;
	int ret;

	fds.fd = xsk_socket__fd(xsk->xsk);
	fds.events = POLLIN;

	ret = kick_rx(xsk);
	if (ret)
		return TEST_FAILURE;

	if (ifobj->use_poll) {
		ret = poll(&fds, 1, POLL_TMOUT);
		if (ret < 0)
			return TEST_FAILURE;

		if (!ret) {
			if (!is_umem_valid(test->ifobj_tx))
				return TEST_PASS;

			ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
			return TEST_CONTINUE;
		}

		if (!(fds.revents & POLLIN))
			return TEST_CONTINUE;
	}

	rcvd = xsk_ring_cons__peek(&xsk->rx, xsk->batch_size, &idx_rx);
	if (!rcvd)
		return TEST_CONTINUE;

	if (ifobj->use_fill_ring) {
		ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		while (ret != rcvd) {
			if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
				ret = poll(&fds, 1, POLL_TMOUT);
				if (ret < 0)
					return TEST_FAILURE;
			}
			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		}
	}

	while (frags_processed < rcvd) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
		u64 addr = desc->addr, orig;

		orig = xsk_umem__extract_addr(addr);
		addr = xsk_umem__add_offset_to_addr(addr);

		if (!nb_frags) {
			pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
			if (!pkt) {
				ksft_print_msg("[%s] received too many packets addr: %lx len %u\n",
					       __func__, addr, desc->len);
				return TEST_FAILURE;
			}
		}

		print_verbose("Rx: addr: %lx len: %u options: %u pkt_nb: %u valid: %u\n",
			      addr, desc->len, desc->options, pkt->pkt_nb, pkt->valid);

		if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
		    !is_offset_correct(umem, pkt, addr) || (ifobj->use_metadata &&
		    !is_metadata_correct(pkt, umem->buffer, addr)))
			return TEST_FAILURE;

		if (!nb_frags++)
			first_addr = addr;
		frags_processed++;
		pkt_len += desc->len;
		if (ifobj->use_fill_ring)
			*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;

		if (pkt_continues(desc->options))
			continue;

		/* The complete packet has been received */
		if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) ||
		    !is_offset_correct(umem, pkt, addr))
			return TEST_FAILURE;

		pkt_stream->nb_rx_pkts++;
		nb_frags = 0;
		pkt_len = 0;
	}

	if (nb_frags) {
		/* In the middle of a packet. Start over from beginning of packet. */
		idx_rx -= nb_frags;
		xsk_ring_cons__cancel(&xsk->rx, nb_frags);
		if (ifobj->use_fill_ring) {
			idx_fq -= nb_frags;
			xsk_ring_prod__cancel(&umem->fq, nb_frags);
		}
		frags_processed -= nb_frags;
		pkt_stream_cancel(pkt_stream);
		pkts_sent--;
	}

	if (ifobj->use_fill_ring)
		xsk_ring_prod__submit(&umem->fq, frags_processed);
	if (ifobj->release_rx)
		xsk_ring_cons__release(&xsk->rx, frags_processed);

	pthread_mutex_lock(&pacing_mutex);
	pkts_in_flight -= pkts_sent;
	pthread_mutex_unlock(&pacing_mutex);
	pkts_sent = 0;

	return TEST_CONTINUE;
}

bool all_packets_received(struct test_spec *test, struct xsk_socket_info *xsk, u32 sock_num,
			  unsigned long *bitmap)
{
	struct pkt_stream *pkt_stream = xsk->pkt_stream;

	if (!pkt_stream) {
		__set_bit(sock_num, bitmap);
		return false;
	}

	if (pkt_stream->nb_rx_pkts == pkt_stream->nb_valid_entries) {
		__set_bit(sock_num, bitmap);
		if (bitmap_full(bitmap, test->nb_sockets))
			return true;
	}

	return false;
}

static int receive_pkts(struct test_spec *test)
{
	struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
	DECLARE_BITMAP(bitmap, test->nb_sockets);
	struct xsk_socket_info *xsk;
	u32 sock_num = 0;
	int res, ret;

	bitmap_zero(bitmap, test->nb_sockets);

	ret = gettimeofday(&tv_now, NULL);
	if (ret)
		return TEST_FAILURE;

	timeradd(&tv_now, &tv_timeout, &tv_end);

	while (1) {
		xsk = &test->ifobj_rx->xsk_arr[sock_num];

		if ((all_packets_received(test, xsk, sock_num, bitmap)))
			break;

		res = __receive_pkts(test, xsk);
		if (!(res == TEST_PASS || res == TEST_CONTINUE))
			return res;

		ret = gettimeofday(&tv_now, NULL);
		if (ret)
			return TEST_FAILURE;

		if (timercmp(&tv_now, &tv_end, >)) {
			ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
			return TEST_FAILURE;
		}
		sock_num = (sock_num + 1) % test->nb_sockets;
	}

	return TEST_PASS;
}

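/* Send one batch of descriptors from the packet stream. Transmission is
 * paced against the receiver: if too many packets are in flight for the
 * receiver's UMEM to absorb, just kick Tx and come back later. Multi-frag
 * packets that do not fit in the remaining batch are deferred as a whole.
 */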
static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout)
{
	u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
	struct pkt_stream *pkt_stream = xsk->pkt_stream;
	struct xsk_umem_info *umem = ifobject->umem;
	bool use_poll = ifobject->use_poll;
	struct pollfd fds = { };
	int ret;

	buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
	/* pkts_in_flight might be negative if many invalid packets are sent */
	if (pkts_in_flight >= (int)((umem_size(umem) - xsk->batch_size * buffer_len) /
	    buffer_len)) {
		ret = kick_tx(xsk);
		if (ret)
			return TEST_FAILURE;
		return TEST_CONTINUE;
	}

	fds.fd = xsk_socket__fd(xsk->xsk);
	fds.events = POLLOUT;

	while (xsk_ring_prod__reserve(&xsk->tx, xsk->batch_size, &idx) < xsk->batch_size) {
		if (use_poll) {
			ret = poll(&fds, 1, POLL_TMOUT);
			if (timeout) {
				if (ret < 0) {
					ksft_print_msg("ERROR: [%s] Poll error %d\n",
						       __func__, errno);
					return TEST_FAILURE;
				}
				if (ret == 0)
					return TEST_PASS;
				break;
			}
			if (ret <= 0) {
				ksft_print_msg("ERROR: [%s] Poll error %d\n",
					       __func__, errno);
				return TEST_FAILURE;
			}
		}

		complete_pkts(xsk, xsk->batch_size);
	}

	for (i = 0; i < xsk->batch_size; i++) {
		struct pkt *pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
		u32 nb_frags_left, nb_frags, bytes_written = 0;

		if (!pkt)
			break;

		nb_frags = pkt_nb_frags(umem->frame_size, pkt_stream, pkt);
		if (nb_frags > xsk->batch_size - i) {
			pkt_stream_cancel(pkt_stream);
			xsk_ring_prod__cancel(&xsk->tx, xsk->batch_size - i);
			break;
		}
		nb_frags_left = nb_frags;

		while (nb_frags_left--) {
			struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);

			tx_desc->addr = pkt_get_addr(pkt, ifobject->umem);
			if (pkt_stream->verbatim) {
				tx_desc->len = pkt->len;
				tx_desc->options = pkt->options;
			} else if (nb_frags_left) {
				tx_desc->len = umem->frame_size;
				tx_desc->options = XDP_PKT_CONTD;
			} else {
				tx_desc->len = pkt->len - bytes_written;
				tx_desc->options = 0;
			}
			if (pkt->valid)
				pkt_generate(xsk, umem, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
					     bytes_written);
			bytes_written += tx_desc->len;

			print_verbose("Tx addr: %llx len: %u options: %u pkt_nb: %u\n",
				      tx_desc->addr, tx_desc->len, tx_desc->options, pkt->pkt_nb);

			if (nb_frags_left) {
				i++;
				if (pkt_stream->verbatim)
					pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
			}
		}

		if (pkt && pkt->valid) {
			valid_pkts++;
			valid_frags += nb_frags;
		}
	}

	pthread_mutex_lock(&pacing_mutex);
	pkts_in_flight += valid_pkts;
	pthread_mutex_unlock(&pacing_mutex);

	xsk_ring_prod__submit(&xsk->tx, i);
	xsk->outstanding_tx += valid_frags;

	if (use_poll) {
		ret = poll(&fds, 1, POLL_TMOUT);
		if (ret <= 0) {
			if (ret == 0 && timeout)
				return TEST_PASS;

			ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
			return TEST_FAILURE;
		}
	}

	if (!timeout) {
		if (complete_pkts(xsk, i))
			return TEST_FAILURE;

		usleep(10);
		return TEST_PASS;
	}

	return TEST_CONTINUE;
}

static int wait_for_tx_completion(struct xsk_socket_info *xsk)
{
	struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
	int ret;

	ret = gettimeofday(&tv_now, NULL);
	if (ret)
		return TEST_FAILURE;
	timeradd(&tv_now, &tv_timeout, &tv_end);

	while (xsk->outstanding_tx) {
		ret = gettimeofday(&tv_now, NULL);
		if (ret)
			return TEST_FAILURE;
		if (timercmp(&tv_now, &tv_end, >)) {
			ksft_print_msg("ERROR: [%s] Transmission loop timed out\n", __func__);
			return TEST_FAILURE;
		}

		complete_pkts(xsk, xsk->batch_size);
	}

	return TEST_PASS;
}

bool all_packets_sent(struct test_spec *test, unsigned long *bitmap)
{
	return bitmap_full(bitmap, test->nb_sockets);
}

static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
	bool timeout = !is_umem_valid(test->ifobj_rx);
	DECLARE_BITMAP(bitmap, test->nb_sockets);
	u32 i, ret;

	bitmap_zero(bitmap, test->nb_sockets);

	while (!(all_packets_sent(test, bitmap))) {
		for (i = 0; i < test->nb_sockets; i++) {
			struct pkt_stream *pkt_stream;

			pkt_stream = ifobject->xsk_arr[i].pkt_stream;
			if (!pkt_stream || pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) {
				__set_bit(i, bitmap);
				continue;
			}
			ret = __send_pkts(ifobject, &ifobject->xsk_arr[i], timeout);
			if (ret == TEST_CONTINUE && !test->fail)
				continue;

			if ((ret || test->fail) && !timeout)
				return TEST_FAILURE;

			if (ret == TEST_PASS && timeout)
				return ret;

			ret = wait_for_tx_completion(&ifobject->xsk_arr[i]);
			if (ret)
				return TEST_FAILURE;
		}
	}

	return TEST_PASS;
}

static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
{
	int fd = xsk_socket__fd(xsk), err;
	socklen_t optlen, expected_len;

	optlen = sizeof(*stats);
	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
	if (err) {
		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
			       __func__, errno, strerror(errno));
		return TEST_FAILURE;
	}

	expected_len = sizeof(struct xdp_statistics);
	if (optlen != expected_len) {
		ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n",
			       __func__, expected_len, optlen);
		return TEST_FAILURE;
	}

	return TEST_PASS;
}

static int validate_rx_dropped(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	struct xdp_statistics stats;
	int err;

	err = kick_rx(ifobject->xsk);
	if (err)
		return TEST_FAILURE;

	err = get_xsk_stats(xsk, &stats);
	if (err)
		return TEST_FAILURE;

	/* The receiver calls getsockopt after receiving the last (valid)
	 * packet which is not the final packet sent in this test (valid and
	 * invalid packets are sent in alternating fashion with the final
	 * packet being invalid). Since the last packet may or may not have
	 * been dropped already, both outcomes must be allowed.
	 */
	if (stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 ||
	    stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 - 1)
		return TEST_PASS;

	return TEST_FAILURE;
}

static int validate_rx_full(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	struct xdp_statistics stats;
	int err;

	usleep(1000);
	err = kick_rx(ifobject->xsk);
	if (err)
		return TEST_FAILURE;

	err = get_xsk_stats(xsk, &stats);
	if (err)
		return TEST_FAILURE;

	if (stats.rx_ring_full)
		return TEST_PASS;

	return TEST_FAILURE;
}

static int validate_fill_empty(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	struct xdp_statistics stats;
	int err;

	usleep(1000);
	err = kick_rx(ifobject->xsk);
	if (err)
		return TEST_FAILURE;

	err = get_xsk_stats(xsk, &stats);
	if (err)
		return TEST_FAILURE;

	if (stats.rx_fill_ring_empty_descs)
		return TEST_PASS;

	return TEST_FAILURE;
}

static int validate_tx_invalid_descs(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	int fd = xsk_socket__fd(xsk);
	struct xdp_statistics stats;
	socklen_t optlen;
	int err;

	optlen = sizeof(stats);
	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
	if (err) {
		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
			       __func__, errno, strerror(errno));
		return TEST_FAILURE;
	}

	if (stats.tx_invalid_descs != ifobject->xsk->pkt_stream->nb_pkts / 2) {
		ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%llu] expected [%u]\n",
			       __func__,
			       (unsigned long long)stats.tx_invalid_descs,
			       ifobject->xsk->pkt_stream->nb_pkts / 2);
		return TEST_FAILURE;
	}

	return TEST_PASS;
}

static int xsk_configure(struct test_spec *test, struct ifobject *ifobject,
			 struct xsk_umem_info *umem, bool tx)
{
	int i, ret;

	for (i = 0; i < test->nb_sockets; i++) {
		bool shared = (ifobject->shared_umem && tx) ? true : !!i;
		u32 ctr = 0;

		while (ctr++ < SOCK_RECONF_CTR) {
			ret = xsk_configure_socket(&ifobject->xsk_arr[i], umem,
						   ifobject, shared);
			if (!ret)
				break;

			/* Retry if it fails as xsk_socket__create() is asynchronous */
			if (ctr >= SOCK_RECONF_CTR)
				return ret;
			usleep(USLEEP_MAX);
		}
		if (ifobject->busy_poll) {
			ret = enable_busy_poll(&ifobject->xsk_arr[i]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
{
	int ret = xsk_configure(test, ifobject, test->ifobj_rx->umem, true);

	if (ret)
		return ret;
	ifobject->xsk = &ifobject->xsk_arr[0];
	ifobject->xskmap = test->ifobj_rx->xskmap;
	memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
	ifobject->umem->base_addr = 0;

	return 0;
}

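/* Pre-populate the fill ring with buffers for the expected Rx packets.
 * With fill_up set, every reserved slot gets a buffer even after the
 * packet stream runs out; otherwise only as many buffers as the stream
 * needs are handed to the kernel.
 */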
static int xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream,
				  bool fill_up)
{
	u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM;
	u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts;
	int ret;

	if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
		buffers_to_fill = umem->num_frames;
	else
		buffers_to_fill = umem->fill_size;

	ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
	if (ret != buffers_to_fill)
		return -ENOSPC;

	while (filled < buffers_to_fill) {
		struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts);
		u64 addr;
		u32 i;

		for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt_stream, pkt); i++) {
			if (!pkt) {
				if (!fill_up)
					break;
				addr = filled * umem->frame_size + umem->base_addr;
			} else if (pkt->offset >= 0) {
				addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem);
			} else {
				addr = pkt->offset + umem_alloc_buffer(umem);
			}

			*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
			if (++filled >= buffers_to_fill)
				break;
		}
	}
	xsk_ring_prod__submit(&umem->fq, filled);
	xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled);

	pkt_stream_reset(pkt_stream);
	umem_reset_alloc(umem);

	return 0;
}

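/* Per-thread setup shared by Tx and Rx: mmap the UMEM area (doubled for
 * shared-UMEM tests, huge pages in unaligned mode), create the UMEM and
 * the sockets, then populate the fill ring and the xskmap on the Rx side.
 */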
static int thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	int mmap_flags;
	u64 umem_sz;
	void *bufs;
	int ret;
	u32 i;

	umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
	mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;

	if (ifobject->umem->unaligned_mode)
		mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;

	if (ifobject->shared_umem)
		umem_sz *= 2;

	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
	if (bufs == MAP_FAILED)
		return -errno;

	ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz);
	if (ret)
		return ret;

	ret = xsk_configure(test, ifobject, ifobject->umem, false);
	if (ret)
		return ret;

	ifobject->xsk = &ifobject->xsk_arr[0];

	if (!ifobject->rx_on)
		return 0;

	ret = xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream,
				     ifobject->use_fill_ring);
	if (ret)
		return ret;

	for (i = 0; i < test->nb_sockets; i++) {
		ifobject->xsk = &ifobject->xsk_arr[i];
		ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, i);
		if (ret)
			return ret;
	}

	return 0;
}

void *worker_testapp_validate_tx(void *arg)
{
	struct test_spec *test = (struct test_spec *)arg;
	struct ifobject *ifobject = test->ifobj_tx;
	int err;

	if (test->current_step == 1) {
		if (!ifobject->shared_umem) {
			if (thread_common_ops(test, ifobject)) {
				test->fail = true;
				pthread_exit(NULL);
			}
		} else {
			if (thread_common_ops_tx(test, ifobject)) {
				test->fail = true;
				pthread_exit(NULL);
			}
		}
	}

	err = send_pkts(test, ifobject);

	if (!err && ifobject->validation_func)
		err = ifobject->validation_func(ifobject);
	if (err)
		test->fail = true;

	pthread_exit(NULL);
}

void *worker_testapp_validate_rx(void *arg)
{
	struct test_spec *test = (struct test_spec *)arg;
	struct ifobject *ifobject = test->ifobj_rx;
	int err;

	if (test->current_step == 1) {
		err = thread_common_ops(test, ifobject);
	} else {
		xsk_clear_xskmap(ifobject->xskmap);
		err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0);
		if (err)
			ksft_print_msg("Error: Failed to update xskmap, error %s\n",
				       strerror(-err));
	}

	pthread_barrier_wait(&barr);

	/* Exit only now on error, after the barrier, so the other thread does not get stuck in it */
1648 	if (err) {
1649 		test->fail = true;
1650 		pthread_exit(NULL);
1651 	}
1652 
1653 	err = receive_pkts(test);
1654 
1655 	if (!err && ifobject->validation_func)
1656 		err = ifobject->validation_func(ifobject);
1657 
1658 	if (err) {
1659 		if (!test->adjust_tail) {
1660 			test->fail = true;
1661 		} else {
1662 			bool supported;
1663 
1664 			if (is_adjust_tail_supported(ifobject->xdp_progs, &supported))
1665 				test->fail = true;
1666 			else if (!supported)
1667 				test->adjust_tail_support = false;
1668 			else
1669 				test->fail = true;
1670 		}
1671 	}
1672 
1673 	pthread_exit(NULL);
1674 }
1675 
testapp_clean_xsk_umem(struct ifobject * ifobj)1676 static void testapp_clean_xsk_umem(struct ifobject *ifobj)
1677 {
1678 	u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;
1679 
1680 	if (ifobj->shared_umem)
1681 		umem_sz *= 2;
1682 
1683 	umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
1684 	xsk_umem__delete(ifobj->umem->umem);
1685 	munmap(ifobj->umem->buffer, umem_sz);
1686 }
1687 
handler(int signum)1688 static void handler(int signum)
1689 {
1690 	pthread_exit(NULL);
1691 }
1692 
xdp_prog_changed_rx(struct test_spec * test)1693 static bool xdp_prog_changed_rx(struct test_spec *test)
1694 {
1695 	struct ifobject *ifobj = test->ifobj_rx;
1696 
1697 	return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode;
1698 }
1699 
xdp_prog_changed_tx(struct test_spec * test)1700 static bool xdp_prog_changed_tx(struct test_spec *test)
1701 {
1702 	struct ifobject *ifobj = test->ifobj_tx;
1703 
1704 	return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode;
1705 }
1706 
xsk_reattach_xdp(struct ifobject * ifobj,struct bpf_program * xdp_prog,struct bpf_map * xskmap,enum test_mode mode)1707 static int xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog,
1708 			     struct bpf_map *xskmap, enum test_mode mode)
1709 {
1710 	int err;
1711 
1712 	xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode));
1713 	err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode));
1714 	if (err) {
1715 		ksft_print_msg("Error attaching XDP program\n");
1716 		return err;
1717 	}
1718 
1719 	if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC))
1720 		if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) {
1721 			ksft_print_msg("ERROR: XDP prog not in DRV mode\n");
1722 			return -EINVAL;
1723 		}
1724 
1725 	ifobj->xdp_prog = xdp_prog;
1726 	ifobj->xskmap = xskmap;
1727 	ifobj->mode = mode;
1728 
1729 	return 0;
1730 }
1731 
xsk_attach_xdp_progs(struct test_spec * test,struct ifobject * ifobj_rx,struct ifobject * ifobj_tx)1732 static int xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx,
1733 				 struct ifobject *ifobj_tx)
1734 {
1735 	int err = 0;
1736 
1737 	if (xdp_prog_changed_rx(test)) {
1738 		err = xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode);
1739 		if (err)
1740 			return err;
1741 	}
1742 
1743 	if (!ifobj_tx || ifobj_tx->shared_umem)
1744 		return 0;
1745 
1746 	if (xdp_prog_changed_tx(test))
1747 		err = xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode);
1748 
1749 	return err;
1750 }
1751 
1752 static void clean_sockets(struct test_spec *test, struct ifobject *ifobj)
1753 {
1754 	u32 i;
1755 
1756 	if (!ifobj || !test)
1757 		return;
1758 
1759 	for (i = 0; i < test->nb_sockets; i++)
1760 		xsk_socket__delete(ifobj->xsk_arr[i].xsk);
1761 }
1762 
1763 static void clean_umem(struct test_spec *test, struct ifobject *ifobj1, struct ifobject *ifobj2)
1764 {
1765 	if (!ifobj1)
1766 		return;
1767 
1768 	testapp_clean_xsk_umem(ifobj1);
1769 	if (ifobj2 && !ifobj2->shared_umem)
1770 		testapp_clean_xsk_umem(ifobj2);
1771 }
1772 
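/* Core traffic validator. The RX worker (ifobj1) is spawned first; when a
 * TX worker (ifobj2) exists, the two synchronize on 'barr' so the receiver's
 * sockets and xskmap are ready before the first packet is sent. A lone RX
 * worker is cancelled with SIGUSR1 instead of being joined. Sockets and
 * UMEMs are freed only on the last step or on failure, since multi-step
 * tests reuse them across calls.
 */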
1773 static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1,
1774 				      struct ifobject *ifobj2)
1775 {
1776 	pthread_t t0, t1;
1777 	int err;
1778 
1779 	if (test->mtu > MAX_ETH_PKT_SIZE) {
1780 		if (test->mode == TEST_MODE_ZC && (!ifobj1->multi_buff_zc_supp ||
1781 						   (ifobj2 && !ifobj2->multi_buff_zc_supp))) {
1782 			ksft_print_msg("Multi buffer for zero-copy not supported.\n");
1783 			return TEST_SKIP;
1784 		}
1785 		if (test->mode != TEST_MODE_ZC && (!ifobj1->multi_buff_supp ||
1786 						   (ifobj2 && !ifobj2->multi_buff_supp))) {
1787 			ksft_print_msg("Multi buffer not supported.\n");
1788 			return TEST_SKIP;
1789 		}
1790 	}
1791 	err = test_spec_set_mtu(test, test->mtu);
1792 	if (err) {
1793 		ksft_print_msg("Error: could not set MTU.\n");
1794 		return TEST_FAILURE;
1795 	}
1796 
1797 	if (ifobj2) {
1798 		if (pthread_barrier_init(&barr, NULL, 2))
1799 			return TEST_FAILURE;
1800 		pkt_stream_reset(ifobj2->xsk->pkt_stream);
1801 	}
1802 
1803 	test->current_step++;
1804 	pkt_stream_reset(ifobj1->xsk->pkt_stream);
1805 	pkts_in_flight = 0;
1806 
1807 	signal(SIGUSR1, handler);
1808 	/* Spawn RX thread */
1809 	pthread_create(&t0, NULL, ifobj1->func_ptr, test);
1810 
1811 	if (ifobj2) {
1812 		pthread_barrier_wait(&barr);
1813 		if (pthread_barrier_destroy(&barr)) {
1814 			pthread_kill(t0, SIGUSR1);
1815 			clean_sockets(test, ifobj1);
1816 			clean_umem(test, ifobj1, NULL);
1817 			return TEST_FAILURE;
1818 		}
1819 
1820 		/* Spawn TX thread */
1821 		pthread_create(&t1, NULL, ifobj2->func_ptr, test);
1822 
1823 		pthread_join(t1, NULL);
1824 	}
1825 
1826 	if (!ifobj2)
1827 		pthread_kill(t0, SIGUSR1);
1828 	else
1829 		pthread_join(t0, NULL);
1830 
1831 	if (test->total_steps == test->current_step || test->fail) {
1832 		clean_sockets(test, ifobj1);
1833 		clean_sockets(test, ifobj2);
1834 		clean_umem(test, ifobj1, ifobj2);
1835 	}
1836 
1837 	if (test->fail)
1838 		return TEST_FAILURE;
1839 
1840 	return TEST_PASS;
1841 }
1842 
1843 static int testapp_validate_traffic(struct test_spec *test)
1844 {
1845 	struct ifobject *ifobj_rx = test->ifobj_rx;
1846 	struct ifobject *ifobj_tx = test->ifobj_tx;
1847 
1848 	if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) ||
1849 	    (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) {
1850 		ksft_print_msg("No huge pages present.\n");
1851 		return TEST_SKIP;
1852 	}
1853 
1854 	if (test->set_ring) {
1855 		if (ifobj_tx->hw_ring_size_supp) {
1856 			if (set_ring_size(ifobj_tx)) {
1857 				ksft_print_msg("Failed to change HW ring size.\n");
1858 				return TEST_FAILURE;
1859 			}
1860 		} else {
1861 			ksft_print_msg("Changing HW ring size not supported.\n");
1862 			return TEST_SKIP;
1863 		}
1864 	}
1865 
1866 	if (xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx))
1867 		return TEST_FAILURE;
1868 	return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx);
1869 }
1870 
1871 static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj)
1872 {
1873 	return __testapp_validate_traffic(test, ifobj, NULL);
1874 }
1875 
1876 int testapp_teardown(struct test_spec *test)
1877 {
1878 	int i;
1879 
1880 	for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
1881 		if (testapp_validate_traffic(test))
1882 			return TEST_FAILURE;
1883 		test_spec_reset(test);
1884 	}
1885 
1886 	return TEST_PASS;
1887 }
1888 
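/* Swap the roles of the two interfaces by exchanging both the ifobject
 * pointers and their thread entry points, so the former receiver becomes
 * the transmitter and vice versa.
 */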
1889 static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
1890 {
1891 	thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr;
1892 	struct ifobject *tmp_ifobj = (*ifobj1);
1893 
1894 	(*ifobj1)->func_ptr = (*ifobj2)->func_ptr;
1895 	(*ifobj2)->func_ptr = tmp_func_ptr;
1896 
1897 	*ifobj1 = *ifobj2;
1898 	*ifobj2 = tmp_ifobj;
1899 }
1900 
1901 int testapp_bidirectional(struct test_spec *test)
1902 {
1903 	int res;
1904 
1905 	test->ifobj_tx->rx_on = true;
1906 	test->ifobj_rx->tx_on = true;
1907 	test->total_steps = 2;
1908 	if (testapp_validate_traffic(test))
1909 		return TEST_FAILURE;
1910 
1911 	print_verbose("Switching Tx/Rx direction\n");
1912 	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
1913 	res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);
1914 
1915 	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
1916 	return res;
1917 }
1918 
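/* Retire the first pair of sockets and switch both ends over to entry 1 of
 * xsk_arr, then repoint slot 0 of the receiver's xskmap at the new socket so
 * that redirected traffic keeps flowing after the old sockets are gone.
 */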
1919 static int swap_xsk_resources(struct test_spec *test)
1920 {
1921 	int ret;
1922 
1923 	test->ifobj_tx->xsk_arr[0].pkt_stream = NULL;
1924 	test->ifobj_rx->xsk_arr[0].pkt_stream = NULL;
1925 	test->ifobj_tx->xsk_arr[1].pkt_stream = test->tx_pkt_stream_default;
1926 	test->ifobj_rx->xsk_arr[1].pkt_stream = test->rx_pkt_stream_default;
1927 	test->ifobj_tx->xsk = &test->ifobj_tx->xsk_arr[1];
1928 	test->ifobj_rx->xsk = &test->ifobj_rx->xsk_arr[1];
1929 
1930 	ret = xsk_update_xskmap(test->ifobj_rx->xskmap, test->ifobj_rx->xsk->xsk, 0);
1931 	if (ret)
1932 		return TEST_FAILURE;
1933 
1934 	return TEST_PASS;
1935 }
1936 
1937 int testapp_xdp_prog_cleanup(struct test_spec *test)
1938 {
1939 	test->total_steps = 2;
1940 	test->nb_sockets = 2;
1941 	if (testapp_validate_traffic(test))
1942 		return TEST_FAILURE;
1943 
1944 	if (swap_xsk_resources(test)) {
1945 		clean_sockets(test, test->ifobj_rx);
1946 		clean_sockets(test, test->ifobj_tx);
1947 		clean_umem(test, test->ifobj_rx, test->ifobj_tx);
1948 		return TEST_FAILURE;
1949 	}
1950 
1951 	return testapp_validate_traffic(test);
1952 }
1953 
1954 int testapp_headroom(struct test_spec *test)
1955 {
1956 	test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
1957 	return testapp_validate_traffic(test);
1958 }
1959 
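/* Provoke RX drops by inflating the receiver's frame headroom so that only
 * about two minimum-sized packets worth of space is left per frame: the
 * larger half of the packets no longer fits and should be dropped, which
 * validate_rx_dropped() then checks against the socket statistics.
 */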
1960 int testapp_stats_rx_dropped(struct test_spec *test)
1961 {
1962 	u32 umem_tr = test->ifobj_tx->umem_tailroom;
1963 
1964 	if (test->mode == TEST_MODE_ZC) {
1965 		ksft_print_msg("Cannot run RX_DROPPED test for ZC mode\n");
1966 		return TEST_SKIP;
1967 	}
1968 
1969 	if (pkt_stream_replace_half(test, (MIN_PKT_SIZE * 3) + umem_tr, 0))
1970 		return TEST_FAILURE;
1971 	test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
1972 		XDP_PACKET_HEADROOM - (MIN_PKT_SIZE * 2) - umem_tr;
1973 	if (pkt_stream_receive_half(test))
1974 		return TEST_FAILURE;
1975 	test->ifobj_rx->validation_func = validate_rx_dropped;
1976 	return testapp_validate_traffic(test);
1977 }
1978 
1979 int testapp_stats_tx_invalid_descs(struct test_spec *test)
1980 {
1981 	if (pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0))
1982 		return TEST_FAILURE;
1983 	test->ifobj_tx->validation_func = validate_tx_invalid_descs;
1984 	return testapp_validate_traffic(test);
1985 }
1986 
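/* Overflow the RX ring: the sender pushes 1.5x DEFAULT_UMEM_BUFFERS packets
 * while the RX ring holds only DEFAULT_UMEM_BUFFERS entries and release_rx
 * is off, so the receiver does not free up ring space; validate_rx_full()
 * then checks that the excess shows up in the ring-full statistic.
 */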
1987 int testapp_stats_rx_full(struct test_spec *test)
1988 {
1989 	struct pkt_stream *tmp;
1990 
1991 	tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
1992 	if (!tmp)
1993 		return TEST_FAILURE;
1994 	test->ifobj_tx->xsk->pkt_stream = tmp;
1995 
1996 	tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
1997 	if (!tmp)
1998 		return TEST_FAILURE;
1999 	test->ifobj_rx->xsk->pkt_stream = tmp;
2000 
2001 	test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
2002 	test->ifobj_rx->release_rx = false;
2003 	test->ifobj_rx->validation_func = validate_rx_full;
2004 	return testapp_validate_traffic(test);
2005 }
2006 
2007 int testapp_stats_fill_empty(struct test_spec *test)
2008 {
2009 	struct pkt_stream *tmp;
2010 
2011 	tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
2012 	if (!tmp)
2013 		return TEST_FAILURE;
2014 	test->ifobj_tx->xsk->pkt_stream = tmp;
2015 
2016 	tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
2017 	if (!tmp)
2018 		return TEST_FAILURE;
2019 	test->ifobj_rx->xsk->pkt_stream = tmp;
2020 
2021 	test->ifobj_rx->use_fill_ring = false;
2022 	test->ifobj_rx->validation_func = validate_fill_empty;
2023 	return testapp_validate_traffic(test);
2024 }
2025 
2026 int testapp_send_receive_unaligned(struct test_spec *test)
2027 {
2028 	test->ifobj_tx->umem->unaligned_mode = true;
2029 	test->ifobj_rx->umem->unaligned_mode = true;
2030 	/* Let half of the packets straddle a 4K buffer boundary */
2031 	if (pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2))
2032 		return TEST_FAILURE;
2033 
2034 	return testapp_validate_traffic(test);
2035 }
2036 
2037 int testapp_send_receive_unaligned_mb(struct test_spec *test)
2038 {
2039 	test->mtu = MAX_ETH_JUMBO_SIZE;
2040 	test->ifobj_tx->umem->unaligned_mode = true;
2041 	test->ifobj_rx->umem->unaligned_mode = true;
2042 	if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE))
2043 		return TEST_FAILURE;
2044 	return testapp_validate_traffic(test);
2045 }
2046 
2047 int testapp_single_pkt(struct test_spec *test)
2048 {
2049 	struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}};
2050 
2051 	if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
2052 		return TEST_FAILURE;
2053 	return testapp_validate_traffic(test);
2054 }
2055 
2056 int testapp_send_receive_mb(struct test_spec *test)
2057 {
2058 	test->mtu = MAX_ETH_JUMBO_SIZE;
2059 	if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE))
2060 		return TEST_FAILURE;
2061 
2062 	return testapp_validate_traffic(test);
2063 }
2064 
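/* Each struct pkt initializer below reads {offset, len, pkt_nb, valid,
 * options} (field order inferred from the assignments elsewhere in this
 * file; the third member stays 0 in every entry here). XDP_PKT_CONTD marks
 * a frag as continued, so a multi-buffer packet is a run of CONTD descs
 * terminated by one without the flag.
 */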
2065 int testapp_invalid_desc_mb(struct test_spec *test)
2066 {
2067 	struct xsk_umem_info *umem = test->ifobj_tx->umem;
2068 	u64 umem_size = umem->num_frames * umem->frame_size;
2069 	struct pkt pkts[] = {
2070 		/* Valid packet for sync to start with */
2071 		{0, MIN_PKT_SIZE, 0, true, 0},
2072 		/* Zero frame len is not legal */
2073 		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2074 		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2075 		{0, 0, 0, false, 0},
2076 		/* Invalid address in the second frame */
2077 		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2078 		{umem_size, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2079 		/* Invalid len in the middle */
2080 		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2081 		{0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2082 		/* Invalid options in the middle */
2083 		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2084 		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XSK_DESC__INVALID_OPTION},
2085 		/* Transmit 2 frags, receive 3 */
2086 		{0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, XDP_PKT_CONTD},
2087 		{0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, 0},
2088 		/* Middle frame crosses chunk boundary with small length */
2089 		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2090 		{-MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false, 0},
2091 		/* Valid packet for sync so that something is received */
2092 		{0, MIN_PKT_SIZE, 0, true, 0}};
2093 
2094 	if (umem->unaligned_mode) {
2095 		/* Crossing a chunk boundary allowed */
2096 		pkts[12].valid = true;
2097 		pkts[13].valid = true;
2098 	}
2099 
2100 	test->mtu = MAX_ETH_JUMBO_SIZE;
2101 	if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
2102 		return TEST_FAILURE;
2103 	return testapp_validate_traffic(test);
2104 }
2105 
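/* Probe the edges of descriptor validation with single-frag packets:
 * addresses before, at, and past the end of the UMEM, an oversized length,
 * and packets straddling 2K/4K boundaries. Which cases are legal depends on
 * aligned vs unaligned chunk mode and on the frame size, so the expected
 * outcomes are patched below before the stream is generated.
 */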
2106 int testapp_invalid_desc(struct test_spec *test)
2107 {
2108 	struct xsk_umem_info *umem = test->ifobj_tx->umem;
2109 	u64 umem_size = umem->num_frames * umem->frame_size;
2110 	struct pkt pkts[] = {
2111 		/* Zero packet address allowed */
2112 		{0, MIN_PKT_SIZE, 0, true},
2113 		/* Allowed packet */
2114 		{0, MIN_PKT_SIZE, 0, true},
2115 		/* Straddling the start of umem */
2116 		{-2, MIN_PKT_SIZE, 0, false},
2117 		/* Packet too large */
2118 		{0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
2119 		/* Up to end of umem allowed */
2120 		{umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true},
2121 		/* After umem ends */
2122 		{umem_size, MIN_PKT_SIZE, 0, false},
2123 		/* Straddle the end of umem */
2124 		{umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
2125 		/* Straddle a 4K boundary */
2126 		{0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
2127 		/* Straddle a 2K boundary */
2128 		{0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true},
2129 		/* Valid packet for sync so that something is received */
2130 		{0, MIN_PKT_SIZE, 0, true}};
2131 
2132 	if (umem->unaligned_mode) {
2133 		/* Crossing a page boundary allowed */
2134 		pkts[7].valid = true;
2135 	}
2136 	if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
2137 		/* Crossing a 2K frame size boundary not allowed */
2138 		pkts[8].valid = false;
2139 	}
2140 
2141 	if (test->ifobj_tx->shared_umem) {
2142 		pkts[4].offset += umem_size;
2143 		pkts[5].offset += umem_size;
2144 		pkts[6].offset += umem_size;
2145 	}
2146 
2147 	if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
2148 		return TEST_FAILURE;
2149 	return testapp_validate_traffic(test);
2150 }
2151 
2152 int testapp_xdp_drop(struct test_spec *test)
2153 {
2154 	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
2155 	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
2156 
2157 	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
2158 			       skel_rx->maps.xsk, skel_tx->maps.xsk);
2159 
2160 	if (pkt_stream_receive_half(test))
2161 		return TEST_FAILURE;
2162 	return testapp_validate_traffic(test);
2163 }
2164 
2165 int testapp_xdp_metadata_copy(struct test_spec *test)
2166 {
2167 	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
2168 	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
2169 
2170 	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata,
2171 			       skel_tx->progs.xsk_xdp_populate_metadata,
2172 			       skel_rx->maps.xsk, skel_tx->maps.xsk);
2173 	test->ifobj_rx->use_metadata = true;
2174 
2175 	skel_rx->bss->count = 0;
2176 
2177 	return testapp_validate_traffic(test);
2178 }
2179 
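/* Two sockets bound to the same UMEM. The XDP program is expected to
 * distribute packets between the sockets, so the default stream is split
 * into even and odd sequences that are validated independently.
 */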
2180 int testapp_xdp_shared_umem(struct test_spec *test)
2181 {
2182 	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
2183 	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
2184 	int ret;
2185 
2186 	test->total_steps = 1;
2187 	test->nb_sockets = 2;
2188 
2189 	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_shared_umem,
2190 			       skel_tx->progs.xsk_xdp_shared_umem,
2191 			       skel_rx->maps.xsk, skel_tx->maps.xsk);
2192 
2193 	if (pkt_stream_even_odd_sequence(test))
2194 		return TEST_FAILURE;
2195 
2196 	ret = testapp_validate_traffic(test);
2197 
2198 	release_even_odd_sequence(test);
2199 
2200 	return ret;
2201 }
2202 
2203 int testapp_poll_txq_tmout(struct test_spec *test)
2204 {
2205 	test->ifobj_tx->use_poll = true;
2206 	/* Create invalid frames by setting both the umem frame_size and the pkt length to 2048 */
2207 	test->ifobj_tx->umem->frame_size = 2048;
2208 	if (pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048))
2209 		return TEST_FAILURE;
2210 	return testapp_validate_traffic_single_thread(test, test->ifobj_tx);
2211 }
2212 
2213 int testapp_poll_rxq_tmout(struct test_spec *test)
2214 {
2215 	test->ifobj_rx->use_poll = true;
2216 	return testapp_validate_traffic_single_thread(test, test->ifobj_rx);
2217 }
2218 
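/* Exercise the driver's frag limit: one valid packet with exactly the
 * maximum number of frags, and one invalid packet whose last frag still
 * signals continuation, i.e. one frag too many. In zero-copy mode the limit
 * is the driver's xdp_zc_max_segs; otherwise it is max_skb_frags plus one,
 * presumably to account for the linear part of the skb.
 */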
2219 int testapp_too_many_frags(struct test_spec *test)
2220 {
2221 	struct pkt *pkts;
2222 	u32 max_frags, i;
2223 	int ret = TEST_FAILURE;
2224 
2225 	if (test->mode == TEST_MODE_ZC) {
2226 		max_frags = test->ifobj_tx->xdp_zc_max_segs;
2227 	} else {
2228 		max_frags = test->ifobj_tx->max_skb_frags;
2229 		max_frags += 1;
2230 	}
2231 
2232 	pkts = calloc(2 * max_frags + 2, sizeof(struct pkt));
2233 	if (!pkts)
2234 		return TEST_FAILURE;
2235 
2236 	test->mtu = MAX_ETH_JUMBO_SIZE;
2237 
2238 	/* Valid packet for sync */
2239 	pkts[0].len = MIN_PKT_SIZE;
2240 	pkts[0].valid = true;
2241 
2242 	/* One valid packet with the max number of frags */
2243 	for (i = 1; i < max_frags + 1; i++) {
2244 		pkts[i].len = MIN_PKT_SIZE;
2245 		pkts[i].options = XDP_PKT_CONTD;
2246 		pkts[i].valid = true;
2247 	}
2248 	pkts[max_frags].options = 0;
2249 
2250 	/* An invalid packet with the max number of frags that still signals
2251 	 * continuation on the last frag
2252 	 */
2253 	for (i = max_frags + 1; i < 2 * max_frags + 1; i++) {
2254 		pkts[i].len = MIN_PKT_SIZE;
2255 		pkts[i].options = XDP_PKT_CONTD;
2256 		pkts[i].valid = false;
2257 	}
2258 
2259 	/* Valid packet for sync */
2260 	pkts[2 * max_frags + 1].len = MIN_PKT_SIZE;
2261 	pkts[2 * max_frags + 1].valid = true;
2262 
2263 	if (pkt_stream_generate_custom(test, pkts, 2 * max_frags + 2)) {
2264 		free(pkts);
2265 		return TEST_FAILURE;
2266 	}
2267 
2268 	ret = testapp_validate_traffic(test);
2269 	free(pkts);
2270 	return ret;
2271 }
2272 
2273 static int xsk_load_xdp_programs(struct ifobject *ifobj)
2274 {
2275 	ifobj->xdp_progs = xsk_xdp_progs__open_and_load();
2276 	if (libbpf_get_error(ifobj->xdp_progs))
2277 		return libbpf_get_error(ifobj->xdp_progs);
2278 
2279 	return 0;
2280 }
2281 
2282 /* Simple probe: a hugetlb mapping only succeeds if huge pages are available */
2283 static bool hugepages_present(void)
2284 {
2285 	size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
2286 	void *bufs;
2287 
2288 	bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
2289 		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
2290 	if (bufs == MAP_FAILED)
2291 		return false;
2292 
2293 	mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
2294 	munmap(bufs, mmap_sz);
2295 	return true;
2296 }
2297 
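/* Load the XDP test programs and probe the interface's capabilities:
 * available huge pages gate the unaligned-mode tests, NETDEV_XDP_ACT_RX_SG
 * signals multi-buffer support, and a zero-copy driver advertising more
 * than one segment enables the multi-buffer zero-copy tests.
 */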
2298 int init_iface(struct ifobject *ifobj, thread_func_t func_ptr)
2299 {
2300 	LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
2301 	int err;
2302 
2303 	ifobj->func_ptr = func_ptr;
2304 
2305 	err = xsk_load_xdp_programs(ifobj);
2306 	if (err) {
2307 		ksft_print_msg("Error loading XDP program\n");
2308 		return err;
2309 	}
2310 
2311 	if (hugepages_present())
2312 		ifobj->unaligned_supp = true;
2313 
2314 	err = bpf_xdp_query(ifobj->ifindex, XDP_FLAGS_DRV_MODE, &query_opts);
2315 	if (err) {
2316 		ksft_print_msg("Error querying XDP capabilities\n");
2317 		return err;
2318 	}
2319 	if (query_opts.feature_flags & NETDEV_XDP_ACT_RX_SG)
2320 		ifobj->multi_buff_supp = true;
2321 	if (query_opts.feature_flags & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
2322 		if (query_opts.xdp_zc_max_segs > 1) {
2323 			ifobj->multi_buff_zc_supp = true;
2324 			ifobj->xdp_zc_max_segs = query_opts.xdp_zc_max_segs;
2325 		} else {
2326 			ifobj->xdp_zc_max_segs = 0;
2327 		}
2328 	}
2329 
2330 	return 0;
2331 }
2332 
2333 int testapp_send_receive(struct test_spec *test)
2334 {
2335 	return testapp_validate_traffic(test);
2336 }
2337 
2338 int testapp_send_receive_2k_frame(struct test_spec *test)
2339 {
2340 	test->ifobj_tx->umem->frame_size = 2048;
2341 	test->ifobj_rx->umem->frame_size = 2048;
2342 	if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE))
2343 		return TEST_FAILURE;
2344 	return testapp_validate_traffic(test);
2345 }
2346 
2347 int testapp_poll_rx(struct test_spec *test)
2348 {
2349 	test->ifobj_rx->use_poll = true;
2350 	return testapp_validate_traffic(test);
2351 }
2352 
2353 int testapp_poll_tx(struct test_spec *test)
2354 {
2355 	test->ifobj_tx->use_poll = true;
2356 	return testapp_validate_traffic(test);
2357 }
2358 
2359 int testapp_aligned_inv_desc(struct test_spec *test)
2360 {
2361 	return testapp_invalid_desc(test);
2362 }
2363 
2364 int testapp_aligned_inv_desc_2k_frame(struct test_spec *test)
2365 {
2366 	test->ifobj_tx->umem->frame_size = 2048;
2367 	test->ifobj_rx->umem->frame_size = 2048;
2368 	return testapp_invalid_desc(test);
2369 }
2370 
2371 int testapp_unaligned_inv_desc(struct test_spec *test)
2372 {
2373 	test->ifobj_tx->umem->unaligned_mode = true;
2374 	test->ifobj_rx->umem->unaligned_mode = true;
2375 	return testapp_invalid_desc(test);
2376 }
2377 
2378 int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
2379 {
2380 	u64 page_size, umem_size;
2381 
2382 	/* Odd frame size so the UMEM doesn't end near a page boundary. */
2383 	test->ifobj_tx->umem->frame_size = 4001;
2384 	test->ifobj_rx->umem->frame_size = 4001;
2385 	test->ifobj_tx->umem->unaligned_mode = true;
2386 	test->ifobj_rx->umem->unaligned_mode = true;
2387 	/* This test exists to test descriptors that straddle the end of
2388 	 * the UMEM but not a page boundary.
2389 	 */
2390 	page_size = sysconf(_SC_PAGESIZE);
2391 	umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
2392 	assert(umem_size % page_size > MIN_PKT_SIZE);
2393 	assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
2394 
2395 	return testapp_invalid_desc(test);
2396 }
2397 
2398 int testapp_aligned_inv_desc_mb(struct test_spec *test)
2399 {
2400 	return testapp_invalid_desc_mb(test);
2401 }
2402 
2403 int testapp_unaligned_inv_desc_mb(struct test_spec *test)
2404 {
2405 	test->ifobj_tx->umem->unaligned_mode = true;
2406 	test->ifobj_rx->umem->unaligned_mode = true;
2407 	return testapp_invalid_desc_mb(test);
2408 }
2409 
2410 int testapp_xdp_metadata(struct test_spec *test)
2411 {
2412 	return testapp_xdp_metadata_copy(test);
2413 }
2414 
2415 int testapp_xdp_metadata_mb(struct test_spec *test)
2416 {
2417 	test->mtu = MAX_ETH_JUMBO_SIZE;
2418 	return testapp_xdp_metadata_copy(test);
2419 }
2420 
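/* Shrink the HW TX ring to a single batch worth of descriptors, then drive
 * it first with a batch size of 1 and then with one just below the ring
 * size, exercising both extremes of the SW/HW ring-size interaction.
 */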
2421 int testapp_hw_sw_min_ring_size(struct test_spec *test)
2422 {
2423 	int ret;
2424 
2425 	test->set_ring = true;
2426 	test->total_steps = 2;
2427 	test->ifobj_tx->ring.tx_pending = DEFAULT_BATCH_SIZE;
2428 	test->ifobj_tx->ring.rx_pending = DEFAULT_BATCH_SIZE * 2;
2429 	test->ifobj_tx->xsk->batch_size = 1;
2430 	test->ifobj_rx->xsk->batch_size = 1;
2431 	ret = testapp_validate_traffic(test);
2432 	if (ret)
2433 		return ret;
2434 
2435 	/* Set batch size to hw_ring_size - 1 */
2436 	test->ifobj_tx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
2437 	test->ifobj_rx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
2438 	return testapp_validate_traffic(test);
2439 }
2440 
2441 int testapp_hw_sw_max_ring_size(struct test_spec *test)
2442 {
2443 	u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 4;
2444 	int ret;
2445 
2446 	test->set_ring = true;
2447 	test->total_steps = 2;
2448 	test->ifobj_tx->ring.tx_pending = test->ifobj_tx->ring.tx_max_pending;
2449 	test->ifobj_tx->ring.rx_pending = test->ifobj_tx->ring.rx_max_pending;
2450 	test->ifobj_rx->umem->num_frames = max_descs;
2451 	test->ifobj_rx->umem->fill_size = max_descs;
2452 	test->ifobj_rx->umem->comp_size = max_descs;
2453 	test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
2454 	test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
2455 
2456 	ret = testapp_validate_traffic(test);
2457 	if (ret)
2458 		return ret;
2459 
2460 	/* Set batch_size to 8152 for testing, as the ice HW ignores the 3 lowest bits when
2461 	 * updating the Rx HW tail register.
2462 	 */
2463 	test->ifobj_tx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
2464 	test->ifobj_rx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
2465 	if (pkt_stream_replace(test, max_descs, MIN_PKT_SIZE)) {
2466 		clean_sockets(test, test->ifobj_tx);
2467 		clean_sockets(test, test->ifobj_rx);
2468 		clean_umem(test, test->ifobj_rx, test->ifobj_tx);
2469 		return TEST_FAILURE;
2470 	}
2471 
2472 	return testapp_validate_traffic(test);
2473 }
2474 
2475 static int testapp_xdp_adjust_tail(struct test_spec *test, int adjust_value)
2476 {
2477 	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
2478 	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
2479 
2480 	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_adjust_tail,
2481 			       skel_tx->progs.xsk_xdp_adjust_tail,
2482 			       skel_rx->maps.xsk, skel_tx->maps.xsk);
2483 
2484 	skel_rx->bss->adjust_value = adjust_value;
2485 
2486 	return testapp_validate_traffic(test);
2487 }
2488 
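/* Common driver for the adjust_tail tests: transmit DEFAULT_BATCH_SIZE
 * packets of pkt_len bytes and expect to receive them resized by 'value'
 * (negative shrinks, positive grows), as applied by the XDP program through
 * bpf_xdp_adjust_tail(). Returns TEST_SKIP when the kernel/driver
 * combination cannot resize the packet this way.
 */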
2489 static int testapp_adjust_tail(struct test_spec *test, u32 value, u32 pkt_len)
2490 {
2491 	int ret;
2492 
2493 	test->adjust_tail_support = true;
2494 	test->adjust_tail = true;
2495 	test->total_steps = 1;
2496 
2497 	ret = pkt_stream_replace_ifobject(test->ifobj_tx, DEFAULT_BATCH_SIZE, pkt_len);
2498 	if (ret)
2499 		return TEST_FAILURE;
2500 
2501 	ret = pkt_stream_replace_ifobject(test->ifobj_rx, DEFAULT_BATCH_SIZE, pkt_len + value);
2502 	if (ret)
2503 		return TEST_FAILURE;
2504 
2505 	ret = testapp_xdp_adjust_tail(test, value);
2506 	if (ret)
2507 		return ret;
2508 
2509 	if (!test->adjust_tail_support) {
2510 		ksft_print_msg("%s %sResize pkt with bpf_xdp_adjust_tail() not supported\n",
2511 				      mode_string(test), busy_poll_string(test));
2512 		return TEST_SKIP;
2513 	}
2514 
2515 	return 0;
2516 }
2517 
2518 int testapp_adjust_tail_shrink(struct test_spec *test)
2519 {
2520 	/* Shrink by 4 bytes for testing purposes */
2521 	return testapp_adjust_tail(test, -4, MIN_PKT_SIZE * 2);
2522 }
2523 
2524 int testapp_adjust_tail_shrink_mb(struct test_spec *test)
2525 {
2526 	test->mtu = MAX_ETH_JUMBO_SIZE;
2527 	/* Shrink by the frag size */
2528 	return testapp_adjust_tail(test, -XSK_UMEM__MAX_FRAME_SIZE, XSK_UMEM__LARGE_FRAME_SIZE * 2);
2529 }
2530 
2531 int testapp_adjust_tail_grow(struct test_spec *test)
2532 {
2533 	if (test->mode == TEST_MODE_SKB)
2534 		return TEST_SKIP;
2535 
2536 	/* Grow by 4 bytes for testing purposes */
2537 	return testapp_adjust_tail(test, 4, MIN_PKT_SIZE * 2);
2538 }
2539 
2540 int testapp_adjust_tail_grow_mb(struct test_spec *test)
2541 {
2542 	u32 grow_size;
2543 
2544 	if (test->mode == TEST_MODE_SKB)
2545 		return TEST_SKIP;
2546 
2547 	/* The worst case is when the underlying setup works on 3k buffers,
2548 	 * so let us account for it: given that we use 6k as pkt_len, expect
2549 	 * each packet to be broken down into 2 descs, each carrying
2550 	 * 3k of payload.
2551 	 *
2552 	 * 4k is the truesize: 3k payload, 256 headroom (HR), 320 tailroom (TR).
2553 	 */
2554 	grow_size = XSK_UMEM__MAX_FRAME_SIZE -
2555 		    XSK_UMEM__LARGE_FRAME_SIZE -
2556 		    XDP_PACKET_HEADROOM -
2557 		    test->ifobj_tx->umem_tailroom;
2558 	test->mtu = MAX_ETH_JUMBO_SIZE;
2559 
2560 	return testapp_adjust_tail(test, grow_size, XSK_UMEM__LARGE_FRAME_SIZE * 2);
2561 }
2562 
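/* Queue one packet more than the default TX budget so the kernel has to
 * leave work pending after a full budget's worth of processing
 * (MAX_TX_BUDGET_DEFAULT presumably mirrors the kernel's default budget),
 * and let the TX path verify the queue's consumer pointer via
 * check_consumer.
 */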
2563 int testapp_tx_queue_consumer(struct test_spec *test)
2564 {
2565 	int nr_packets;
2566 
2567 	if (test->mode == TEST_MODE_ZC) {
2568 		ksft_print_msg("Cannot run TX_QUEUE_CONSUMER test for ZC mode\n");
2569 		return TEST_SKIP;
2570 	}
2571 
2572 	nr_packets = MAX_TX_BUDGET_DEFAULT + 1;
2573 	if (pkt_stream_replace(test, nr_packets, MIN_PKT_SIZE))
2574 		return TEST_FAILURE;
2575 	test->ifobj_tx->xsk->batch_size = nr_packets;
2576 	test->ifobj_tx->xsk->check_consumer = true;
2577 
2578 	return testapp_validate_traffic(test);
2579 }
2580 
2581 struct ifobject *ifobject_create(void)
2582 {
2583 	struct ifobject *ifobj;
2584 
2585 	ifobj = calloc(1, sizeof(struct ifobject));
2586 	if (!ifobj)
2587 		return NULL;
2588 
2589 	ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
2590 	if (!ifobj->xsk_arr)
2591 		goto out_xsk_arr;
2592 
2593 	ifobj->umem = calloc(1, sizeof(*ifobj->umem));
2594 	if (!ifobj->umem)
2595 		goto out_umem;
2596 
2597 	return ifobj;
2598 
2599 out_umem:
2600 	free(ifobj->xsk_arr);
2601 out_xsk_arr:
2602 	free(ifobj);
2603 	return NULL;
2604 }
2605 
2606 void ifobject_delete(struct ifobject *ifobj)
2607 {
2608 	free(ifobj->umem);
2609 	free(ifobj->xsk_arr);
2610 	free(ifobj);
2611 }
2612