// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_experimental.h"
#include "bpf_misc.h"

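/* Every map_value_##_size defined below starts with a single kptr field, so
 * the batch helpers can treat any of those map values as this generic layout.
 */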
struct generic_map_value {
	void *data;
};

char _license[] SEC("license") = "GPL";

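/* The BTF ids of the bin_data_##_size types are only known after compilation,
 * so the id arrays are left zeroed here; the userspace runner is expected to
 * resolve them from the object's BTF and write them into these const volatile
 * arrays (the read-only section) before the skeleton is loaded.
 */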
const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};

const unsigned int percpu_data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512};
const volatile unsigned int percpu_data_btf_ids[ARRAY_SIZE(percpu_data_sizes)] = {};

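/* err reports the first failure back to userspace; pid is set by the runner
 * so that only the test process triggers the fentry programs below.
 */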
int err = 0;
u32 pid = 0;

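/* Define one bin_data type, one map value wrapping it as a kptr, and one
 * 128-entry array map per size. The data array is _size - sizeof(void *)
 * bytes so that, with the allocator's 8-byte per-object llist_node overhead
 * added on top, the backing allocation should land exactly in the _size
 * bucket of the bpf memory allocator.
 */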
#define DEFINE_ARRAY_WITH_KPTR(_size) \
	struct bin_data_##_size { \
		char data[_size - sizeof(void *)]; \
	}; \
	/* See Commit 5d8d6634ccc, force btf generation for type bin_data_##_size */	\
	struct bin_data_##_size *__bin_data_##_size; \
	struct map_value_##_size { \
		struct bin_data_##_size __kptr * data; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_##_size); \
		__uint(max_entries, 128); \
	} array_##_size SEC(".maps")

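/* Same idea for per-cpu objects, except the data array uses the full _size:
 * the per-cpu storage is allocated separately from the bookkeeping header,
 * so the header should not eat into the per-cpu data here.
 */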
#define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
	struct percpu_bin_data_##_size { \
		char data[_size]; \
	}; \
	struct percpu_bin_data_##_size *__percpu_bin_data_##_size; \
	struct map_value_percpu_##_size { \
		struct percpu_bin_data_##_size __percpu_kptr * data; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_percpu_##_size); \
		__uint(max_entries, 128); \
	} array_percpu_##_size SEC(".maps")

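/* Allocate batch objects of the idx-th size and stash each one in a map slot
 * via bpf_kptr_xchg(). Distinct err values identify the failing step for the
 * userspace runner.
 */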
static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		new = bpf_obj_new_impl(data_btf_ids[idx], NULL);
		if (!new) {
			err = 2;
			return;
		}
		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_obj_drop(old);
			err = 3;
			return;
		}
	}
}

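/* Release every kptr stashed by batch_alloc(); a missing pointer (err = 5)
 * means the corresponding slot was never filled.
 */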
static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 4;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old) {
			err = 5;
			return;
		}
		bpf_obj_drop(old);
	}
}

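/* Per-cpu variant of batch_alloc(). Unlike the non-percpu case, an allocation
 * failure is tolerated (the loop continues) because the per-cpu allocator may
 * not be able to refill its caches in time.
 */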
static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,
					       unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		/* per-cpu allocator may not be able to refill in time */
		new = bpf_percpu_obj_new_impl(percpu_data_btf_ids[idx], NULL);
		if (!new)
			continue;

		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_percpu_obj_drop(old);
			err = 2;
			return;
		}
	}
}

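/* Per-cpu variant of batch_free(). Empty slots are skipped instead of being
 * treated as errors, matching the tolerated allocation failures above.
 */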
static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,
					      unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 3;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old)
			continue;
		bpf_percpu_obj_drop(old);
	}
}

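/* The helpers take a generic struct bpf_map *, so these wrappers cast away
 * the per-size map type.
 */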
#define CALL_BATCH_ALLOC(size, batch, idx) \
	batch_alloc((struct bpf_map *)(&array_##size), batch, idx)

#define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
	do { \
		batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \
		batch_free((struct bpf_map *)(&array_##size), batch, idx); \
	} while (0)

#define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \
	batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx)

#define CALL_BATCH_PERCPU_ALLOC_FREE(size, batch, idx) \
	do { \
		batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx); \
		batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
	} while (0)

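/* Instantiate one array map per size bucket: 16..4096 bytes for the regular
 * allocator, 8..512 bytes for the per-cpu one.
 */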
/* kptr doesn't support bin_data_8, whose data field would be a zero-sized array */
DEFINE_ARRAY_WITH_KPTR(16);
DEFINE_ARRAY_WITH_KPTR(32);
DEFINE_ARRAY_WITH_KPTR(64);
DEFINE_ARRAY_WITH_KPTR(96);
DEFINE_ARRAY_WITH_KPTR(128);
DEFINE_ARRAY_WITH_KPTR(192);
DEFINE_ARRAY_WITH_KPTR(256);
DEFINE_ARRAY_WITH_KPTR(512);
DEFINE_ARRAY_WITH_KPTR(1024);
DEFINE_ARRAY_WITH_KPTR(2048);
DEFINE_ARRAY_WITH_KPTR(4096);

DEFINE_ARRAY_WITH_PERCPU_KPTR(8);
DEFINE_ARRAY_WITH_PERCPU_KPTR(16);
DEFINE_ARRAY_WITH_PERCPU_KPTR(32);
DEFINE_ARRAY_WITH_PERCPU_KPTR(64);
DEFINE_ARRAY_WITH_PERCPU_KPTR(96);
DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
DEFINE_ARRAY_WITH_PERCPU_KPTR(512);

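/* The programs below attach to the entry of the nanosleep syscall. A
 * userspace runner is expected to fill pid and the BTF id arrays after
 * open, enable the one "?fentry" program under test, load, attach, and
 * then call nanosleep() to trigger it. A minimal sketch, assuming a
 * libbpf skeleton named test_bpf_ma (resolve_btf_ids() is illustrative):
 *
 *	skel = test_bpf_ma__open();
 *	resolve_btf_ids(bpf_object__btf(skel->obj), skel->rodata);
 *	skel->bss->pid = getpid();
 *	bpf_program__set_autoload(skel->progs.test_batch_alloc_free, true);
 *	test_bpf_ma__load(skel);
 *	test_bpf_ma__attach(skel);
 *	usleep(1);			// enters sys_nanosleep
 *	// check skel->bss->err == 0
 */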
SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_alloc_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 16-byte objects in batch to trigger refilling,
	 * then free 128 16-byte objects in batch to trigger freeing.
	 */
	CALL_BATCH_ALLOC_FREE(16, 128, 0);
	CALL_BATCH_ALLOC_FREE(32, 128, 1);
	CALL_BATCH_ALLOC_FREE(64, 128, 2);
	CALL_BATCH_ALLOC_FREE(96, 128, 3);
	CALL_BATCH_ALLOC_FREE(128, 128, 4);
	CALL_BATCH_ALLOC_FREE(192, 128, 5);
	CALL_BATCH_ALLOC_FREE(256, 128, 6);
	CALL_BATCH_ALLOC_FREE(512, 64, 7);
	CALL_BATCH_ALLOC_FREE(1024, 32, 8);
	CALL_BATCH_ALLOC_FREE(2048, 16, 9);
	CALL_BATCH_ALLOC_FREE(4096, 8, 10);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_free_through_map_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 16-byte objects in batch to trigger refilling,
	 * then free these objects through map free.
	 */
	CALL_BATCH_ALLOC(16, 128, 0);
	CALL_BATCH_ALLOC(32, 128, 1);
	CALL_BATCH_ALLOC(64, 128, 2);
	CALL_BATCH_ALLOC(96, 128, 3);
	CALL_BATCH_ALLOC(128, 128, 4);
	CALL_BATCH_ALLOC(192, 128, 5);
	CALL_BATCH_ALLOC(256, 128, 6);
	CALL_BATCH_ALLOC(512, 64, 7);
	CALL_BATCH_ALLOC(1024, 32, 8);
	CALL_BATCH_ALLOC(2048, 16, 9);
	CALL_BATCH_ALLOC(4096, 8, 10);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_percpu_alloc_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 8-byte per-cpu objects in batch to trigger refilling,
	 * then free 128 8-byte per-cpu objects in batch to trigger freeing.
	 */
	CALL_BATCH_PERCPU_ALLOC_FREE(8, 128, 0);
	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_percpu_free_through_map_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 8-byte per-cpu objects in batch to trigger refilling,
	 * then free these objects through map free.
	 */
	CALL_BATCH_PERCPU_ALLOC(8, 128, 0);
	CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
	CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
	CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
	CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
	CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
	CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
	CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
	CALL_BATCH_PERCPU_ALLOC(512, 64, 8);

	return 0;
}