// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_experimental.h"
#include "bpf_misc.h"

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

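/* Every map_value_<size> defined below starts with a single kptr field, so
 * this generic view of a map value lets one set of batch helpers operate on
 * all of the typed arrays through bpf_kptr_xchg() on the first member.
 */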
struct generic_map_value {
	void *data;
};

char _license[] SEC("license") = "GPL";

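/* data_sizes[] enumerates the object sizes under test. data_btf_ids[] is
 * const volatile so that userspace can fill in the BTF type ID of each
 * bin_data_<size> struct before the object is loaded. A minimal sketch of
 * that step, assuming a generated skeleton and libbpf (variable names are
 * hypothetical):
 *
 *	id = btf__find_by_name_kind(btf, "bin_data_16", BTF_KIND_STRUCT);
 *	skel->rodata->data_btf_ids[0] = id;
 */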
const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};

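/* err reports the first failure back to userspace; pid is set by the test
 * runner so the programs only act on behalf of the test process.
 */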
int err = 0;
int pid = 0;

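/* Define a struct of _size - sizeof(void *) bytes plus an array map whose
 * value holds a kptr to it. Shrinking the struct by sizeof(void *) is meant
 * to keep the total allocation in the _size bucket of the BPF memory
 * allocator, assuming a 64-bit target where the allocator prepends an
 * 8-byte header to each object.
 */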
#define DEFINE_ARRAY_WITH_KPTR(_size) \
	struct bin_data_##_size { \
		char data[_size - sizeof(void *)]; \
	}; \
	struct map_value_##_size { \
		struct bin_data_##_size __kptr * data; \
		/* To emit BTF info for bin_data_xx */ \
		struct bin_data_##_size not_used; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_##_size); \
		__uint(max_entries, 128); \
	} array_##_size SEC(".maps")

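/* Same layout as above, but the value holds a per-cpu kptr. It reuses the
 * bin_data_<size> types emitted by DEFINE_ARRAY_WITH_KPTR, so it must be
 * instantiated after the corresponding DEFINE_ARRAY_WITH_KPTR(_size).
 */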
#define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
	struct map_value_percpu_##_size { \
		struct bin_data_##_size __percpu_kptr * data; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_percpu_##_size); \
		__uint(max_entries, 128); \
	} array_percpu_##_size SEC(".maps")

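/* Allocate one object per map slot and stash it through bpf_kptr_xchg().
 * err encodes the first failure: 1 = lookup failed, 2 = allocation failed,
 * 3 = the slot unexpectedly already held an object.
 */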
static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		new = bpf_obj_new_impl(data_btf_ids[idx], NULL);
		if (!new) {
			err = 2;
			return;
		}
		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_obj_drop(old);
			err = 3;
			return;
		}
	}
}

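/* Extract and drop the object stored in each slot. err encodes the first
 * failure: 4 = lookup failed, 5 = the slot was unexpectedly empty.
 */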
static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 4;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old) {
			err = 5;
			return;
		}
		bpf_obj_drop(old);
	}
}

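/* Per-cpu variant of batch_alloc(). Unlike the plain variant, a failed
 * allocation is tolerated (the per-cpu allocator may not refill in time),
 * so only lookup failure (err = 1) and an occupied slot (err = 2) count
 * as errors.
 */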
static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,
					       unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		/* per-cpu allocator may not be able to refill in time */
		new = bpf_percpu_obj_new_impl(data_btf_ids[idx], NULL);
		if (!new)
			continue;

		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_percpu_obj_drop(old);
			err = 2;
			return;
		}
	}
}

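/* Per-cpu variant of batch_free(). Empty slots are expected here, since
 * batch_percpu_alloc() may have skipped them; only lookup failure
 * (err = 3) is an error.
 */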
static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,
					      unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 3;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old)
			continue;
		bpf_percpu_obj_drop(old);
	}
}

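/* The casts below treat the typed array maps as generic struct bpf_map
 * pointers, so the four batch helpers above can serve every size.
 */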
#define CALL_BATCH_ALLOC(size, batch, idx) \
	batch_alloc((struct bpf_map *)(&array_##size), batch, idx)

#define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
	do { \
		batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \
		batch_free((struct bpf_map *)(&array_##size), batch, idx); \
	} while (0)

#define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \
	batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx)

#define CALL_BATCH_PERCPU_ALLOC_FREE(size, batch, idx) \
	do { \
		batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx); \
		batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
	} while (0)

/* kptr doesn't support bin_data_8, whose data member would be a zero-sized array */
DEFINE_ARRAY_WITH_KPTR(16);
DEFINE_ARRAY_WITH_KPTR(32);
DEFINE_ARRAY_WITH_KPTR(64);
DEFINE_ARRAY_WITH_KPTR(96);
DEFINE_ARRAY_WITH_KPTR(128);
DEFINE_ARRAY_WITH_KPTR(192);
DEFINE_ARRAY_WITH_KPTR(256);
DEFINE_ARRAY_WITH_KPTR(512);
DEFINE_ARRAY_WITH_KPTR(1024);
DEFINE_ARRAY_WITH_KPTR(2048);
DEFINE_ARRAY_WITH_KPTR(4096);

/* per-cpu kptr doesn't support bin_data_8 either, for the same reason */
DEFINE_ARRAY_WITH_PERCPU_KPTR(16);
DEFINE_ARRAY_WITH_PERCPU_KPTR(32);
DEFINE_ARRAY_WITH_PERCPU_KPTR(64);
DEFINE_ARRAY_WITH_PERCPU_KPTR(96);
DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
DEFINE_ARRAY_WITH_PERCPU_KPTR(512);
DEFINE_ARRAY_WITH_PERCPU_KPTR(1024);
DEFINE_ARRAY_WITH_PERCPU_KPTR(2048);
DEFINE_ARRAY_WITH_PERCPU_KPTR(4096);

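/* The "?" prefix marks the programs as not loaded by default; the test
 * runner enables one at a time and triggers it via the nanosleep syscall.
 * SYS_PREFIX (from bpf_misc.h) supplies the arch-specific syscall symbol
 * prefix.
 */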
SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_alloc_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Allocate objects of each size in batches to trigger refilling,
	 * then free them in batches to trigger freeing.
	 */
	CALL_BATCH_ALLOC_FREE(16, 128, 0);
	CALL_BATCH_ALLOC_FREE(32, 128, 1);
	CALL_BATCH_ALLOC_FREE(64, 128, 2);
	CALL_BATCH_ALLOC_FREE(96, 128, 3);
	CALL_BATCH_ALLOC_FREE(128, 128, 4);
	CALL_BATCH_ALLOC_FREE(192, 128, 5);
	CALL_BATCH_ALLOC_FREE(256, 128, 6);
	CALL_BATCH_ALLOC_FREE(512, 64, 7);
	CALL_BATCH_ALLOC_FREE(1024, 32, 8);
	CALL_BATCH_ALLOC_FREE(2048, 16, 9);
	CALL_BATCH_ALLOC_FREE(4096, 8, 10);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_free_through_map_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Allocate objects of each size in batches to trigger refilling,
	 * then free these objects through map free.
	 */
	CALL_BATCH_ALLOC(16, 128, 0);
	CALL_BATCH_ALLOC(32, 128, 1);
	CALL_BATCH_ALLOC(64, 128, 2);
	CALL_BATCH_ALLOC(96, 128, 3);
	CALL_BATCH_ALLOC(128, 128, 4);
	CALL_BATCH_ALLOC(192, 128, 5);
	CALL_BATCH_ALLOC(256, 128, 6);
	CALL_BATCH_ALLOC(512, 64, 7);
	CALL_BATCH_ALLOC(1024, 32, 8);
	CALL_BATCH_ALLOC(2048, 16, 9);
	CALL_BATCH_ALLOC(4096, 8, 10);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_percpu_alloc_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Allocate per-cpu objects of each size in batches to trigger
	 * refilling, then free them in batches to trigger freeing.
	 */
	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 0);
	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 1);
	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 2);
	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 3);
	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 4);
	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 5);
	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 6);
	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 7);
	CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 8);
	CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 9);
	CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 10);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_percpu_free_through_map_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Allocate per-cpu objects of each size in batches to trigger
	 * refilling, then free these objects through map free.
	 */
	CALL_BATCH_PERCPU_ALLOC(16, 128, 0);
	CALL_BATCH_PERCPU_ALLOC(32, 128, 1);
	CALL_BATCH_PERCPU_ALLOC(64, 128, 2);
	CALL_BATCH_PERCPU_ALLOC(96, 128, 3);
	CALL_BATCH_PERCPU_ALLOC(128, 128, 4);
	CALL_BATCH_PERCPU_ALLOC(192, 128, 5);
	CALL_BATCH_PERCPU_ALLOC(256, 128, 6);
	CALL_BATCH_PERCPU_ALLOC(512, 64, 7);
	CALL_BATCH_PERCPU_ALLOC(1024, 32, 8);
	CALL_BATCH_PERCPU_ALLOC(2048, 16, 9);
	CALL_BATCH_PERCPU_ALLOC(4096, 8, 10);

	return 0;
}