xref: /linux/tools/testing/selftests/bpf/progs/kfunc_call_test.c (revision f5ad4101009e7f5f5984ffea6923d4fcd470932a)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021 Facebook */
3 #include <vmlinux.h>
4 #include <bpf/bpf_helpers.h>
5 #include "bpf_misc.h"
6 #include "../test_kmods/bpf_testmod_kfunc.h"
7 
8 SEC("tc")
9 int kfunc_call_test5(struct __sk_buff *skb)
10 {
11 	struct bpf_sock *sk = skb->sk;
12 	int ret;
13 	u32 val32;
14 	u16 val16;
15 	u8 val8;
16 
17 	if (!sk)
18 		return -1;
19 
20 	sk = bpf_sk_fullsock(sk);
21 	if (!sk)
22 		return -1;
23 
24 	/*
25 	 * Test with constant values to verify zero-extension.
26 	 * ISA-dependent BPF asm:
27 	 *   With ALU32:    w1 = 0xFF; w2 = 0xFFFF; w3 = 0xFFFFffff
28 	 *   Without ALU32: r1 = 0xFF; r2 = 0xFFFF; r3 = 0xFFFFffff
29 	 * Both zero-extend to 64-bit before the kfunc call.
30 	 */
31 	ret = bpf_kfunc_call_test5(0xFF, 0xFFFF, 0xFFFFffffULL);
32 	if (ret)
33 		return ret;
34 
35 	val32 = bpf_get_prandom_u32();
36 	val16 = val32 & 0xFFFF;
37 	val8 = val32 & 0xFF;
38 	ret = bpf_kfunc_call_test5(val8, val16, val32);
39 	if (ret)
40 		return ret;
41 
42 	/*
43 	 * Test multiplication with different operand sizes:
44 	 *
45 	 * val8 * 0xFF:
46 	 *   - Both operands promote to int (32-bit signed)
47 	 *   - Result: 32-bit multiplication, truncated to u8, then zero-extended
48 	 *
49 	 * val16 * 0xFFFF:
50 	 *   - Both operands promote to int (32-bit signed)
51 	 *   - Result: 32-bit multiplication, truncated to u16, then zero-extended
52 	 *
53 	 * val32 * 0xFFFFffffULL:
54 	 *   - val32 (u32) promotes to unsigned long long (due to ULL suffix)
55 	 *   - Result: 64-bit unsigned multiplication, truncated to u32, then zero-extended
56 	 */
57 	ret = bpf_kfunc_call_test5(val8 * 0xFF, val16 * 0xFFFF, val32 * 0xFFFFffffULL);
58 	if (ret)
59 		return ret;
60 
61 	return 0;
62 }
63 
/*
 * Assembly version testing the multiplication edge case explicitly.
 * This ensures consistent testing across different ISA versions.
 *
 * __naked: the body is exactly the asm below; no compiler prologue or
 * epilogue, so the "exit" instruction returns whatever is in r0.
 * r6/r7 are BPF callee-saved registers used as scratch across the
 * helper/kfunc calls.
 */
SEC("tc")
__naked int kfunc_call_test5_asm(void)
{
	asm volatile (
		/* Get a random u32 value */
		"call %[bpf_get_prandom_u32];"
		"r6 = r0;"              /* Save val32 in r6 */

		/* Prepare first argument: val8 * 0xFF */
		"r1 = r6;"
		"r1 &= 0xFF;"           /* val8 = val32 & 0xFF */
		"r7 = 0xFF;"
		"r1 *= r7;"             /* 64-bit mult: r1 = r1 * r7 */

		/* Prepare second argument: val16 * 0xFFFF */
		"r2 = r6;"
		"r2 &= 0xFFFF;"         /* val16 = val32 & 0xFFFF */
		"r7 = 0xFFFF;"
		"r2 *= r7;"             /* 64-bit mult: r2 = r2 * r7 */

		/* Prepare third argument: val32 * 0xFFFFffff */
		"r3 = r6;"              /* val32 */
		/* NOTE(review): a plain "r7 = imm" is a MOV64 whose 32-bit
		 * immediate is sign-extended, so this likely loads
		 * 0xFFFFFFFFffffffff (-1), not 0xFFFFffff. Loading the
		 * intended constant would need "r7 = 0xFFFFffff ll" or
		 * "w7 = 0xFFFFffff" — confirm against the assembler output.
		 */
		"r7 = 0xFFFFffff;"
		"r3 *= r7;"             /* 64-bit mult: r3 = r3 * r7 */

		/* Call kfunc with multiplication results.
		 * NOTE(review): sibling selftests invoke kfuncs from inline
		 * asm as "call %[sym];" with a matching __imm(sym) constraint;
		 * verify this bare symbol reference relocates correctly.
		 */
		"call bpf_kfunc_call_test5;"

		/* Check return value: nonzero kfunc result is returned as-is */
		"if r0 != 0 goto exit_%=;"
		"r0 = 0;"               /* r0 is already 0 on this path; kept for clarity */
		"exit_%=: exit;"
		:
		: __imm(bpf_get_prandom_u32)
		: __clobber_all);
}
104 
105 SEC("tc")
106 int kfunc_call_test4(struct __sk_buff *skb)
107 {
108 	struct bpf_sock *sk = skb->sk;
109 	long tmp;
110 
111 	if (!sk)
112 		return -1;
113 
114 	sk = bpf_sk_fullsock(sk);
115 	if (!sk)
116 		return -1;
117 
118 	tmp = bpf_kfunc_call_test4(-3, -30, -200, -1000);
119 	return (tmp >> 32) + tmp;
120 }
121 
122 SEC("tc")
123 int kfunc_call_test2(struct __sk_buff *skb)
124 {
125 	struct bpf_sock *sk = skb->sk;
126 
127 	if (!sk)
128 		return -1;
129 
130 	sk = bpf_sk_fullsock(sk);
131 	if (!sk)
132 		return -1;
133 
134 	return bpf_kfunc_call_test2((struct sock *)sk, 1, 2);
135 }
136 
137 SEC("tc")
138 int kfunc_call_test1(struct __sk_buff *skb)
139 {
140 	struct bpf_sock *sk = skb->sk;
141 	__u64 a = 1ULL << 32;
142 	__u32 ret;
143 
144 	if (!sk)
145 		return -1;
146 
147 	sk = bpf_sk_fullsock(sk);
148 	if (!sk)
149 		return -1;
150 
151 	a = bpf_kfunc_call_test1((struct sock *)sk, 1, a | 2, 3, a | 4);
152 	ret = a >> 32;   /* ret should be 2 */
153 	ret += (__u32)a; /* ret should be 12 */
154 
155 	return ret;
156 }
157 
158 SEC("tc")
159 int kfunc_call_test_ref_btf_id(struct __sk_buff *skb)
160 {
161 	struct prog_test_ref_kfunc *pt;
162 	unsigned long s = 0;
163 	int ret = 0;
164 
165 	pt = bpf_kfunc_call_test_acquire(&s);
166 	if (pt) {
167 		if (pt->a != 42 || pt->b != 108)
168 			ret = -1;
169 		bpf_kfunc_call_test_release(pt);
170 	}
171 	return ret;
172 }
173 
174 SEC("tc")
175 int kfunc_call_test_pass(struct __sk_buff *skb)
176 {
177 	struct prog_test_pass1 p1 = {};
178 	struct prog_test_pass2 p2 = {};
179 	short a = 0;
180 	__u64 b = 0;
181 	long c = 0;
182 	char d = 0;
183 	int e = 0;
184 
185 	bpf_kfunc_call_test_pass_ctx(skb);
186 	bpf_kfunc_call_test_pass1(&p1);
187 	bpf_kfunc_call_test_pass2(&p2);
188 
189 	bpf_kfunc_call_test_mem_len_pass1(&a, sizeof(a));
190 	bpf_kfunc_call_test_mem_len_pass1(&b, sizeof(b));
191 	bpf_kfunc_call_test_mem_len_pass1(&c, sizeof(c));
192 	bpf_kfunc_call_test_mem_len_pass1(&d, sizeof(d));
193 	bpf_kfunc_call_test_mem_len_pass1(&e, sizeof(e));
194 	bpf_kfunc_call_test_mem_len_fail2(&b, -1);
195 
196 	return 0;
197 }
198 
/* Context blob for the SEC("syscall") programs below; filled in by the
 * userspace test via the prog_test_run ctx_in buffer.
 */
struct syscall_test_args {
	__u8 data[16];   /* payload handed to the mem+len kfuncs */
	size_t size;     /* caller-claimed length of data; validated below */
};
203 
204 SEC("syscall")
205 int kfunc_syscall_test(struct syscall_test_args *args)
206 {
207 	const long size = args->size;
208 
209 	if (size > sizeof(args->data))
210 		return -7; /* -E2BIG */
211 
212 	bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(args->data));
213 	bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args));
214 	bpf_kfunc_call_test_mem_len_pass1(&args->data, size);
215 
216 	return 0;
217 }
218 
219 SEC("syscall")
220 int kfunc_syscall_test_null(struct syscall_test_args *args)
221 {
222 	/* Must be called with args as a NULL pointer
223 	 * we do not check for it to have the verifier consider that
224 	 * the pointer might not be null, and so we can load it.
225 	 *
226 	 * So the following can not be added:
227 	 *
228 	 * if (args)
229 	 *      return -22;
230 	 */
231 
232 	bpf_kfunc_call_test_mem_len_pass1(args, 0);
233 
234 	return 0;
235 }
236 
237 SEC("tc")
238 int kfunc_call_test_get_mem(struct __sk_buff *skb)
239 {
240 	struct prog_test_ref_kfunc *pt;
241 	unsigned long s = 0;
242 	int *p = NULL;
243 	int ret = 0;
244 
245 	pt = bpf_kfunc_call_test_acquire(&s);
246 	if (pt) {
247 		p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int));
248 		if (p) {
249 			p[0] = 42;
250 			ret = p[1]; /* 108 */
251 		} else {
252 			ret = -1;
253 		}
254 
255 		if (ret >= 0) {
256 			p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
257 			if (p)
258 				ret = p[0]; /* 42 */
259 			else
260 				ret = -1;
261 		}
262 
263 		bpf_kfunc_call_test_release(pt);
264 	}
265 	return ret;
266 }
267 
268 SEC("tc")
269 int kfunc_call_test_static_unused_arg(struct __sk_buff *skb)
270 {
271 
272 	u32 expected = 5, actual;
273 
274 	actual = bpf_kfunc_call_test_static_unused_arg(expected, 0xdeadbeef);
275 	return actual != expected ? -1 : 0;
276 }
277 
/* Map value wrapping a referenced kptr; the map owns whatever pointer is
 * stored here and destroys it on map cleanup.
 */
struct ctx_val {
	struct bpf_testmod_ctx __kptr *ctx;
};
281 
/* Single-slot array map used by kfunc_call_ctx() to stash a
 * bpf_testmod_ctx kptr so its destructor runs implicitly at cleanup.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct ctx_val);
} ctx_map SEC(".maps");
288 
289 SEC("tc")
290 int kfunc_call_ctx(struct __sk_buff *skb)
291 {
292 	struct bpf_testmod_ctx *ctx;
293 	int err = 0;
294 
295 	ctx = bpf_testmod_ctx_create(&err);
296 	if (!ctx && !err)
297 		err = -1;
298 	if (ctx) {
299 		int key = 0;
300 		struct ctx_val *ctx_val = bpf_map_lookup_elem(&ctx_map, &key);
301 
302 		/* Transfer ctx to map to be freed via implicit dtor call
303 		 * on cleanup.
304 		 */
305 		if (ctx_val)
306 			ctx = bpf_kptr_xchg(&ctx_val->ctx, ctx);
307 		if (ctx) {
308 			bpf_testmod_ctx_release(ctx);
309 			err = -1;
310 		}
311 	}
312 	return err;
313 }
314 
315 char _license[] SEC("license") = "GPL";
316