// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024-2025 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"

struct arr_elem {
	struct bpf_res_spin_lock lock;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct arr_elem);
} arrmap SEC(".maps");

long value;

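/* Global locks, each placed in its own custom data section so that it ends up
 * in a separate internal map.
 */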
struct bpf_spin_lock lock __hidden SEC(".data.A");
struct bpf_res_spin_lock res_lock __hidden SEC(".data.B");

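/* The lock argument must point directly into a map value or an allocated
 * object; a pointer laundered through bpf_core_cast() is rejected.
 */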
SEC("?tc")
__failure __msg("point to map value or allocated object")
int res_spin_lock_arg(struct __sk_buff *ctx)
{
	struct arr_elem *elem;

	elem = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem)
		return 0;
	bpf_res_spin_lock((struct bpf_res_spin_lock *)bpf_core_cast(&elem->lock, struct __sk_buff));
	bpf_res_spin_lock(&elem->lock);
	return 0;
}

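/* Taking the same resilient lock twice on one path is an AA deadlock. */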
SEC("?tc")
__failure __msg("AA deadlock detected")
int res_spin_lock_AA(struct __sk_buff *ctx)
{
	struct arr_elem *elem;

	elem = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem)
		return 0;
	bpf_res_spin_lock(&elem->lock);
	bpf_res_spin_lock(&elem->lock);
	return 0;
}

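/* Still an AA deadlock when the first acquisition sits behind an error check. */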
SEC("?tc")
__failure __msg("AA deadlock detected")
int res_spin_lock_cond_AA(struct __sk_buff *ctx)
{
	struct arr_elem *elem;

	elem = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem)
		return 0;
	if (bpf_res_spin_lock(&elem->lock))
		return 0;
	bpf_res_spin_lock(&elem->lock);
	return 0;
}

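/* Holding the map value lock but unlocking the global res_lock is a mismatch. */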
SEC("?tc")
__failure __msg("unlock of different lock")
int res_spin_lock_mismatch_1(struct __sk_buff *ctx)
{
	struct arr_elem *elem;

	elem = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem)
		return 0;
	if (bpf_res_spin_lock(&elem->lock))
		return 0;
	bpf_res_spin_unlock(&res_lock);
	return 0;
}

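/* The mirror case: hold the global res_lock, unlock the map value lock. */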
SEC("?tc")
__failure __msg("unlock of different lock")
int res_spin_lock_mismatch_2(struct __sk_buff *ctx)
{
	struct arr_elem *elem;

	elem = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem)
		return 0;
	if (bpf_res_spin_lock(&res_lock))
		return 0;
	bpf_res_spin_unlock(&elem->lock);
	return 0;
}

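/* A plain lock paired with an irqrestore unlock is treated as unlocking a
 * different lock.
 */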
SEC("?tc")
__failure __msg("unlock of different lock")
int res_spin_lock_irq_mismatch_1(struct __sk_buff *ctx)
{
	struct arr_elem *elem;
	unsigned long f1;

	elem = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem)
		return 0;
	bpf_local_irq_save(&f1);
	if (bpf_res_spin_lock(&res_lock))
		return 0;
	bpf_res_spin_unlock_irqrestore(&res_lock, &f1);
	return 0;
}

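/* The converse: an irqsave lock paired with a plain unlock is also a mismatch. */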
SEC("?tc")
__failure __msg("unlock of different lock")
int res_spin_lock_irq_mismatch_2(struct __sk_buff *ctx)
{
	struct arr_elem *elem;
	unsigned long f1;

	elem = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem)
		return 0;
	if (bpf_res_spin_lock_irqsave(&res_lock, &f1))
		return 0;
	bpf_res_spin_unlock(&res_lock);
	return 0;
}

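/* Nesting two different resilient locks is accepted, as long as every failure
 * path drops what is already held and the unlocks are in LIFO order.
 */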
SEC("?tc")
__success
int res_spin_lock_ooo(struct __sk_buff *ctx)
{
	struct arr_elem *elem;

	elem = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem)
		return 0;
	if (bpf_res_spin_lock(&res_lock))
		return 0;
	if (bpf_res_spin_lock(&elem->lock)) {
		bpf_res_spin_unlock(&res_lock);
		return 0;
	}
	bpf_res_spin_unlock(&elem->lock);
	bpf_res_spin_unlock(&res_lock);
	return 0;
}

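/* The same nesting pattern with the irqsave/irqrestore variants. */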
SEC("?tc")
__success
int res_spin_lock_ooo_irq(struct __sk_buff *ctx)
{
	struct arr_elem *elem;
	unsigned long f1, f2;

	elem = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem)
		return 0;
	if (bpf_res_spin_lock_irqsave(&res_lock, &f1))
		return 0;
	if (bpf_res_spin_lock_irqsave(&elem->lock, &f2)) {
		bpf_res_spin_unlock_irqrestore(&res_lock, &f1);
		/* We won't get an unreleased IRQ flag error here, since the
		 * failed irqsave above never acquired f2.
		 */
		return 0;
	}
	bpf_res_spin_unlock_irqrestore(&elem->lock, &f2);
	bpf_res_spin_unlock_irqrestore(&res_lock, &f1);
	return 0;
}

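/* Two resilient locks in their own data sections, used by the out-of-order
 * unlock test below.
 */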
struct bpf_res_spin_lock lock1 __hidden SEC(".data.OO1");
struct bpf_res_spin_lock lock2 __hidden SEC(".data.OO2");

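/* Releasing the outer lock while the inner one is still held is rejected as an
 * out-of-order unlock.
 */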
SEC("?tc")
__failure __msg("bpf_res_spin_unlock cannot be out of order")
int res_spin_lock_ooo_unlock(struct __sk_buff *ctx)
{
	if (bpf_res_spin_lock(&lock1))
		return 0;
	if (bpf_res_spin_lock(&lock2)) {
		bpf_res_spin_unlock(&lock1);
		return 0;
	}
	bpf_res_spin_unlock(&lock1);
	bpf_res_spin_unlock(&lock2);
	return 0;
}

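/* The argument must point exactly at the bpf_res_spin_lock field, which sits
 * at offset 0 of the map value, not one byte past it.
 */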
SEC("?tc")
__failure __msg("off 1 doesn't point to 'struct bpf_res_spin_lock' that is at 0")
int res_spin_lock_bad_off(struct __sk_buff *ctx)
{
	struct arr_elem *elem;

	elem = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem)
		return 0;
	bpf_res_spin_lock((void *)&elem->lock + 1);
	return 0;
}

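/* The lock pointer must have a constant offset; a variable offset is rejected
 * even when its range is known to be bounded.
 */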
SEC("?tc")
__failure __msg("R1 doesn't have constant offset. bpf_res_spin_lock has to be at the constant offset")
int res_spin_lock_var_off(struct __sk_buff *ctx)
{
	struct arr_elem *elem;
	u64 val = value;

	elem = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem) {
		/* FIXME: A bpf_throw() use only in the assert macro's inline
		 * assembly doesn't emit its BTF definition, so call it here.
		 */
		bpf_throw(0);
		return 0;
	}
	bpf_assert_range(val, 0, 40);
	bpf_res_spin_lock((void *)&value + val);
	return 0;
}

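/* 'value' lives in the .bss map, which contains no bpf_res_spin_lock at all. */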
SEC("?tc")
__failure __msg("map 'res_spin.bss' has no valid bpf_res_spin_lock")
int res_spin_lock_no_lock_map(struct __sk_buff *ctx)
{
	bpf_res_spin_lock((void *)&value + 1);
	return 0;
}

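/* Likewise for an allocated object whose type has no bpf_res_spin_lock field. */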
SEC("?tc")
__failure __msg("local 'kptr' has no valid bpf_res_spin_lock")
int res_spin_lock_no_lock_kptr(struct __sk_buff *ctx)
{
	struct { int i; } *p = bpf_obj_new(typeof(*p));

	if (!p)
		return 0;
	bpf_res_spin_lock((void *)p);
	return 0;
}

char _license[] SEC("license") = "GPL";