1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Linux Socket Filter - Kernel level socket filtering
4 *
5 * Based on the design of the Berkeley Packet Filter. The new
6 * internal format has been designed by PLUMgrid:
7 *
8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9 *
10 * Authors:
11 *
12 * Jay Schulist <jschlst@samba.org>
13 * Alexei Starovoitov <ast@plumgrid.com>
14 * Daniel Borkmann <dborkman@redhat.com>
15 *
16 * Andi Kleen - Fix a few bad bugs and races.
17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18 */
19
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/prandom.h>
25 #include <linux/bpf.h>
26 #include <linux/btf.h>
27 #include <linux/objtool.h>
28 #include <linux/overflow.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <linux/bpf_verifier.h>
36 #include <linux/nodemask.h>
37 #include <linux/nospec.h>
38 #include <linux/bpf_mem_alloc.h>
39 #include <linux/memcontrol.h>
40 #include <linux/execmem.h>
41
42 #include <asm/barrier.h>
43 #include <linux/unaligned.h>
44
45 /* Registers */
46 #define BPF_R0 regs[BPF_REG_0]
47 #define BPF_R1 regs[BPF_REG_1]
48 #define BPF_R2 regs[BPF_REG_2]
49 #define BPF_R3 regs[BPF_REG_3]
50 #define BPF_R4 regs[BPF_REG_4]
51 #define BPF_R5 regs[BPF_REG_5]
52 #define BPF_R6 regs[BPF_REG_6]
53 #define BPF_R7 regs[BPF_REG_7]
54 #define BPF_R8 regs[BPF_REG_8]
55 #define BPF_R9 regs[BPF_REG_9]
56 #define BPF_R10 regs[BPF_REG_10]
57
58 /* Named registers */
59 #define DST regs[insn->dst_reg]
60 #define SRC regs[insn->src_reg]
61 #define FP regs[BPF_REG_FP]
62 #define AX regs[BPF_REG_AX]
63 #define ARG1 regs[BPF_REG_ARG1]
64 #define CTX regs[BPF_REG_CTX]
65 #define OFF insn->off
66 #define IMM insn->imm
67
68 struct bpf_mem_alloc bpf_global_ma;
69 bool bpf_global_ma_set;
70
71 /* No hurry in this branch
72 *
73 * Exported for the bpf jit load helper.
74 */
75 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
76 {
77 u8 *ptr = NULL;
78
79 if (k >= SKF_NET_OFF) {
80 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
81 } else if (k >= SKF_LL_OFF) {
82 if (unlikely(!skb_mac_header_was_set(skb)))
83 return NULL;
84 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
85 }
86 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
87 return ptr;
88
89 return NULL;
90 }
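/* Illustrative example (offsets chosen for illustration, not from the
 * original file): a classic BPF filter reading the IPv4 protocol byte
 * would reach this helper via a negative offset, e.g.
 *
 *	u8 *p = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF + 9, 1);
 *
 * which resolves to skb_network_header(skb) + 9, provided that byte lies
 * between skb->head and skb_tail_pointer(skb).
 */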
91
92 /* Tell BPF programs that include vmlinux.h the kernel's PAGE_SIZE. */
93 enum page_size_enum {
94 __PAGE_SIZE = PAGE_SIZE
95 };
96
97 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
98 {
99 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
100 struct bpf_prog_aux *aux;
101 struct bpf_prog *fp;
102
103 size = round_up(size, __PAGE_SIZE);
104 fp = __vmalloc(size, gfp_flags);
105 if (fp == NULL)
106 return NULL;
107
108 aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
109 if (aux == NULL) {
110 vfree(fp);
111 return NULL;
112 }
113 fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
114 if (!fp->active) {
115 vfree(fp);
116 kfree(aux);
117 return NULL;
118 }
119
120 fp->pages = size / PAGE_SIZE;
121 fp->aux = aux;
122 fp->aux->prog = fp;
123 fp->jit_requested = ebpf_jit_enabled();
124 fp->blinding_requested = bpf_jit_blinding_enabled(fp);
125 #ifdef CONFIG_CGROUP_BPF
126 aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
127 #endif
128
129 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
130 #ifdef CONFIG_FINEIBT
131 INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode);
132 #endif
133 mutex_init(&fp->aux->used_maps_mutex);
134 mutex_init(&fp->aux->ext_mutex);
135 mutex_init(&fp->aux->dst_mutex);
136
137 return fp;
138 }
139
140 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
141 {
142 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
143 struct bpf_prog *prog;
144 int cpu;
145
146 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
147 if (!prog)
148 return NULL;
149
150 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
151 if (!prog->stats) {
152 free_percpu(prog->active);
153 kfree(prog->aux);
154 vfree(prog);
155 return NULL;
156 }
157
158 for_each_possible_cpu(cpu) {
159 struct bpf_prog_stats *pstats;
160
161 pstats = per_cpu_ptr(prog->stats, cpu);
162 u64_stats_init(&pstats->syncp);
163 }
164 return prog;
165 }
166 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
167
168 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
169 {
170 if (!prog->aux->nr_linfo || !prog->jit_requested)
171 return 0;
172
173 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
174 sizeof(*prog->aux->jited_linfo),
175 bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
176 if (!prog->aux->jited_linfo)
177 return -ENOMEM;
178
179 return 0;
180 }
181
182 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
183 {
184 if (prog->aux->jited_linfo &&
185 (!prog->jited || !prog->aux->jited_linfo[0])) {
186 kvfree(prog->aux->jited_linfo);
187 prog->aux->jited_linfo = NULL;
188 }
189
190 kfree(prog->aux->kfunc_tab);
191 prog->aux->kfunc_tab = NULL;
192 }
193
194 /* The JIT engine is responsible for providing an array
195 * for the insn_off to jited_off mapping (insn_to_jit_off).
196 *
197 * The idx to this array is the insn_off. Hence, the insn_off
198 * here is relative to the prog itself instead of the main prog.
199 * This array has one entry for each xlated bpf insn.
200 *
201 * jited_off is the byte off to the end of the jited insn.
202 *
203 * Hence, with
204 * insn_start:
205 * The first bpf insn off of the prog. The insn off
206 * here is relative to the main prog.
207 * e.g. if prog is a subprog, insn_start > 0
208 * linfo_idx:
209 * The prog's idx to prog->aux->linfo and jited_linfo
210 *
211 * jited_linfo[linfo_idx] = prog->bpf_func
212 *
213 * For i > linfo_idx,
214 *
215 * jited_linfo[i] = prog->bpf_func +
216 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
217 */
218 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
219 const u32 *insn_to_jit_off)
220 {
221 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
222 const struct bpf_line_info *linfo;
223 void **jited_linfo;
224
225 if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt)
226 /* Userspace did not provide linfo */
227 return;
228
229 linfo_idx = prog->aux->linfo_idx;
230 linfo = &prog->aux->linfo[linfo_idx];
231 insn_start = linfo[0].insn_off;
232 insn_end = insn_start + prog->len;
233
234 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
235 jited_linfo[0] = prog->bpf_func;
236
237 nr_linfo = prog->aux->nr_linfo - linfo_idx;
238
239 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
240 /* The verifier ensures that linfo[i].insn_off is
241 * strictly increasing
242 */
243 jited_linfo[i] = prog->bpf_func +
244 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
245 }
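/* Worked example (numbers chosen for illustration): for a subprog whose
 * first line info entry has insn_off == 10 (== insn_start) and whose
 * second entry has insn_off == 13, jited_linfo[0] is prog->bpf_func
 * itself and jited_linfo[1] = prog->bpf_func + insn_to_jit_off[13 - 10 - 1],
 * i.e. the byte right after the JITed code of the subprog's third xlated
 * instruction, which is where the code for insn_off 13 begins.
 */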
246
247 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
248 gfp_t gfp_extra_flags)
249 {
250 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
251 struct bpf_prog *fp;
252 u32 pages;
253
254 size = round_up(size, PAGE_SIZE);
255 pages = size / PAGE_SIZE;
256 if (pages <= fp_old->pages)
257 return fp_old;
258
259 fp = __vmalloc(size, gfp_flags);
260 if (fp) {
261 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
262 fp->pages = pages;
263 fp->aux->prog = fp;
264
265 /* We keep fp->aux from fp_old around in the new
266 * reallocated structure.
267 */
268 fp_old->aux = NULL;
269 fp_old->stats = NULL;
270 fp_old->active = NULL;
271 __bpf_prog_free(fp_old);
272 }
273
274 return fp;
275 }
276
277 void __bpf_prog_free(struct bpf_prog *fp)
278 {
279 if (fp->aux) {
280 mutex_destroy(&fp->aux->used_maps_mutex);
281 mutex_destroy(&fp->aux->dst_mutex);
282 kfree(fp->aux->poke_tab);
283 kfree(fp->aux);
284 }
285 free_percpu(fp->stats);
286 free_percpu(fp->active);
287 vfree(fp);
288 }
289
290 int bpf_prog_calc_tag(struct bpf_prog *fp)
291 {
292 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
293 u32 raw_size = bpf_prog_tag_scratch_size(fp);
294 u32 digest[SHA1_DIGEST_WORDS];
295 u32 ws[SHA1_WORKSPACE_WORDS];
296 u32 i, bsize, psize, blocks;
297 struct bpf_insn *dst;
298 bool was_ld_map;
299 u8 *raw, *todo;
300 __be32 *result;
301 __be64 *bits;
302
303 raw = vmalloc(raw_size);
304 if (!raw)
305 return -ENOMEM;
306
307 sha1_init(digest);
308 memset(ws, 0, sizeof(ws));
309
310 /* We need to take out the map fds for the digest calculation
311 * since they are unstable from the user space side.
312 */
313 dst = (void *)raw;
314 for (i = 0, was_ld_map = false; i < fp->len; i++) {
315 dst[i] = fp->insnsi[i];
316 if (!was_ld_map &&
317 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
318 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
319 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
320 was_ld_map = true;
321 dst[i].imm = 0;
322 } else if (was_ld_map &&
323 dst[i].code == 0 &&
324 dst[i].dst_reg == 0 &&
325 dst[i].src_reg == 0 &&
326 dst[i].off == 0) {
327 was_ld_map = false;
328 dst[i].imm = 0;
329 } else {
330 was_ld_map = false;
331 }
332 }
333
334 psize = bpf_prog_insn_size(fp);
335 memset(&raw[psize], 0, raw_size - psize);
336 raw[psize++] = 0x80;
337
338 bsize = round_up(psize, SHA1_BLOCK_SIZE);
339 blocks = bsize / SHA1_BLOCK_SIZE;
340 todo = raw;
341 if (bsize - psize >= sizeof(__be64)) {
342 bits = (__be64 *)(todo + bsize - sizeof(__be64));
343 } else {
344 bits = (__be64 *)(todo + bsize + bits_offset);
345 blocks++;
346 }
347 *bits = cpu_to_be64((psize - 1) << 3);
348
349 while (blocks--) {
350 sha1_transform(digest, todo, ws);
351 todo += SHA1_BLOCK_SIZE;
352 }
353
354 result = (__force __be32 *)digest;
355 for (i = 0; i < SHA1_DIGEST_WORDS; i++)
356 result[i] = cpu_to_be32(digest[i]);
357 memcpy(fp->tag, result, sizeof(fp->tag));
358
359 vfree(raw);
360 return 0;
361 }
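/* Padding sketch (a worked example, not part of the original comments):
 * for a three-instruction program, psize = 3 * sizeof(struct bpf_insn) = 24.
 * Appending the 0x80 marker makes psize 25, bsize rounds up to
 * SHA1_BLOCK_SIZE (64), and the big-endian bit length (24 << 3 == 192)
 * lands in the last 8 bytes of that single block, matching standard
 * SHA-1 message padding.
 */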
362
363 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
364 s32 end_new, s32 curr, const bool probe_pass)
365 {
366 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
367 s32 delta = end_new - end_old;
368 s64 imm = insn->imm;
369
370 if (curr < pos && curr + imm + 1 >= end_old)
371 imm += delta;
372 else if (curr >= end_new && curr + imm + 1 < end_new)
373 imm -= delta;
374 if (imm < imm_min || imm > imm_max)
375 return -ERANGE;
376 if (!probe_pass)
377 insn->imm = imm;
378 return 0;
379 }
380
381 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
382 s32 end_new, s32 curr, const bool probe_pass)
383 {
384 s64 off_min, off_max, off;
385 s32 delta = end_new - end_old;
386
387 if (insn->code == (BPF_JMP32 | BPF_JA)) {
388 off = insn->imm;
389 off_min = S32_MIN;
390 off_max = S32_MAX;
391 } else {
392 off = insn->off;
393 off_min = S16_MIN;
394 off_max = S16_MAX;
395 }
396
397 if (curr < pos && curr + off + 1 >= end_old)
398 off += delta;
399 else if (curr >= end_new && curr + off + 1 < end_new)
400 off -= delta;
401 if (off < off_min || off > off_max)
402 return -ERANGE;
403 if (!probe_pass) {
404 if (insn->code == (BPF_JMP32 | BPF_JA))
405 insn->imm = off;
406 else
407 insn->off = off;
408 }
409 return 0;
410 }
411
412 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
413 s32 end_new, const bool probe_pass)
414 {
415 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
416 struct bpf_insn *insn = prog->insnsi;
417 int ret = 0;
418
419 for (i = 0; i < insn_cnt; i++, insn++) {
420 u8 code;
421
422 /* In the probing pass we still operate on the original,
423 * unpatched image in order to check overflows before we
424 * do any other adjustments. Therefore skip the patchlet.
425 */
426 if (probe_pass && i == pos) {
427 i = end_new;
428 insn = prog->insnsi + end_old;
429 }
430 if (bpf_pseudo_func(insn)) {
431 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
432 end_new, i, probe_pass);
433 if (ret)
434 return ret;
435 continue;
436 }
437 code = insn->code;
438 if ((BPF_CLASS(code) != BPF_JMP &&
439 BPF_CLASS(code) != BPF_JMP32) ||
440 BPF_OP(code) == BPF_EXIT)
441 continue;
442 /* Adjust offset of jmps if we cross patch boundaries. */
443 if (BPF_OP(code) == BPF_CALL) {
444 if (insn->src_reg != BPF_PSEUDO_CALL)
445 continue;
446 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
447 end_new, i, probe_pass);
448 } else {
449 ret = bpf_adj_delta_to_off(insn, pos, end_old,
450 end_new, i, probe_pass);
451 }
452 if (ret)
453 break;
454 }
455
456 return ret;
457 }
458
459 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
460 {
461 struct bpf_line_info *linfo;
462 u32 i, nr_linfo;
463
464 nr_linfo = prog->aux->nr_linfo;
465 if (!nr_linfo || !delta)
466 return;
467
468 linfo = prog->aux->linfo;
469
470 for (i = 0; i < nr_linfo; i++)
471 if (off < linfo[i].insn_off)
472 break;
473
474 /* Push all off < linfo[i].insn_off by delta */
475 for (; i < nr_linfo; i++)
476 linfo[i].insn_off += delta;
477 }
478
479 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
480 const struct bpf_insn *patch, u32 len)
481 {
482 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
483 const u32 cnt_max = S16_MAX;
484 struct bpf_prog *prog_adj;
485 int err;
486
487 /* Since our patchlet doesn't expand the image, we're done. */
488 if (insn_delta == 0) {
489 memcpy(prog->insnsi + off, patch, sizeof(*patch));
490 return prog;
491 }
492
493 insn_adj_cnt = prog->len + insn_delta;
494
495 /* Reject anything that would potentially let the insn->off
496 * target overflow when we have excessive program expansions.
497 * We need to probe here before we do any reallocation where
498 * we afterwards may not fail anymore.
499 */
500 if (insn_adj_cnt > cnt_max &&
501 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
502 return ERR_PTR(err);
503
504 /* Several new instructions need to be inserted. Make room
505 * for them. Likely, there's no need for a new allocation as
506 * the last page could have large enough tailroom.
507 */
508 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
509 GFP_USER);
510 if (!prog_adj)
511 return ERR_PTR(-ENOMEM);
512
513 prog_adj->len = insn_adj_cnt;
514
515 /* Patching happens in 3 steps:
516 *
517 * 1) Move over tail of insnsi from next instruction onwards,
518 * so we can patch the single target insn with one or more
519 * new ones (patching is always from 1 to n insns, n > 0).
520 * 2) Inject new instructions at the target location.
521 * 3) Adjust branch offsets if necessary.
522 */
523 insn_rest = insn_adj_cnt - off - len;
524
525 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
526 sizeof(*patch) * insn_rest);
527 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
528
529 /* We are guaranteed not to fail at this point; otherwise the ship
530 * has sailed and there is no way back to the original state. An
531 * overflow cannot happen at this point.
532 */
533 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
534
535 bpf_adj_linfo(prog_adj, off, insn_delta);
536
537 return prog_adj;
538 }
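/* Patching example (offsets chosen for illustration): replacing the
 * single instruction at off == 5 with a three-instruction patch gives
 * insn_delta == 2; the tail starting at insn 6 is moved to insn 8, the
 * patch is copied into insns 5..7, and bpf_adj_branches() then adds 2 to
 * every jump offset that crosses the patched region.
 */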
539
540 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
541 {
542 int err;
543
544 /* Branch offsets can't overflow when program is shrinking, no need
545 * to call bpf_adj_branches(..., true) here
546 */
547 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
548 sizeof(struct bpf_insn) * (prog->len - off - cnt));
549 prog->len -= cnt;
550
551 err = bpf_adj_branches(prog, off, off + cnt, off, false);
552 WARN_ON_ONCE(err);
553 return err;
554 }
555
556 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
557 {
558 int i;
559
560 for (i = 0; i < fp->aux->real_func_cnt; i++)
561 bpf_prog_kallsyms_del(fp->aux->func[i]);
562 }
563
564 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
565 {
566 bpf_prog_kallsyms_del_subprogs(fp);
567 bpf_prog_kallsyms_del(fp);
568 }
569
570 #ifdef CONFIG_BPF_JIT
571 /* All BPF JIT sysctl knobs here. */
572 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
573 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
574 int bpf_jit_harden __read_mostly;
575 long bpf_jit_limit __read_mostly;
576 long bpf_jit_limit_max __read_mostly;
577
578 static void
579 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
580 {
581 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
582
583 prog->aux->ksym.start = (unsigned long) prog->bpf_func;
584 prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len;
585 }
586
587 static void
588 bpf_prog_ksym_set_name(struct bpf_prog *prog)
589 {
590 char *sym = prog->aux->ksym.name;
591 const char *end = sym + KSYM_NAME_LEN;
592 const struct btf_type *type;
593 const char *func_name;
594
595 BUILD_BUG_ON(sizeof("bpf_prog_") +
596 sizeof(prog->tag) * 2 +
597 /* name has been null terminated.
598 * We would need +1 for the '_' preceding
599 * the name. However, the null character
600 * is double counted between the name and the
601 * sizeof("bpf_prog_") above, so we omit
602 * the +1 here.
603 */
604 sizeof(prog->aux->name) > KSYM_NAME_LEN);
605
606 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
607 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
608
609 /* prog->aux->name will be ignored if full btf name is available */
610 if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) {
611 type = btf_type_by_id(prog->aux->btf,
612 prog->aux->func_info[prog->aux->func_idx].type_id);
613 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
614 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
615 return;
616 }
617
618 if (prog->aux->name[0])
619 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
620 else
621 *sym = 0;
622 }
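/* Naming example (tag and function name chosen for illustration): a
 * program with tag 8937c06ba9b1d6b6 and BTF function name "xdp_drop"
 * shows up in kallsyms as "bpf_prog_8937c06ba9b1d6b6_xdp_drop"; without
 * BTF or aux->name it is just "bpf_prog_8937c06ba9b1d6b6".
 */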
623
624 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
625 {
626 return container_of(n, struct bpf_ksym, tnode)->start;
627 }
628
629 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
630 struct latch_tree_node *b)
631 {
632 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
633 }
634
635 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
636 {
637 unsigned long val = (unsigned long)key;
638 const struct bpf_ksym *ksym;
639
640 ksym = container_of(n, struct bpf_ksym, tnode);
641
642 if (val < ksym->start)
643 return -1;
644 /* Ensure that we detect return addresses as part of the program, when
645 * the final instruction is a call for a program part of the stack
646 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
647 */
648 if (val > ksym->end)
649 return 1;
650
651 return 0;
652 }
653
654 static const struct latch_tree_ops bpf_tree_ops = {
655 .less = bpf_tree_less,
656 .comp = bpf_tree_comp,
657 };
658
659 static DEFINE_SPINLOCK(bpf_lock);
660 static LIST_HEAD(bpf_kallsyms);
661 static struct latch_tree_root bpf_tree __cacheline_aligned;
662
663 void bpf_ksym_add(struct bpf_ksym *ksym)
664 {
665 spin_lock_bh(&bpf_lock);
666 WARN_ON_ONCE(!list_empty(&ksym->lnode));
667 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
668 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
669 spin_unlock_bh(&bpf_lock);
670 }
671
672 static void __bpf_ksym_del(struct bpf_ksym *ksym)
673 {
674 if (list_empty(&ksym->lnode))
675 return;
676
677 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
678 list_del_rcu(&ksym->lnode);
679 }
680
681 void bpf_ksym_del(struct bpf_ksym *ksym)
682 {
683 spin_lock_bh(&bpf_lock);
684 __bpf_ksym_del(ksym);
685 spin_unlock_bh(&bpf_lock);
686 }
687
688 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
689 {
690 return fp->jited && !bpf_prog_was_classic(fp);
691 }
692
693 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
694 {
695 if (!bpf_prog_kallsyms_candidate(fp) ||
696 !bpf_token_capable(fp->aux->token, CAP_BPF))
697 return;
698
699 bpf_prog_ksym_set_addr(fp);
700 bpf_prog_ksym_set_name(fp);
701 fp->aux->ksym.prog = true;
702
703 bpf_ksym_add(&fp->aux->ksym);
704
705 #ifdef CONFIG_FINEIBT
706 /*
707 * When FineIBT is enabled, code in the __cfi_foo() symbols can get
708 * executed and hence the unwinder needs help.
709 */
710 if (cfi_mode != CFI_FINEIBT)
711 return;
712
713 snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN,
714 "__cfi_%s", fp->aux->ksym.name);
715
716 fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16;
717 fp->aux->ksym_prefix.end = (unsigned long) fp->bpf_func;
718
719 bpf_ksym_add(&fp->aux->ksym_prefix);
720 #endif
721 }
722
723 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
724 {
725 if (!bpf_prog_kallsyms_candidate(fp))
726 return;
727
728 bpf_ksym_del(&fp->aux->ksym);
729 #ifdef CONFIG_FINEIBT
730 if (cfi_mode != CFI_FINEIBT)
731 return;
732 bpf_ksym_del(&fp->aux->ksym_prefix);
733 #endif
734 }
735
736 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
737 {
738 struct latch_tree_node *n;
739
740 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
741 return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
742 }
743
744 int __bpf_address_lookup(unsigned long addr, unsigned long *size,
745 unsigned long *off, char *sym)
746 {
747 struct bpf_ksym *ksym;
748 int ret = 0;
749
750 rcu_read_lock();
751 ksym = bpf_ksym_find(addr);
752 if (ksym) {
753 unsigned long symbol_start = ksym->start;
754 unsigned long symbol_end = ksym->end;
755
756 ret = strscpy(sym, ksym->name, KSYM_NAME_LEN);
757
758 if (size)
759 *size = symbol_end - symbol_start;
760 if (off)
761 *off = addr - symbol_start;
762 }
763 rcu_read_unlock();
764
765 return ret;
766 }
767
768 bool is_bpf_text_address(unsigned long addr)
769 {
770 bool ret;
771
772 rcu_read_lock();
773 ret = bpf_ksym_find(addr) != NULL;
774 rcu_read_unlock();
775
776 return ret;
777 }
778
779 struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
780 {
781 struct bpf_ksym *ksym = bpf_ksym_find(addr);
782
783 return ksym && ksym->prog ?
784 container_of(ksym, struct bpf_prog_aux, ksym)->prog :
785 NULL;
786 }
787
788 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
789 {
790 const struct exception_table_entry *e = NULL;
791 struct bpf_prog *prog;
792
793 rcu_read_lock();
794 prog = bpf_prog_ksym_find(addr);
795 if (!prog)
796 goto out;
797 if (!prog->aux->num_exentries)
798 goto out;
799
800 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
801 out:
802 rcu_read_unlock();
803 return e;
804 }
805
806 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
807 char *sym)
808 {
809 struct bpf_ksym *ksym;
810 unsigned int it = 0;
811 int ret = -ERANGE;
812
813 if (!bpf_jit_kallsyms_enabled())
814 return ret;
815
816 rcu_read_lock();
817 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
818 if (it++ != symnum)
819 continue;
820
821 strscpy(sym, ksym->name, KSYM_NAME_LEN);
822
823 *value = ksym->start;
824 *type = BPF_SYM_ELF_TYPE;
825
826 ret = 0;
827 break;
828 }
829 rcu_read_unlock();
830
831 return ret;
832 }
833
834 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
835 struct bpf_jit_poke_descriptor *poke)
836 {
837 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
838 static const u32 poke_tab_max = 1024;
839 u32 slot = prog->aux->size_poke_tab;
840 u32 size = slot + 1;
841
842 if (size > poke_tab_max)
843 return -ENOSPC;
844 if (poke->tailcall_target || poke->tailcall_target_stable ||
845 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
846 return -EINVAL;
847
848 switch (poke->reason) {
849 case BPF_POKE_REASON_TAIL_CALL:
850 if (!poke->tail_call.map)
851 return -EINVAL;
852 break;
853 default:
854 return -EINVAL;
855 }
856
857 tab = krealloc_array(tab, size, sizeof(*poke), GFP_KERNEL);
858 if (!tab)
859 return -ENOMEM;
860
861 memcpy(&tab[slot], poke, sizeof(*poke));
862 prog->aux->size_poke_tab = size;
863 prog->aux->poke_tab = tab;
864
865 return slot;
866 }
867
868 /*
869 * BPF program pack allocator.
870 *
871 * Most BPF programs are pretty small. Allocating a whole page for each
872 * program is sometimes a waste. Many small BPF programs also add pressure
873 * to the instruction TLB. To solve this issue, we introduce a BPF program pack
874 * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
875 * to host BPF programs.
876 */
877 #define BPF_PROG_CHUNK_SHIFT 6
878 #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
879 #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
880
881 struct bpf_prog_pack {
882 struct list_head list;
883 void *ptr;
884 unsigned long bitmap[];
885 };
886
887 void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
888 {
889 memset(area, 0, size);
890 }
891
892 #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
893
894 static DEFINE_MUTEX(pack_mutex);
895 static LIST_HEAD(pack_list);
896
897 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
898 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
899 */
900 #ifdef PMD_SIZE
901 /* PMD_SIZE is really big for some archs. It doesn't make sense to
902 * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
903 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
904 * greater than or equal to 2MB.
905 */
906 #define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
907 #else
908 #define BPF_PROG_PACK_SIZE PAGE_SIZE
909 #endif
910
911 #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
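/* Sizing example (a sketch assuming a single 2 MiB pack, i.e. one NUMA
 * node): BPF_PROG_CHUNK_COUNT = 2097152 / 64 = 32768, so each pack's
 * bitmap tracks 32768 bits (4 KiB), and a 500-byte JIT image occupies
 * BPF_PROG_SIZE_TO_NBITS(500) = 8 chunks, i.e. 512 bytes.
 */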
912
913 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
914 {
915 struct bpf_prog_pack *pack;
916 int err;
917
918 pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
919 GFP_KERNEL);
920 if (!pack)
921 return NULL;
922 pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
923 if (!pack->ptr)
924 goto out;
925 bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
926 bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
927
928 set_vm_flush_reset_perms(pack->ptr);
929 err = set_memory_rox((unsigned long)pack->ptr,
930 BPF_PROG_PACK_SIZE / PAGE_SIZE);
931 if (err)
932 goto out;
933 list_add_tail(&pack->list, &pack_list);
934 return pack;
935
936 out:
937 bpf_jit_free_exec(pack->ptr);
938 kfree(pack);
939 return NULL;
940 }
941
942 void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
943 {
944 unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
945 struct bpf_prog_pack *pack;
946 unsigned long pos;
947 void *ptr = NULL;
948
949 mutex_lock(&pack_mutex);
950 if (size > BPF_PROG_PACK_SIZE) {
951 size = round_up(size, PAGE_SIZE);
952 ptr = bpf_jit_alloc_exec(size);
953 if (ptr) {
954 int err;
955
956 bpf_fill_ill_insns(ptr, size);
957 set_vm_flush_reset_perms(ptr);
958 err = set_memory_rox((unsigned long)ptr,
959 size / PAGE_SIZE);
960 if (err) {
961 bpf_jit_free_exec(ptr);
962 ptr = NULL;
963 }
964 }
965 goto out;
966 }
967 list_for_each_entry(pack, &pack_list, list) {
968 pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
969 nbits, 0);
970 if (pos < BPF_PROG_CHUNK_COUNT)
971 goto found_free_area;
972 }
973
974 pack = alloc_new_pack(bpf_fill_ill_insns);
975 if (!pack)
976 goto out;
977
978 pos = 0;
979
980 found_free_area:
981 bitmap_set(pack->bitmap, pos, nbits);
982 ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);
983
984 out:
985 mutex_unlock(&pack_mutex);
986 return ptr;
987 }
988
989 void bpf_prog_pack_free(void *ptr, u32 size)
990 {
991 struct bpf_prog_pack *pack = NULL, *tmp;
992 unsigned int nbits;
993 unsigned long pos;
994
995 mutex_lock(&pack_mutex);
996 if (size > BPF_PROG_PACK_SIZE) {
997 bpf_jit_free_exec(ptr);
998 goto out;
999 }
1000
1001 list_for_each_entry(tmp, &pack_list, list) {
1002 if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) {
1003 pack = tmp;
1004 break;
1005 }
1006 }
1007
1008 if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
1009 goto out;
1010
1011 nbits = BPF_PROG_SIZE_TO_NBITS(size);
1012 pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
1013
1014 WARN_ONCE(bpf_arch_text_invalidate(ptr, size),
1015 "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
1016
1017 bitmap_clear(pack->bitmap, pos, nbits);
1018 if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
1019 BPF_PROG_CHUNK_COUNT, 0) == 0) {
1020 list_del(&pack->list);
1021 bpf_jit_free_exec(pack->ptr);
1022 kfree(pack);
1023 }
1024 out:
1025 mutex_unlock(&pack_mutex);
1026 }
1027
1028 static atomic_long_t bpf_jit_current;
1029
1030 /* Can be overridden by an arch's JIT compiler if it has a custom,
1031 * dedicated BPF backend memory area, or if neither of the two
1032 * below apply.
1033 */
1034 u64 __weak bpf_jit_alloc_exec_limit(void)
1035 {
1036 #if defined(MODULES_VADDR)
1037 return MODULES_END - MODULES_VADDR;
1038 #else
1039 return VMALLOC_END - VMALLOC_START;
1040 #endif
1041 }
1042
1043 static int __init bpf_jit_charge_init(void)
1044 {
1045 /* Only used as heuristic here to derive limit. */
1046 bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
1047 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
1048 PAGE_SIZE), LONG_MAX);
1049 return 0;
1050 }
1051 pure_initcall(bpf_jit_charge_init);
1052
1053 int bpf_jit_charge_modmem(u32 size)
1054 {
1055 if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
1056 if (!bpf_capable()) {
1057 atomic_long_sub(size, &bpf_jit_current);
1058 return -EPERM;
1059 }
1060 }
1061
1062 return 0;
1063 }
1064
1065 void bpf_jit_uncharge_modmem(u32 size)
1066 {
1067 atomic_long_sub(size, &bpf_jit_current);
1068 }
1069
1070 void *__weak bpf_jit_alloc_exec(unsigned long size)
1071 {
1072 return execmem_alloc(EXECMEM_BPF, size);
1073 }
1074
1075 void __weak bpf_jit_free_exec(void *addr)
1076 {
1077 execmem_free(addr);
1078 }
1079
1080 struct bpf_binary_header *
1081 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1082 unsigned int alignment,
1083 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1084 {
1085 struct bpf_binary_header *hdr;
1086 u32 size, hole, start;
1087
1088 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1089 alignment > BPF_IMAGE_ALIGNMENT);
1090
1091 /* Most BPF filters are really small, but if some of them
1092 * fill a page, allow at least 128 extra bytes to insert a
1093 * random section of illegal instructions.
1094 */
1095 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1096
1097 if (bpf_jit_charge_modmem(size))
1098 return NULL;
1099 hdr = bpf_jit_alloc_exec(size);
1100 if (!hdr) {
1101 bpf_jit_uncharge_modmem(size);
1102 return NULL;
1103 }
1104
1105 /* Fill space with illegal/arch-dep instructions. */
1106 bpf_fill_ill_insns(hdr, size);
1107
1108 hdr->size = size;
1109 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1110 PAGE_SIZE - sizeof(*hdr));
1111 start = get_random_u32_below(hole) & ~(alignment - 1);
1112
1113 /* Leave a random number of instructions before BPF code. */
1114 *image_ptr = &hdr->image[start];
1115
1116 return hdr;
1117 }
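/* Layout sketch (sizes chosen for illustration): for a ~1000-byte image
 * on a 4 KiB page system, size rounds up to one page; "hole" is the
 * slack left after the header and image, and "start" places the image
 * at a random, alignment-masked offset inside that hole so JITed code
 * does not begin at a predictable address within the allocation.
 */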
1118
1119 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1120 {
1121 u32 size = hdr->size;
1122
1123 bpf_jit_free_exec(hdr);
1124 bpf_jit_uncharge_modmem(size);
1125 }
1126
1127 /* Allocate jit binary from bpf_prog_pack allocator.
1128 * Since the allocated memory is RO+X, the JIT engine cannot write directly
1129 * to the memory. To solve this problem, an RW buffer is also allocated
1130 * at the same time. The JIT engine should calculate offsets based on the
1131 * RO memory address, but write the JITed program to the RW buffer. Once the
1132 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
1133 * the JITed program to the RO memory.
1134 */
1135 struct bpf_binary_header *
1136 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
1137 unsigned int alignment,
1138 struct bpf_binary_header **rw_header,
1139 u8 **rw_image,
1140 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1141 {
1142 struct bpf_binary_header *ro_header;
1143 u32 size, hole, start;
1144
1145 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1146 alignment > BPF_IMAGE_ALIGNMENT);
1147
1148 /* add 16 bytes for a random section of illegal instructions */
1149 size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
1150
1151 if (bpf_jit_charge_modmem(size))
1152 return NULL;
1153 ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
1154 if (!ro_header) {
1155 bpf_jit_uncharge_modmem(size);
1156 return NULL;
1157 }
1158
1159 *rw_header = kvmalloc(size, GFP_KERNEL);
1160 if (!*rw_header) {
1161 bpf_prog_pack_free(ro_header, size);
1162 bpf_jit_uncharge_modmem(size);
1163 return NULL;
1164 }
1165
1166 /* Fill space with illegal/arch-dep instructions. */
1167 bpf_fill_ill_insns(*rw_header, size);
1168 (*rw_header)->size = size;
1169
1170 hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
1171 BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
1172 start = get_random_u32_below(hole) & ~(alignment - 1);
1173
1174 *image_ptr = &ro_header->image[start];
1175 *rw_image = &(*rw_header)->image[start];
1176
1177 return ro_header;
1178 }
1179
1180 /* Copy JITed text from rw_header to its final location, the ro_header. */
1181 int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
1182 struct bpf_binary_header *rw_header)
1183 {
1184 void *ptr;
1185
1186 ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
1187
1188 kvfree(rw_header);
1189
1190 if (IS_ERR(ptr)) {
1191 bpf_prog_pack_free(ro_header, ro_header->size);
1192 return PTR_ERR(ptr);
1193 }
1194 return 0;
1195 }
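/* Typical usage (a sketch of how an arch JIT is expected to drive the
 * pack API; the local variable names are illustrative only):
 *
 *	ro_header = bpf_jit_binary_pack_alloc(proglen, &image, align,
 *					      &rw_header, &rw_image, fill);
 *	// ... emit instructions into rw_image, computing offsets
 *	//     relative to the final "image" address ...
 *	err = bpf_jit_binary_pack_finalize(ro_header, rw_header);
 */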
1196
1197 /* bpf_jit_binary_pack_free is called in two different scenarios:
1198 * 1) when the program is freed after a successful JIT;
1199 * 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
1200 * For case 2), we need to free both the RO memory and the RW buffer.
1201 *
1202 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1203 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1204 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1205 * bpf_arch_text_copy (when jit fails).
1206 */
1207 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1208 struct bpf_binary_header *rw_header)
1209 {
1210 u32 size = ro_header->size;
1211
1212 bpf_prog_pack_free(ro_header, size);
1213 kvfree(rw_header);
1214 bpf_jit_uncharge_modmem(size);
1215 }
1216
1217 struct bpf_binary_header *
1218 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
1219 {
1220 unsigned long real_start = (unsigned long)fp->bpf_func;
1221 unsigned long addr;
1222
1223 addr = real_start & BPF_PROG_CHUNK_MASK;
1224 return (void *)addr;
1225 }
1226
1227 static inline struct bpf_binary_header *
1228 bpf_jit_binary_hdr(const struct bpf_prog *fp)
1229 {
1230 unsigned long real_start = (unsigned long)fp->bpf_func;
1231 unsigned long addr;
1232
1233 addr = real_start & PAGE_MASK;
1234 return (void *)addr;
1235 }
1236
1237 /* This symbol is only overridden by archs that have different
1238 * requirements than the usual eBPF JITs, f.e. when they only
1239 * implement cBPF JIT, do not set images read-only, etc.
1240 */
1241 void __weak bpf_jit_free(struct bpf_prog *fp)
1242 {
1243 if (fp->jited) {
1244 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
1245
1246 bpf_jit_binary_free(hdr);
1247 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
1248 }
1249
1250 bpf_prog_unlock_free(fp);
1251 }
1252
1253 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1254 const struct bpf_insn *insn, bool extra_pass,
1255 u64 *func_addr, bool *func_addr_fixed)
1256 {
1257 s16 off = insn->off;
1258 s32 imm = insn->imm;
1259 u8 *addr;
1260 int err;
1261
1262 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1263 if (!*func_addr_fixed) {
1264 /* Place-holder address till the last pass has collected
1265 * all addresses for JITed subprograms in which case we
1266 * can pick them up from prog->aux.
1267 */
1268 if (!extra_pass)
1269 addr = NULL;
1270 else if (prog->aux->func &&
1271 off >= 0 && off < prog->aux->real_func_cnt)
1272 addr = (u8 *)prog->aux->func[off]->bpf_func;
1273 else
1274 return -EINVAL;
1275 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
1276 bpf_jit_supports_far_kfunc_call()) {
1277 err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
1278 if (err)
1279 return err;
1280 } else {
1281 /* Address of a BPF helper call. Since part of the core
1282 * kernel, it's always at a fixed location. __bpf_call_base
1283 * and the helper with imm relative to it are both in core
1284 * kernel.
1285 */
1286 addr = (u8 *)__bpf_call_base + imm;
1287 }
1288
1289 *func_addr = (unsigned long)addr;
1290 return 0;
1291 }
1292
1293 static int bpf_jit_blind_insn(const struct bpf_insn *from,
1294 const struct bpf_insn *aux,
1295 struct bpf_insn *to_buff,
1296 bool emit_zext)
1297 {
1298 struct bpf_insn *to = to_buff;
1299 u32 imm_rnd = get_random_u32();
1300 s16 off;
1301
1302 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
1303 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
1304
1305 /* Constraints on AX register:
1306 *
1307 * AX register is inaccessible from user space. It is mapped in
1308 * all JITs, and used here for constant blinding rewrites. It is
1309 * typically "stateless" meaning its contents are only valid within
1310 * the executed instruction, but not across several instructions.
1311 * There are a few exceptions however which are further detailed
1312 * below.
1313 *
1314 * Constant blinding is only used by JITs, not in the interpreter.
1315 * The interpreter uses AX in some occasions as a local temporary
1316 * register e.g. in DIV or MOD instructions.
1317 *
1318 * In restricted circumstances, the verifier can also use the AX
1319 * register for rewrites as long as they do not interfere with
1320 * the above cases!
1321 */
1322 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1323 goto out;
1324
1325 if (from->imm == 0 &&
1326 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
1327 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1328 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1329 goto out;
1330 }
1331
1332 switch (from->code) {
1333 case BPF_ALU | BPF_ADD | BPF_K:
1334 case BPF_ALU | BPF_SUB | BPF_K:
1335 case BPF_ALU | BPF_AND | BPF_K:
1336 case BPF_ALU | BPF_OR | BPF_K:
1337 case BPF_ALU | BPF_XOR | BPF_K:
1338 case BPF_ALU | BPF_MUL | BPF_K:
1339 case BPF_ALU | BPF_MOV | BPF_K:
1340 case BPF_ALU | BPF_DIV | BPF_K:
1341 case BPF_ALU | BPF_MOD | BPF_K:
1342 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1343 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1344 *to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
1345 break;
1346
1347 case BPF_ALU64 | BPF_ADD | BPF_K:
1348 case BPF_ALU64 | BPF_SUB | BPF_K:
1349 case BPF_ALU64 | BPF_AND | BPF_K:
1350 case BPF_ALU64 | BPF_OR | BPF_K:
1351 case BPF_ALU64 | BPF_XOR | BPF_K:
1352 case BPF_ALU64 | BPF_MUL | BPF_K:
1353 case BPF_ALU64 | BPF_MOV | BPF_K:
1354 case BPF_ALU64 | BPF_DIV | BPF_K:
1355 case BPF_ALU64 | BPF_MOD | BPF_K:
1356 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1357 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1358 *to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
1359 break;
1360
1361 case BPF_JMP | BPF_JEQ | BPF_K:
1362 case BPF_JMP | BPF_JNE | BPF_K:
1363 case BPF_JMP | BPF_JGT | BPF_K:
1364 case BPF_JMP | BPF_JLT | BPF_K:
1365 case BPF_JMP | BPF_JGE | BPF_K:
1366 case BPF_JMP | BPF_JLE | BPF_K:
1367 case BPF_JMP | BPF_JSGT | BPF_K:
1368 case BPF_JMP | BPF_JSLT | BPF_K:
1369 case BPF_JMP | BPF_JSGE | BPF_K:
1370 case BPF_JMP | BPF_JSLE | BPF_K:
1371 case BPF_JMP | BPF_JSET | BPF_K:
1372 /* Accommodate for extra offset in case of a backjump. */
1373 off = from->off;
1374 if (off < 0)
1375 off -= 2;
1376 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1377 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1378 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1379 break;
1380
1381 case BPF_JMP32 | BPF_JEQ | BPF_K:
1382 case BPF_JMP32 | BPF_JNE | BPF_K:
1383 case BPF_JMP32 | BPF_JGT | BPF_K:
1384 case BPF_JMP32 | BPF_JLT | BPF_K:
1385 case BPF_JMP32 | BPF_JGE | BPF_K:
1386 case BPF_JMP32 | BPF_JLE | BPF_K:
1387 case BPF_JMP32 | BPF_JSGT | BPF_K:
1388 case BPF_JMP32 | BPF_JSLT | BPF_K:
1389 case BPF_JMP32 | BPF_JSGE | BPF_K:
1390 case BPF_JMP32 | BPF_JSLE | BPF_K:
1391 case BPF_JMP32 | BPF_JSET | BPF_K:
1392 /* Accommodate for extra offset in case of a backjump. */
1393 off = from->off;
1394 if (off < 0)
1395 off -= 2;
1396 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1397 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1398 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1399 off);
1400 break;
1401
1402 case BPF_LD | BPF_IMM | BPF_DW:
1403 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1404 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1405 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1406 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1407 break;
1408 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1409 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1410 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1411 if (emit_zext)
1412 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1413 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1414 break;
1415
1416 case BPF_ST | BPF_MEM | BPF_DW:
1417 case BPF_ST | BPF_MEM | BPF_W:
1418 case BPF_ST | BPF_MEM | BPF_H:
1419 case BPF_ST | BPF_MEM | BPF_B:
1420 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1421 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1422 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1423 break;
1424 }
1425 out:
1426 return to - to_buff;
1427 }
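/* Blinding example (constant chosen for illustration): with a fresh
 * random imm_rnd, an instruction such as
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1234)
 *
 * is rewritten into
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 0x1234)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the user-controlled constant never appears verbatim in the image,
 * while (imm_rnd ^ imm) ^ imm_rnd == imm preserves the semantics.
 */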
1428
1429 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1430 gfp_t gfp_extra_flags)
1431 {
1432 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1433 struct bpf_prog *fp;
1434
1435 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1436 if (fp != NULL) {
1437 /* aux->prog still points to the fp_other one, so
1438 * when promoting the clone to the real program,
1439 * this still needs to be adapted.
1440 */
1441 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1442 }
1443
1444 return fp;
1445 }
1446
1447 static void bpf_prog_clone_free(struct bpf_prog *fp)
1448 {
1449 /* aux was stolen by the other clone, so we cannot free
1450 * it from this path! It will be freed eventually by the
1451 * other program on release.
1452 *
1453 * At this point, we don't need a deferred release since
1454 * clone is guaranteed to not be locked.
1455 */
1456 fp->aux = NULL;
1457 fp->stats = NULL;
1458 fp->active = NULL;
1459 __bpf_prog_free(fp);
1460 }
1461
1462 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1463 {
1464 /* We have to repoint aux->prog to self, as we don't
1465 * know whether fp here is the clone or the original.
1466 */
1467 fp->aux->prog = fp;
1468 bpf_prog_clone_free(fp_other);
1469 }
1470
1471 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1472 {
1473 struct bpf_insn insn_buff[16], aux[2];
1474 struct bpf_prog *clone, *tmp;
1475 int insn_delta, insn_cnt;
1476 struct bpf_insn *insn;
1477 int i, rewritten;
1478
1479 if (!prog->blinding_requested || prog->blinded)
1480 return prog;
1481
1482 clone = bpf_prog_clone_create(prog, GFP_USER);
1483 if (!clone)
1484 return ERR_PTR(-ENOMEM);
1485
1486 insn_cnt = clone->len;
1487 insn = clone->insnsi;
1488
1489 for (i = 0; i < insn_cnt; i++, insn++) {
1490 if (bpf_pseudo_func(insn)) {
1491 /* ld_imm64 with an address of bpf subprog is not
1492 * a user controlled constant. Don't randomize it,
1493 * since it will conflict with jit_subprogs() logic.
1494 */
1495 insn++;
1496 i++;
1497 continue;
1498 }
1499
1500 /* We temporarily need to hold the original ld64 insn
1501 * so that we can still access the first part in the
1502 * second blinding run.
1503 */
1504 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1505 insn[1].code == 0)
1506 memcpy(aux, insn, sizeof(aux));
1507
1508 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1509 clone->aux->verifier_zext);
1510 if (!rewritten)
1511 continue;
1512
1513 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1514 if (IS_ERR(tmp)) {
1515 /* Patching may have repointed aux->prog during
1516 * realloc from the original one, so we need to
1517 * fix it up here on error.
1518 */
1519 bpf_jit_prog_release_other(prog, clone);
1520 return tmp;
1521 }
1522
1523 clone = tmp;
1524 insn_delta = rewritten - 1;
1525
1526 /* Walk new program and skip insns we just inserted. */
1527 insn = clone->insnsi + i + insn_delta;
1528 insn_cnt += insn_delta;
1529 i += insn_delta;
1530 }
1531
1532 clone->blinded = 1;
1533 return clone;
1534 }
1535 #endif /* CONFIG_BPF_JIT */
1536
1537 /* Base function for offset calculation. Needs to go into .text section,
1538 * therefore keeping it non-static as well; will also be used by JITs
1539 * anyway later on, so do not let the compiler omit it. This also needs
1540 * to go into kallsyms for correlation from e.g. bpftool, so naming
1541 * must not change.
1542 */
1543 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1544 {
1545 return 0;
1546 }
1547 EXPORT_SYMBOL_GPL(__bpf_call_base);
1548
1549 /* All UAPI available opcodes. */
1550 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1551 /* 32 bit ALU operations. */ \
1552 /* Register based. */ \
1553 INSN_3(ALU, ADD, X), \
1554 INSN_3(ALU, SUB, X), \
1555 INSN_3(ALU, AND, X), \
1556 INSN_3(ALU, OR, X), \
1557 INSN_3(ALU, LSH, X), \
1558 INSN_3(ALU, RSH, X), \
1559 INSN_3(ALU, XOR, X), \
1560 INSN_3(ALU, MUL, X), \
1561 INSN_3(ALU, MOV, X), \
1562 INSN_3(ALU, ARSH, X), \
1563 INSN_3(ALU, DIV, X), \
1564 INSN_3(ALU, MOD, X), \
1565 INSN_2(ALU, NEG), \
1566 INSN_3(ALU, END, TO_BE), \
1567 INSN_3(ALU, END, TO_LE), \
1568 /* Immediate based. */ \
1569 INSN_3(ALU, ADD, K), \
1570 INSN_3(ALU, SUB, K), \
1571 INSN_3(ALU, AND, K), \
1572 INSN_3(ALU, OR, K), \
1573 INSN_3(ALU, LSH, K), \
1574 INSN_3(ALU, RSH, K), \
1575 INSN_3(ALU, XOR, K), \
1576 INSN_3(ALU, MUL, K), \
1577 INSN_3(ALU, MOV, K), \
1578 INSN_3(ALU, ARSH, K), \
1579 INSN_3(ALU, DIV, K), \
1580 INSN_3(ALU, MOD, K), \
1581 /* 64 bit ALU operations. */ \
1582 /* Register based. */ \
1583 INSN_3(ALU64, ADD, X), \
1584 INSN_3(ALU64, SUB, X), \
1585 INSN_3(ALU64, AND, X), \
1586 INSN_3(ALU64, OR, X), \
1587 INSN_3(ALU64, LSH, X), \
1588 INSN_3(ALU64, RSH, X), \
1589 INSN_3(ALU64, XOR, X), \
1590 INSN_3(ALU64, MUL, X), \
1591 INSN_3(ALU64, MOV, X), \
1592 INSN_3(ALU64, ARSH, X), \
1593 INSN_3(ALU64, DIV, X), \
1594 INSN_3(ALU64, MOD, X), \
1595 INSN_2(ALU64, NEG), \
1596 INSN_3(ALU64, END, TO_LE), \
1597 /* Immediate based. */ \
1598 INSN_3(ALU64, ADD, K), \
1599 INSN_3(ALU64, SUB, K), \
1600 INSN_3(ALU64, AND, K), \
1601 INSN_3(ALU64, OR, K), \
1602 INSN_3(ALU64, LSH, K), \
1603 INSN_3(ALU64, RSH, K), \
1604 INSN_3(ALU64, XOR, K), \
1605 INSN_3(ALU64, MUL, K), \
1606 INSN_3(ALU64, MOV, K), \
1607 INSN_3(ALU64, ARSH, K), \
1608 INSN_3(ALU64, DIV, K), \
1609 INSN_3(ALU64, MOD, K), \
1610 /* Call instruction. */ \
1611 INSN_2(JMP, CALL), \
1612 /* Exit instruction. */ \
1613 INSN_2(JMP, EXIT), \
1614 /* 32-bit Jump instructions. */ \
1615 /* Register based. */ \
1616 INSN_3(JMP32, JEQ, X), \
1617 INSN_3(JMP32, JNE, X), \
1618 INSN_3(JMP32, JGT, X), \
1619 INSN_3(JMP32, JLT, X), \
1620 INSN_3(JMP32, JGE, X), \
1621 INSN_3(JMP32, JLE, X), \
1622 INSN_3(JMP32, JSGT, X), \
1623 INSN_3(JMP32, JSLT, X), \
1624 INSN_3(JMP32, JSGE, X), \
1625 INSN_3(JMP32, JSLE, X), \
1626 INSN_3(JMP32, JSET, X), \
1627 /* Immediate based. */ \
1628 INSN_3(JMP32, JEQ, K), \
1629 INSN_3(JMP32, JNE, K), \
1630 INSN_3(JMP32, JGT, K), \
1631 INSN_3(JMP32, JLT, K), \
1632 INSN_3(JMP32, JGE, K), \
1633 INSN_3(JMP32, JLE, K), \
1634 INSN_3(JMP32, JSGT, K), \
1635 INSN_3(JMP32, JSLT, K), \
1636 INSN_3(JMP32, JSGE, K), \
1637 INSN_3(JMP32, JSLE, K), \
1638 INSN_3(JMP32, JSET, K), \
1639 /* Jump instructions. */ \
1640 /* Register based. */ \
1641 INSN_3(JMP, JEQ, X), \
1642 INSN_3(JMP, JNE, X), \
1643 INSN_3(JMP, JGT, X), \
1644 INSN_3(JMP, JLT, X), \
1645 INSN_3(JMP, JGE, X), \
1646 INSN_3(JMP, JLE, X), \
1647 INSN_3(JMP, JSGT, X), \
1648 INSN_3(JMP, JSLT, X), \
1649 INSN_3(JMP, JSGE, X), \
1650 INSN_3(JMP, JSLE, X), \
1651 INSN_3(JMP, JSET, X), \
1652 /* Immediate based. */ \
1653 INSN_3(JMP, JEQ, K), \
1654 INSN_3(JMP, JNE, K), \
1655 INSN_3(JMP, JGT, K), \
1656 INSN_3(JMP, JLT, K), \
1657 INSN_3(JMP, JGE, K), \
1658 INSN_3(JMP, JLE, K), \
1659 INSN_3(JMP, JSGT, K), \
1660 INSN_3(JMP, JSLT, K), \
1661 INSN_3(JMP, JSGE, K), \
1662 INSN_3(JMP, JSLE, K), \
1663 INSN_3(JMP, JSET, K), \
1664 INSN_2(JMP, JA), \
1665 INSN_2(JMP32, JA), \
1666 /* Atomic operations. */ \
1667 INSN_3(STX, ATOMIC, B), \
1668 INSN_3(STX, ATOMIC, H), \
1669 INSN_3(STX, ATOMIC, W), \
1670 INSN_3(STX, ATOMIC, DW), \
1671 /* Store instructions. */ \
1672 /* Register based. */ \
1673 INSN_3(STX, MEM, B), \
1674 INSN_3(STX, MEM, H), \
1675 INSN_3(STX, MEM, W), \
1676 INSN_3(STX, MEM, DW), \
1677 /* Immediate based. */ \
1678 INSN_3(ST, MEM, B), \
1679 INSN_3(ST, MEM, H), \
1680 INSN_3(ST, MEM, W), \
1681 INSN_3(ST, MEM, DW), \
1682 /* Load instructions. */ \
1683 /* Register based. */ \
1684 INSN_3(LDX, MEM, B), \
1685 INSN_3(LDX, MEM, H), \
1686 INSN_3(LDX, MEM, W), \
1687 INSN_3(LDX, MEM, DW), \
1688 INSN_3(LDX, MEMSX, B), \
1689 INSN_3(LDX, MEMSX, H), \
1690 INSN_3(LDX, MEMSX, W), \
1691 /* Immediate based. */ \
1692 INSN_3(LD, IMM, DW)
1693
1694 bool bpf_opcode_in_insntable(u8 code)
1695 {
1696 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1697 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1698 static const bool public_insntable[256] = {
1699 [0 ... 255] = false,
1700 /* Now overwrite non-defaults ... */
1701 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1702 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1703 [BPF_LD | BPF_ABS | BPF_B] = true,
1704 [BPF_LD | BPF_ABS | BPF_H] = true,
1705 [BPF_LD | BPF_ABS | BPF_W] = true,
1706 [BPF_LD | BPF_IND | BPF_B] = true,
1707 [BPF_LD | BPF_IND | BPF_H] = true,
1708 [BPF_LD | BPF_IND | BPF_W] = true,
1709 [BPF_JMP | BPF_JCOND] = true,
1710 };
1711 #undef BPF_INSN_3_TBL
1712 #undef BPF_INSN_2_TBL
1713 return public_insntable[code];
1714 }
1715
1716 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1717 /**
1718 * ___bpf_prog_run - run eBPF program on a given context
1719 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1720 * @insn: is the array of eBPF instructions
1721 *
1722 * Decode and execute eBPF instructions.
1723 *
1724 * Return: whatever value is in %BPF_R0 at program exit
1725 */
1726 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1727 {
1728 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1729 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1730 static const void * const jumptable[256] __annotate_jump_table = {
1731 [0 ... 255] = &&default_label,
1732 /* Now overwrite non-defaults ... */
1733 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1734 /* Non-UAPI available opcodes. */
1735 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1736 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1737 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
1738 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1739 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1740 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1741 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1742 [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
1743 [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
1744 [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
1745 };
1746 #undef BPF_INSN_3_LBL
1747 #undef BPF_INSN_2_LBL
1748 u32 tail_call_cnt = 0;
1749
1750 #define CONT ({ insn++; goto select_insn; })
1751 #define CONT_JMP ({ insn++; goto select_insn; })
1752
1753 select_insn:
1754 goto *jumptable[insn->code];
1755
1756 /* Explicitly mask the register-based shift amounts with 63 or 31
1757 * to avoid undefined behavior. Normally this won't affect the
1758 * generated code, for example, in case of native 64 bit archs such
1759 * as x86-64 or arm64, the compiler is optimizing the AND away for
1760 * the interpreter. In case of JITs, each of the JIT backends compiles
1761 * the BPF shift operations to machine instructions which produce
1762 * implementation-defined results in such a case; the resulting
1763 * contents of the register may be arbitrary, but program behaviour
1764 * as a whole remains defined. In other words, in case of JIT backends,
1765 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1766 */
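/* For instance (shift amount chosen for illustration): a 64-bit LSH with
 * a register shift amount of 70 executes here as DST <<= (70 & 63), i.e.
 * a shift by 6, whereas a JIT emits the bare native shift instruction.
 */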
1767 /* ALU (shifts) */
1768 #define SHT(OPCODE, OP) \
1769 ALU64_##OPCODE##_X: \
1770 DST = DST OP (SRC & 63); \
1771 CONT; \
1772 ALU_##OPCODE##_X: \
1773 DST = (u32) DST OP ((u32) SRC & 31); \
1774 CONT; \
1775 ALU64_##OPCODE##_K: \
1776 DST = DST OP IMM; \
1777 CONT; \
1778 ALU_##OPCODE##_K: \
1779 DST = (u32) DST OP (u32) IMM; \
1780 CONT;
1781 /* ALU (rest) */
1782 #define ALU(OPCODE, OP) \
1783 ALU64_##OPCODE##_X: \
1784 DST = DST OP SRC; \
1785 CONT; \
1786 ALU_##OPCODE##_X: \
1787 DST = (u32) DST OP (u32) SRC; \
1788 CONT; \
1789 ALU64_##OPCODE##_K: \
1790 DST = DST OP IMM; \
1791 CONT; \
1792 ALU_##OPCODE##_K: \
1793 DST = (u32) DST OP (u32) IMM; \
1794 CONT;
1795 ALU(ADD, +)
1796 ALU(SUB, -)
1797 ALU(AND, &)
1798 ALU(OR, |)
1799 ALU(XOR, ^)
1800 ALU(MUL, *)
1801 SHT(LSH, <<)
1802 SHT(RSH, >>)
1803 #undef SHT
1804 #undef ALU
1805 ALU_NEG:
1806 DST = (u32) -DST;
1807 CONT;
1808 ALU64_NEG:
1809 DST = -DST;
1810 CONT;
1811 ALU_MOV_X:
1812 switch (OFF) {
1813 case 0:
1814 DST = (u32) SRC;
1815 break;
1816 case 8:
1817 DST = (u32)(s8) SRC;
1818 break;
1819 case 16:
1820 DST = (u32)(s16) SRC;
1821 break;
1822 }
1823 CONT;
1824 ALU_MOV_K:
1825 DST = (u32) IMM;
1826 CONT;
1827 ALU64_MOV_X:
1828 switch (OFF) {
1829 case 0:
1830 DST = SRC;
1831 break;
1832 case 8:
1833 DST = (s8) SRC;
1834 break;
1835 case 16:
1836 DST = (s16) SRC;
1837 break;
1838 case 32:
1839 DST = (s32) SRC;
1840 break;
1841 }
1842 CONT;
1843 ALU64_MOV_K:
1844 DST = IMM;
1845 CONT;
1846 LD_IMM_DW:
1847 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1848 insn++;
1849 CONT;
1850 ALU_ARSH_X:
1851 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1852 CONT;
1853 ALU_ARSH_K:
1854 DST = (u64) (u32) (((s32) DST) >> IMM);
1855 CONT;
1856 ALU64_ARSH_X:
1857 (*(s64 *) &DST) >>= (SRC & 63);
1858 CONT;
1859 ALU64_ARSH_K:
1860 (*(s64 *) &DST) >>= IMM;
1861 CONT;
1862 ALU64_MOD_X:
1863 switch (OFF) {
1864 case 0:
1865 div64_u64_rem(DST, SRC, &AX);
1866 DST = AX;
1867 break;
1868 case 1:
1869 AX = div64_s64(DST, SRC);
1870 DST = DST - AX * SRC;
1871 break;
1872 }
1873 CONT;
1874 ALU_MOD_X:
1875 switch (OFF) {
1876 case 0:
1877 AX = (u32) DST;
1878 DST = do_div(AX, (u32) SRC);
1879 break;
1880 case 1:
1881 AX = abs((s32)DST);
1882 AX = do_div(AX, abs((s32)SRC));
1883 if ((s32)DST < 0)
1884 DST = (u32)-AX;
1885 else
1886 DST = (u32)AX;
1887 break;
1888 }
1889 CONT;
1890 ALU64_MOD_K:
1891 switch (OFF) {
1892 case 0:
1893 div64_u64_rem(DST, IMM, &AX);
1894 DST = AX;
1895 break;
1896 case 1:
1897 AX = div64_s64(DST, IMM);
1898 DST = DST - AX * IMM;
1899 break;
1900 }
1901 CONT;
1902 ALU_MOD_K:
1903 switch (OFF) {
1904 case 0:
1905 AX = (u32) DST;
1906 DST = do_div(AX, (u32) IMM);
1907 break;
1908 case 1:
1909 AX = abs((s32)DST);
1910 AX = do_div(AX, abs((s32)IMM));
1911 if ((s32)DST < 0)
1912 DST = (u32)-AX;
1913 else
1914 DST = (u32)AX;
1915 break;
1916 }
1917 CONT;
1918 ALU64_DIV_X:
1919 switch (OFF) {
1920 case 0:
1921 DST = div64_u64(DST, SRC);
1922 break;
1923 case 1:
1924 DST = div64_s64(DST, SRC);
1925 break;
1926 }
1927 CONT;
1928 ALU_DIV_X:
1929 switch (OFF) {
1930 case 0:
1931 AX = (u32) DST;
1932 do_div(AX, (u32) SRC);
1933 DST = (u32) AX;
1934 break;
1935 case 1:
1936 AX = abs((s32)DST);
1937 do_div(AX, abs((s32)SRC));
1938 if (((s32)DST < 0) == ((s32)SRC < 0))
1939 DST = (u32)AX;
1940 else
1941 DST = (u32)-AX;
1942 break;
1943 }
1944 CONT;
1945 ALU64_DIV_K:
1946 switch (OFF) {
1947 case 0:
1948 DST = div64_u64(DST, IMM);
1949 break;
1950 case 1:
1951 DST = div64_s64(DST, IMM);
1952 break;
1953 }
1954 CONT;
1955 ALU_DIV_K:
1956 switch (OFF) {
1957 case 0:
1958 AX = (u32) DST;
1959 do_div(AX, (u32) IMM);
1960 DST = (u32) AX;
1961 break;
1962 case 1:
1963 AX = abs((s32)DST);
1964 do_div(AX, abs((s32)IMM));
1965 if (((s32)DST < 0) == ((s32)IMM < 0))
1966 DST = (u32)AX;
1967 else
1968 DST = (u32)-AX;
1969 break;
1970 }
1971 CONT;
1972 ALU_END_TO_BE:
1973 switch (IMM) {
1974 case 16:
1975 DST = (__force u16) cpu_to_be16(DST);
1976 break;
1977 case 32:
1978 DST = (__force u32) cpu_to_be32(DST);
1979 break;
1980 case 64:
1981 DST = (__force u64) cpu_to_be64(DST);
1982 break;
1983 }
1984 CONT;
1985 ALU_END_TO_LE:
1986 switch (IMM) {
1987 case 16:
1988 DST = (__force u16) cpu_to_le16(DST);
1989 break;
1990 case 32:
1991 DST = (__force u32) cpu_to_le32(DST);
1992 break;
1993 case 64:
1994 DST = (__force u64) cpu_to_le64(DST);
1995 break;
1996 }
1997 CONT;
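/* Unlike the BPF_ALU BPF_TO_LE/BPF_TO_BE conversions above, which are
 * no-ops when the CPU endianness already matches the requested one, the
 * BPF_ALU64 BPF_END form below is an unconditional byte swap.
 */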
1998 ALU64_END_TO_LE:
1999 switch (IMM) {
2000 case 16:
2001 DST = (__force u16) __swab16(DST);
2002 break;
2003 case 32:
2004 DST = (__force u32) __swab32(DST);
2005 break;
2006 case 64:
2007 DST = (__force u64) __swab64(DST);
2008 break;
2009 }
2010 CONT;
2011
2012 /* CALL */
2013 JMP_CALL:
2014 /* Function call scratches BPF_R1-BPF_R5 registers,
2015 * preserves BPF_R6-BPF_R9, and stores return value
2016 * into BPF_R0.
2017 */
2018 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
2019 BPF_R4, BPF_R5);
2020 CONT;
2021
2022 JMP_CALL_ARGS:
2023 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
2024 BPF_R3, BPF_R4,
2025 BPF_R5,
2026 insn + insn->off + 1);
2027 CONT;
2028
2029 JMP_TAIL_CALL: {
2030 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
2031 struct bpf_array *array = container_of(map, struct bpf_array, map);
2032 struct bpf_prog *prog;
2033 u32 index = BPF_R3;
2034
2035 if (unlikely(index >= array->map.max_entries))
2036 goto out;
2037
2038 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
2039 goto out;
2040
2041 tail_call_cnt++;
2042
2043 prog = READ_ONCE(array->ptrs[index]);
2044 if (!prog)
2045 goto out;
2046
2047 /* ARG1 at this point is guaranteed to point to CTX from
2048 * the verifier side due to the fact that the tail call is
2049 * handled like a helper, that is, bpf_tail_call_proto,
2050 * where arg1_type is ARG_PTR_TO_CTX.
2051 */
2052 insn = prog->insnsi;
2053 goto select_insn;
2054 out:
2055 CONT;
2056 }
2057 JMP_JA:
2058 insn += insn->off;
2059 CONT;
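/* The BPF_JMP32 | BPF_JA form carries its branch offset in the 32-bit
 * imm field rather than the 16-bit off field, which allows jumps beyond
 * the +/-32k instruction range.
 */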
2060 JMP32_JA:
2061 insn += insn->imm;
2062 CONT;
2063 JMP_EXIT:
2064 return BPF_R0;
2065 /* JMP */
2066 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
2067 JMP_##OPCODE##_X: \
2068 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
2069 insn += insn->off; \
2070 CONT_JMP; \
2071 } \
2072 CONT; \
2073 JMP32_##OPCODE##_X: \
2074 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
2075 insn += insn->off; \
2076 CONT_JMP; \
2077 } \
2078 CONT; \
2079 JMP_##OPCODE##_K: \
2080 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
2081 insn += insn->off; \
2082 CONT_JMP; \
2083 } \
2084 CONT; \
2085 JMP32_##OPCODE##_K: \
2086 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
2087 insn += insn->off; \
2088 CONT_JMP; \
2089 } \
2090 CONT;
2091 COND_JMP(u, JEQ, ==)
2092 COND_JMP(u, JNE, !=)
2093 COND_JMP(u, JGT, >)
2094 COND_JMP(u, JLT, <)
2095 COND_JMP(u, JGE, >=)
2096 COND_JMP(u, JLE, <=)
2097 COND_JMP(u, JSET, &)
2098 COND_JMP(s, JSGT, >)
2099 COND_JMP(s, JSLT, <)
2100 COND_JMP(s, JSGE, >=)
2101 COND_JMP(s, JSLE, <=)
2102 #undef COND_JMP
2103 /* ST, STX and LDX */
2104 ST_NOSPEC:
2105 /* Speculation barrier for mitigating Speculative Store Bypass.
2106 * In case of arm64, we rely on the firmware mitigation as
2107 * controlled via the ssbd kernel parameter. Whenever the
2108 * mitigation is enabled, it works for all of the kernel code
2109 * with no need to provide any additional instructions here.
2110 * In case of x86, we use 'lfence' insn for mitigation. We
2111 * reuse preexisting logic from Spectre v1 mitigation that
2112 * happens to produce the required code on x86 for v4 as well.
2113 */
2114 barrier_nospec();
2115 CONT;
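/* The LDX_PROBE_MEM* variants below are verifier-internal opcodes used
 * for loads from kernel memory that may fault (e.g. pointers derived
 * from BTF). bpf_probe_read_kernel_common() zeroes the destination
 * buffer when the access faults, so DST reads back as 0 in that case.
 */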
2116 #define LDST(SIZEOP, SIZE) \
2117 STX_MEM_##SIZEOP: \
2118 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
2119 CONT; \
2120 ST_MEM_##SIZEOP: \
2121 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
2122 CONT; \
2123 LDX_MEM_##SIZEOP: \
2124 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
2125 CONT; \
2126 LDX_PROBE_MEM_##SIZEOP: \
2127 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
2128 (const void *)(long) (SRC + insn->off)); \
2129 DST = *((SIZE *)&DST); \
2130 CONT;
2131
2132 LDST(B, u8)
2133 LDST(H, u16)
2134 LDST(W, u32)
2135 LDST(DW, u64)
2136 #undef LDST
2137
2138 #define LDSX(SIZEOP, SIZE) \
2139 LDX_MEMSX_##SIZEOP: \
2140 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
2141 CONT; \
2142 LDX_PROBE_MEMSX_##SIZEOP: \
2143 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
2144 (const void *)(long) (SRC + insn->off)); \
2145 DST = *((SIZE *)&DST); \
2146 CONT;
2147
2148 LDSX(B, s8)
2149 LDSX(H, s16)
2150 LDSX(W, s32)
2151 #undef LDSX
2152
2153 #define ATOMIC_ALU_OP(BOP, KOP) \
2154 case BOP: \
2155 if (BPF_SIZE(insn->code) == BPF_W) \
2156 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
2157 (DST + insn->off)); \
2158 else if (BPF_SIZE(insn->code) == BPF_DW) \
2159 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
2160 (DST + insn->off)); \
2161 else \
2162 goto default_label; \
2163 break; \
2164 case BOP | BPF_FETCH: \
2165 if (BPF_SIZE(insn->code) == BPF_W) \
2166 SRC = (u32) atomic_fetch_##KOP( \
2167 (u32) SRC, \
2168 (atomic_t *)(unsigned long) (DST + insn->off)); \
2169 else if (BPF_SIZE(insn->code) == BPF_DW) \
2170 SRC = (u64) atomic64_fetch_##KOP( \
2171 (u64) SRC, \
2172 (atomic64_t *)(unsigned long) (DST + insn->off)); \
2173 else \
2174 goto default_label; \
2175 break;
2176
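/* For the read-modify-write atomics handled below, setting BPF_FETCH
 * additionally returns the old memory value in the source register;
 * BPF_CMPXCHG compares against R0 and leaves the old value in R0.
 */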
2177 STX_ATOMIC_DW:
2178 STX_ATOMIC_W:
2179 STX_ATOMIC_H:
2180 STX_ATOMIC_B:
2181 switch (IMM) {
2182 /* Atomic read-modify-write instructions support only W and DW
2183 * size modifiers.
2184 */
2185 ATOMIC_ALU_OP(BPF_ADD, add)
2186 ATOMIC_ALU_OP(BPF_AND, and)
2187 ATOMIC_ALU_OP(BPF_OR, or)
2188 ATOMIC_ALU_OP(BPF_XOR, xor)
2189 #undef ATOMIC_ALU_OP
2190
2191 case BPF_XCHG:
2192 if (BPF_SIZE(insn->code) == BPF_W)
2193 SRC = (u32) atomic_xchg(
2194 (atomic_t *)(unsigned long) (DST + insn->off),
2195 (u32) SRC);
2196 else if (BPF_SIZE(insn->code) == BPF_DW)
2197 SRC = (u64) atomic64_xchg(
2198 (atomic64_t *)(unsigned long) (DST + insn->off),
2199 (u64) SRC);
2200 else
2201 goto default_label;
2202 break;
2203 case BPF_CMPXCHG:
2204 if (BPF_SIZE(insn->code) == BPF_W)
2205 BPF_R0 = (u32) atomic_cmpxchg(
2206 (atomic_t *)(unsigned long) (DST + insn->off),
2207 (u32) BPF_R0, (u32) SRC);
2208 else if (BPF_SIZE(insn->code) == BPF_DW)
2209 BPF_R0 = (u64) atomic64_cmpxchg(
2210 (atomic64_t *)(unsigned long) (DST + insn->off),
2211 (u64) BPF_R0, (u64) SRC);
2212 else
2213 goto default_label;
2214 break;
2215 /* Atomic load and store instructions support all size
2216 * modifiers.
2217 */
2218 case BPF_LOAD_ACQ:
2219 switch (BPF_SIZE(insn->code)) {
2220 #define LOAD_ACQUIRE(SIZEOP, SIZE) \
2221 case BPF_##SIZEOP: \
2222 DST = (SIZE)smp_load_acquire( \
2223 (SIZE *)(unsigned long)(SRC + insn->off)); \
2224 break;
2225 LOAD_ACQUIRE(B, u8)
2226 LOAD_ACQUIRE(H, u16)
2227 LOAD_ACQUIRE(W, u32)
2228 #ifdef CONFIG_64BIT
2229 LOAD_ACQUIRE(DW, u64)
2230 #endif
2231 #undef LOAD_ACQUIRE
2232 default:
2233 goto default_label;
2234 }
2235 break;
2236 case BPF_STORE_REL:
2237 switch (BPF_SIZE(insn->code)) {
2238 #define STORE_RELEASE(SIZEOP, SIZE) \
2239 case BPF_##SIZEOP: \
2240 smp_store_release( \
2241 (SIZE *)(unsigned long)(DST + insn->off), (SIZE)SRC); \
2242 break;
2243 STORE_RELEASE(B, u8)
2244 STORE_RELEASE(H, u16)
2245 STORE_RELEASE(W, u32)
2246 #ifdef CONFIG_64BIT
2247 STORE_RELEASE(DW, u64)
2248 #endif
2249 #undef STORE_RELEASE
2250 default:
2251 goto default_label;
2252 }
2253 break;
2254
2255 default:
2256 goto default_label;
2257 }
2258 CONT;
2259
2260 default_label:
2261 /* If we ever reach this, we have a bug somewhere. Die hard here
2262 * instead of just returning 0; we could be somewhere in a subprog,
2263 * so execution could continue otherwise which we do /not/ want.
2264 *
2265 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
2266 */
2267 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2268 insn->code, insn->imm);
2269 BUG_ON(1);
2270 return 0;
2271 }
2272
2273 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2274 #define DEFINE_BPF_PROG_RUN(stack_size) \
2275 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2276 { \
2277 u64 stack[stack_size / sizeof(u64)]; \
2278 u64 regs[MAX_BPF_EXT_REG] = {}; \
2279 \
2280 kmsan_unpoison_memory(stack, sizeof(stack)); \
2281 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2282 ARG1 = (u64) (unsigned long) ctx; \
2283 return ___bpf_prog_run(regs, insn); \
2284 }
2285
2286 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2287 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2288 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2289 const struct bpf_insn *insn) \
2290 { \
2291 u64 stack[stack_size / sizeof(u64)]; \
2292 u64 regs[MAX_BPF_EXT_REG]; \
2293 \
2294 kmsan_unpoison_memory(stack, sizeof(stack)); \
2295 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2296 BPF_R1 = r1; \
2297 BPF_R2 = r2; \
2298 BPF_R3 = r3; \
2299 BPF_R4 = r4; \
2300 BPF_R5 = r5; \
2301 return ___bpf_prog_run(regs, insn); \
2302 }
2303
2304 #define EVAL1(FN, X) FN(X)
2305 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2306 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2307 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2308 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2309 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2310
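/* Stamp out one interpreter entry point per supported stack size, from
 * 32 up to 512 bytes in steps of 32 bytes; bpf_prog_select_func() and
 * bpf_patch_call_args() later pick the variant matching the program's
 * rounded-up stack depth.
 */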
2311 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2312 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2313 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2314
2315 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2316 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2317 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
2318
2319 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2320
2321 static unsigned int (*interpreters[])(const void *ctx,
2322 const struct bpf_insn *insn) = {
2323 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2324 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2325 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2326 };
2327 #undef PROG_NAME_LIST
2328 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2329 static __maybe_unused
2330 u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2331 const struct bpf_insn *insn) = {
2332 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2333 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2334 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2335 };
2336 #undef PROG_NAME_LIST
2337
2338 #ifdef CONFIG_BPF_SYSCALL
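/* Rewrite a bpf2bpf pseudo call for interpreter execution: the original
 * target offset moves into insn->off, and insn->imm is pointed at the
 * interpreter entry matching the callee's stack depth, which is what
 * JMP_CALL_ARGS in ___bpf_prog_run() dispatches through.
 */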
2339 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2340 {
2341 stack_depth = max_t(u32, stack_depth, 1);
2342 insn->off = (s16) insn->imm;
2343 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2344 __bpf_call_base_args;
2345 insn->code = BPF_JMP | BPF_CALL_ARGS;
2346 }
2347 #endif
2348 #endif
2349
2350 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2351 const struct bpf_insn *insn)
2352 {
2353 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2354 * is not working properly, or interpreter is being used when
2355 * prog->jit_requested is not 0, so warn about it!
2356 */
2357 WARN_ON_ONCE(1);
2358 return 0;
2359 }
2360
2361 bool bpf_prog_map_compatible(struct bpf_map *map,
2362 const struct bpf_prog *fp)
2363 {
2364 enum bpf_prog_type prog_type = resolve_prog_type(fp);
2365 bool ret;
2366 struct bpf_prog_aux *aux = fp->aux;
2367
2368 if (fp->kprobe_override)
2369 return false;
2370
2371 /* XDP programs inserted into maps are not guaranteed to run on
2372 * a particular netdev (and can run outside driver context entirely
2373 * in the case of devmap and cpumap). Until device checks
2374 * are implemented, prohibit adding dev-bound programs to program maps.
2375 */
2376 if (bpf_prog_is_dev_bound(aux))
2377 return false;
2378
2379 spin_lock(&map->owner.lock);
2380 if (!map->owner.type) {
2381 /* There's no owner yet where we could check for
2382 * compatibility.
2383 */
2384 map->owner.type = prog_type;
2385 map->owner.jited = fp->jited;
2386 map->owner.xdp_has_frags = aux->xdp_has_frags;
2387 map->owner.attach_func_proto = aux->attach_func_proto;
2388 ret = true;
2389 } else {
2390 ret = map->owner.type == prog_type &&
2391 map->owner.jited == fp->jited &&
2392 map->owner.xdp_has_frags == aux->xdp_has_frags;
2393 if (ret &&
2394 map->owner.attach_func_proto != aux->attach_func_proto) {
2395 switch (prog_type) {
2396 case BPF_PROG_TYPE_TRACING:
2397 case BPF_PROG_TYPE_LSM:
2398 case BPF_PROG_TYPE_EXT:
2399 case BPF_PROG_TYPE_STRUCT_OPS:
2400 ret = false;
2401 break;
2402 default:
2403 break;
2404 }
2405 }
2406 }
2407 spin_unlock(&map->owner.lock);
2408
2409 return ret;
2410 }
2411
2412 static int bpf_check_tail_call(const struct bpf_prog *fp)
2413 {
2414 struct bpf_prog_aux *aux = fp->aux;
2415 int i, ret = 0;
2416
2417 mutex_lock(&aux->used_maps_mutex);
2418 for (i = 0; i < aux->used_map_cnt; i++) {
2419 struct bpf_map *map = aux->used_maps[i];
2420
2421 if (!map_type_contains_progs(map))
2422 continue;
2423
2424 if (!bpf_prog_map_compatible(map, fp)) {
2425 ret = -EINVAL;
2426 goto out;
2427 }
2428 }
2429
2430 out:
2431 mutex_unlock(&aux->used_maps_mutex);
2432 return ret;
2433 }
2434
2435 static void bpf_prog_select_func(struct bpf_prog *fp)
2436 {
2437 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2438 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2439 u32 idx = (round_up(stack_depth, 32) / 32) - 1;
2440
2441 /* may_goto may cause stack size > 512, leading to idx out-of-bounds.
2442 * But for non-JITed programs, we don't need bpf_func, so no bounds
2443 * check needed.
2444 */
2445 if (!fp->jit_requested &&
2446 !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
2447 fp->bpf_func = interpreters[idx];
2448 } else {
2449 fp->bpf_func = __bpf_prog_ret0_warn;
2450 }
2451 #else
2452 fp->bpf_func = __bpf_prog_ret0_warn;
2453 #endif
2454 }
2455
2456 /**
2457 * bpf_prog_select_runtime - select exec runtime for BPF program
2458 * @fp: bpf_prog populated with BPF program
2459 * @err: pointer to error variable
2460 *
2461 * Try to JIT eBPF program, if JIT is not available, use interpreter.
2462 * The BPF program will be executed via bpf_prog_run() function.
2463 *
2464 * Return: the &fp argument along with &err set to 0 for success or
2465 * a negative errno code on failure
2466 */
2467 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2468 {
2469 /* In case of BPF to BPF calls, verifier did all the prep
2470 * work with regards to JITing, etc.
2471 */
2472 bool jit_needed = false;
2473
2474 if (fp->bpf_func)
2475 goto finalize;
2476
2477 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2478 bpf_prog_has_kfunc_call(fp))
2479 jit_needed = true;
2480
2481 bpf_prog_select_func(fp);
2482
2483 /* eBPF JITs can rewrite the program in case constant
2484 * blinding is active. However, in case of error during
2485 * blinding, bpf_int_jit_compile() must always return a
2486 * valid program, which in this case would simply not
2487 * be JITed, but falls back to the interpreter.
2488 */
2489 if (!bpf_prog_is_offloaded(fp->aux)) {
2490 *err = bpf_prog_alloc_jited_linfo(fp);
2491 if (*err)
2492 return fp;
2493
2494 fp = bpf_int_jit_compile(fp);
2495 bpf_prog_jit_attempt_done(fp);
2496 if (!fp->jited && jit_needed) {
2497 *err = -ENOTSUPP;
2498 return fp;
2499 }
2500 } else {
2501 *err = bpf_prog_offload_compile(fp);
2502 if (*err)
2503 return fp;
2504 }
2505
2506 finalize:
2507 *err = bpf_prog_lock_ro(fp);
2508 if (*err)
2509 return fp;
2510
2511 /* The tail call compatibility check can only be done at
2512 * this late stage, as we need to determine whether we are
2513 * dealing with JITed or non-JITed program concatenations, and
2514 * not all eBPF JITs might immediately support all features.
2515 */
2516 *err = bpf_check_tail_call(fp);
2517
2518 return fp;
2519 }
2520 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
2521
2522 static unsigned int __bpf_prog_ret1(const void *ctx,
2523 const struct bpf_insn *insn)
2524 {
2525 return 1;
2526 }
2527
2528 static struct bpf_prog_dummy {
2529 struct bpf_prog prog;
2530 } dummy_bpf_prog = {
2531 .prog = {
2532 .bpf_func = __bpf_prog_ret1,
2533 },
2534 };
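/* dummy_bpf_prog stands in for entries removed from a prog_array: a
 * deleted slot is overwritten with this always-return-1 program rather
 * than compacted, so arrays being walked under RCU stay well formed.
 * The length/copy helpers below skip it when counting real programs.
 */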
2535
2536 struct bpf_empty_prog_array bpf_empty_prog_array = {
2537 .null_prog = NULL,
2538 };
2539 EXPORT_SYMBOL(bpf_empty_prog_array);
2540
2541 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2542 {
2543 struct bpf_prog_array *p;
2544
2545 if (prog_cnt)
2546 p = kzalloc(struct_size(p, items, prog_cnt + 1), flags);
2547 else
2548 p = &bpf_empty_prog_array.hdr;
2549
2550 return p;
2551 }
2552
2553 void bpf_prog_array_free(struct bpf_prog_array *progs)
2554 {
2555 if (!progs || progs == &bpf_empty_prog_array.hdr)
2556 return;
2557 kfree_rcu(progs, rcu);
2558 }
2559
2560 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2561 {
2562 struct bpf_prog_array *progs;
2563
2564 /* If RCU Tasks Trace grace period implies RCU grace period, there is
2565 * no need to call kfree_rcu(), just call kfree() directly.
2566 */
2567 progs = container_of(rcu, struct bpf_prog_array, rcu);
2568 if (rcu_trace_implies_rcu_gp())
2569 kfree(progs);
2570 else
2571 kfree_rcu(progs, rcu);
2572 }
2573
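/* Sleepable BPF programs may walk the array under rcu_read_lock_trace(),
 * so wait for an RCU Tasks Trace grace period before freeing; the
 * callback above then adds a regular RCU grace period unless the former
 * already implies it.
 */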
2574 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2575 {
2576 if (!progs || progs == &bpf_empty_prog_array.hdr)
2577 return;
2578 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2579 }
2580
2581 int bpf_prog_array_length(struct bpf_prog_array *array)
2582 {
2583 struct bpf_prog_array_item *item;
2584 u32 cnt = 0;
2585
2586 for (item = array->items; item->prog; item++)
2587 if (item->prog != &dummy_bpf_prog.prog)
2588 cnt++;
2589 return cnt;
2590 }
2591
2592 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2593 {
2594 struct bpf_prog_array_item *item;
2595
2596 for (item = array->items; item->prog; item++)
2597 if (item->prog != &dummy_bpf_prog.prog)
2598 return false;
2599 return true;
2600 }
2601
2602 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2603 u32 *prog_ids,
2604 u32 request_cnt)
2605 {
2606 struct bpf_prog_array_item *item;
2607 int i = 0;
2608
2609 for (item = array->items; item->prog; item++) {
2610 if (item->prog == &dummy_bpf_prog.prog)
2611 continue;
2612 prog_ids[i] = item->prog->aux->id;
2613 if (++i == request_cnt) {
2614 item++;
2615 break;
2616 }
2617 }
2618
2619 return !!(item->prog);
2620 }
2621
2622 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2623 __u32 __user *prog_ids, u32 cnt)
2624 {
2625 unsigned long err = 0;
2626 bool nospc;
2627 u32 *ids;
2628
2629 /* users of this function are doing:
2630 * cnt = bpf_prog_array_length();
2631 * if (cnt > 0)
2632 * bpf_prog_array_copy_to_user(..., cnt);
2633 * so below kcalloc doesn't need extra cnt > 0 check.
2634 */
2635 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2636 if (!ids)
2637 return -ENOMEM;
2638 nospc = bpf_prog_array_copy_core(array, ids, cnt);
2639 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2640 kfree(ids);
2641 if (err)
2642 return -EFAULT;
2643 if (nospc)
2644 return -ENOSPC;
2645 return 0;
2646 }
2647
2648 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2649 struct bpf_prog *old_prog)
2650 {
2651 struct bpf_prog_array_item *item;
2652
2653 for (item = array->items; item->prog; item++)
2654 if (item->prog == old_prog) {
2655 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2656 break;
2657 }
2658 }
2659
2660 /**
2661 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2662 * index into the program array with
2663 * a dummy no-op program.
2664 * @array: a bpf_prog_array
2665 * @index: the index of the program to replace
2666 *
2667 * Skips over dummy programs, by not counting them, when calculating
2668 * the position of the program to replace.
2669 *
2670 * Return:
2671 * * 0 - Success
2672 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2673 * * -ENOENT - Index out of range
2674 */
2675 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2676 {
2677 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2678 }
2679
2680 /**
2681 * bpf_prog_array_update_at() - Updates the program at the given index
2682 * into the program array.
2683 * @array: a bpf_prog_array
2684 * @index: the index of the program to update
2685 * @prog: the program to insert into the array
2686 *
2687 * Skips over dummy programs, by not counting them, when calculating
2688 * the position of the program to update.
2689 *
2690 * Return:
2691 * * 0 - Success
2692 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2693 * * -ENOENT - Index out of range
2694 */
2695 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2696 struct bpf_prog *prog)
2697 {
2698 struct bpf_prog_array_item *item;
2699
2700 if (unlikely(index < 0))
2701 return -EINVAL;
2702
2703 for (item = array->items; item->prog; item++) {
2704 if (item->prog == &dummy_bpf_prog.prog)
2705 continue;
2706 if (!index) {
2707 WRITE_ONCE(item->prog, prog);
2708 return 0;
2709 }
2710 index--;
2711 }
2712 return -ENOENT;
2713 }
2714
2715 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2716 struct bpf_prog *exclude_prog,
2717 struct bpf_prog *include_prog,
2718 u64 bpf_cookie,
2719 struct bpf_prog_array **new_array)
2720 {
2721 int new_prog_cnt, carry_prog_cnt = 0;
2722 struct bpf_prog_array_item *existing, *new;
2723 struct bpf_prog_array *array;
2724 bool found_exclude = false;
2725
2726 /* Figure out how many existing progs we need to carry over to
2727 * the new array.
2728 */
2729 if (old_array) {
2730 existing = old_array->items;
2731 for (; existing->prog; existing++) {
2732 if (existing->prog == exclude_prog) {
2733 found_exclude = true;
2734 continue;
2735 }
2736 if (existing->prog != &dummy_bpf_prog.prog)
2737 carry_prog_cnt++;
2738 if (existing->prog == include_prog)
2739 return -EEXIST;
2740 }
2741 }
2742
2743 if (exclude_prog && !found_exclude)
2744 return -ENOENT;
2745
2746 /* How many progs (not NULL) will be in the new array? */
2747 new_prog_cnt = carry_prog_cnt;
2748 if (include_prog)
2749 new_prog_cnt += 1;
2750
2751 /* Do we have any prog (not NULL) in the new array? */
2752 if (!new_prog_cnt) {
2753 *new_array = NULL;
2754 return 0;
2755 }
2756
2757 /* +1 as the end of prog_array is marked with NULL */
2758 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2759 if (!array)
2760 return -ENOMEM;
2761 new = array->items;
2762
2763 /* Fill in the new prog array */
2764 if (carry_prog_cnt) {
2765 existing = old_array->items;
2766 for (; existing->prog; existing++) {
2767 if (existing->prog == exclude_prog ||
2768 existing->prog == &dummy_bpf_prog.prog)
2769 continue;
2770
2771 new->prog = existing->prog;
2772 new->bpf_cookie = existing->bpf_cookie;
2773 new++;
2774 }
2775 }
2776 if (include_prog) {
2777 new->prog = include_prog;
2778 new->bpf_cookie = bpf_cookie;
2779 new++;
2780 }
2781 new->prog = NULL;
2782 *new_array = array;
2783 return 0;
2784 }
2785
2786 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2787 u32 *prog_ids, u32 request_cnt,
2788 u32 *prog_cnt)
2789 {
2790 u32 cnt = 0;
2791
2792 if (array)
2793 cnt = bpf_prog_array_length(array);
2794
2795 *prog_cnt = cnt;
2796
2797 /* return early if user requested only program count or nothing to copy */
2798 if (!request_cnt || !cnt)
2799 return 0;
2800
2801 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2802 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2803 : 0;
2804 }
2805
2806 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2807 struct bpf_map **used_maps, u32 len)
2808 {
2809 struct bpf_map *map;
2810 bool sleepable;
2811 u32 i;
2812
2813 sleepable = aux->prog->sleepable;
2814 for (i = 0; i < len; i++) {
2815 map = used_maps[i];
2816 if (map->ops->map_poke_untrack)
2817 map->ops->map_poke_untrack(map, aux);
2818 if (sleepable)
2819 atomic64_dec(&map->sleepable_refcnt);
2820 bpf_map_put(map);
2821 }
2822 }
2823
2824 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2825 {
2826 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2827 kfree(aux->used_maps);
2828 }
2829
2830 void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len)
2831 {
2832 #ifdef CONFIG_BPF_SYSCALL
2833 struct btf_mod_pair *btf_mod;
2834 u32 i;
2835
2836 for (i = 0; i < len; i++) {
2837 btf_mod = &used_btfs[i];
2838 if (btf_mod->module)
2839 module_put(btf_mod->module);
2840 btf_put(btf_mod->btf);
2841 }
2842 #endif
2843 }
2844
2845 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2846 {
2847 __bpf_free_used_btfs(aux->used_btfs, aux->used_btf_cnt);
2848 kfree(aux->used_btfs);
2849 }
2850
2851 static void bpf_prog_free_deferred(struct work_struct *work)
2852 {
2853 struct bpf_prog_aux *aux;
2854 int i;
2855
2856 aux = container_of(work, struct bpf_prog_aux, work);
2857 #ifdef CONFIG_BPF_SYSCALL
2858 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2859 #endif
2860 #ifdef CONFIG_CGROUP_BPF
2861 if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2862 bpf_cgroup_atype_put(aux->cgroup_atype);
2863 #endif
2864 bpf_free_used_maps(aux);
2865 bpf_free_used_btfs(aux);
2866 if (bpf_prog_is_dev_bound(aux))
2867 bpf_prog_dev_bound_destroy(aux->prog);
2868 #ifdef CONFIG_PERF_EVENTS
2869 if (aux->prog->has_callchain_buf)
2870 put_callchain_buffers();
2871 #endif
2872 if (aux->dst_trampoline)
2873 bpf_trampoline_put(aux->dst_trampoline);
2874 for (i = 0; i < aux->real_func_cnt; i++) {
2875 /* We can just unlink the subprog poke descriptor table as
2876 * it was originally linked to the main program and is also
2877 * released along with it.
2878 */
2879 aux->func[i]->aux->poke_tab = NULL;
2880 bpf_jit_free(aux->func[i]);
2881 }
2882 if (aux->real_func_cnt) {
2883 kfree(aux->func);
2884 bpf_prog_unlock_free(aux->prog);
2885 } else {
2886 bpf_jit_free(aux->prog);
2887 }
2888 }
2889
2890 void bpf_prog_free(struct bpf_prog *fp)
2891 {
2892 struct bpf_prog_aux *aux = fp->aux;
2893
2894 if (aux->dst_prog)
2895 bpf_prog_put(aux->dst_prog);
2896 bpf_token_put(aux->token);
2897 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2898 schedule_work(&aux->work);
2899 }
2900 EXPORT_SYMBOL_GPL(bpf_prog_free);
2901
2902 /* RNG for unprivileged user space with separated state from prandom_u32(). */
2903 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2904
2905 void bpf_user_rnd_init_once(void)
2906 {
2907 prandom_init_once(&bpf_user_rnd_state);
2908 }
2909
2910 BPF_CALL_0(bpf_user_rnd_u32)
2911 {
2912 /* Should someone ever have the rather unwise idea to use some
2913 * of the registers passed into this function, then note that
2914 * this function is called from native eBPF and classic-to-eBPF
2915 * transformations. Register assignments from both sides are
2916 * different, e.g. classic always sets fn(ctx, A, X) here.
2917 */
2918 struct rnd_state *state;
2919 u32 res;
2920
2921 state = &get_cpu_var(bpf_user_rnd_state);
2922 res = prandom_u32_state(state);
2923 put_cpu_var(bpf_user_rnd_state);
2924
2925 return res;
2926 }
2927
2928 BPF_CALL_0(bpf_get_raw_cpu_id)
2929 {
2930 return raw_smp_processor_id();
2931 }
2932
2933 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2934 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2935 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2936 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2937 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2938 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2939 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2940 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
2941 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2942 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2943 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2944
2945 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2946 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2947 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2948 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2949 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2950 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2951 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
2952
2953 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2954 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2955 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2956 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2957 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2958 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2959 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2960 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2961 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2962 const struct bpf_func_proto bpf_set_retval_proto __weak;
2963 const struct bpf_func_proto bpf_get_retval_proto __weak;
2964
2965 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2966 {
2967 return NULL;
2968 }
2969
2970 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
2971 {
2972 return NULL;
2973 }
2974
2975 const struct bpf_func_proto * __weak bpf_get_perf_event_read_value_proto(void)
2976 {
2977 return NULL;
2978 }
2979
2980 u64 __weak
2981 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2982 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2983 {
2984 return -ENOTSUPP;
2985 }
2986 EXPORT_SYMBOL_GPL(bpf_event_output);
2987
2988 /* Always built-in helper functions. */
2989 const struct bpf_func_proto bpf_tail_call_proto = {
2990 .func = NULL,
2991 .gpl_only = false,
2992 .ret_type = RET_VOID,
2993 .arg1_type = ARG_PTR_TO_CTX,
2994 .arg2_type = ARG_CONST_MAP_PTR,
2995 .arg3_type = ARG_ANYTHING,
2996 };
2997
2998 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2999 * It is encouraged to implement bpf_int_jit_compile() instead, so that
3000 * eBPF and implicitly also cBPF can get JITed!
3001 */
3002 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
3003 {
3004 return prog;
3005 }
3006
3007 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
3008 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
3009 */
3010 void __weak bpf_jit_compile(struct bpf_prog *prog)
3011 {
3012 }
3013
3014 bool __weak bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
3015 {
3016 return false;
3017 }
3018
3019 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
3020 * analysis code and wants explicit zero extension inserted by verifier.
3021 * Otherwise, return FALSE.
3022 *
3023 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
3024 * you don't override this. JITs that don't want these extra insns can detect
3025 * them using insn_is_zext.
3026 */
3027 bool __weak bpf_jit_needs_zext(void)
3028 {
3029 return false;
3030 }
3031
3032 /* Return true if the JIT inlines the call to the helper corresponding to
3033 * the imm.
3034 *
3035 * The verifier will not patch the insn->imm for the call to the helper if
3036 * this returns true.
3037 */
3038 bool __weak bpf_jit_inlines_helper_call(s32 imm)
3039 {
3040 return false;
3041 }
3042
3043 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
3044 bool __weak bpf_jit_supports_subprog_tailcalls(void)
3045 {
3046 return false;
3047 }
3048
3049 bool __weak bpf_jit_supports_percpu_insn(void)
3050 {
3051 return false;
3052 }
3053
3054 bool __weak bpf_jit_supports_kfunc_call(void)
3055 {
3056 return false;
3057 }
3058
3059 bool __weak bpf_jit_supports_far_kfunc_call(void)
3060 {
3061 return false;
3062 }
3063
3064 bool __weak bpf_jit_supports_arena(void)
3065 {
3066 return false;
3067 }
3068
3069 bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
3070 {
3071 return false;
3072 }
3073
3074 u64 __weak bpf_arch_uaddress_limit(void)
3075 {
3076 #if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE)
3077 return TASK_SIZE;
3078 #else
3079 return 0;
3080 #endif
3081 }
3082
3083 /* Return TRUE if the JIT backend satisfies the following two conditions:
3084 * 1) JIT backend supports atomic_xchg() on pointer-sized words.
3085 * 2) Under the specific arch, the implementation of xchg() is the same
3086 * as atomic_xchg() on pointer-sized words.
3087 */
3088 bool __weak bpf_jit_supports_ptr_xchg(void)
3089 {
3090 return false;
3091 }
3092
3093 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
3094 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
3095 */
3096 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
3097 int len)
3098 {
3099 return -EFAULT;
3100 }
3101
3102 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
3103 void *addr1, void *addr2)
3104 {
3105 return -ENOTSUPP;
3106 }
3107
3108 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
3109 {
3110 return ERR_PTR(-ENOTSUPP);
3111 }
3112
3113 int __weak bpf_arch_text_invalidate(void *dst, size_t len)
3114 {
3115 return -ENOTSUPP;
3116 }
3117
3118 bool __weak bpf_jit_supports_exceptions(void)
3119 {
3120 return false;
3121 }
3122
3123 bool __weak bpf_jit_supports_private_stack(void)
3124 {
3125 return false;
3126 }
3127
3128 void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3129 {
3130 }
3131
3132 bool __weak bpf_jit_supports_timed_may_goto(void)
3133 {
3134 return false;
3135 }
3136
3137 u64 __weak arch_bpf_timed_may_goto(void)
3138 {
3139 return 0;
3140 }
3141
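/* Backend for the timed may_goto instrumentation: called (via the
 * arch_bpf_timed_may_goto() trampoline) with a per-frame cookie kept on
 * the BPF stack. It hands back a fresh loop count, or 0 once the frame
 * has used up its quarter-second time slice, bounding loop runtime.
 */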
3142 u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *p)
3143 {
3144 u64 time = ktime_get_mono_fast_ns();
3145
3146 /* Populate the timestamp for this stack frame, and refresh count. */
3147 if (!p->timestamp) {
3148 p->timestamp = time;
3149 return BPF_MAX_TIMED_LOOPS;
3150 }
3151 /* Check if we've exhausted our time slice, and zero count. */
3152 if (time - p->timestamp >= (NSEC_PER_SEC / 4))
3153 return 0;
3154 /* Refresh the count for the stack frame. */
3155 return BPF_MAX_TIMED_LOOPS;
3156 }
3157
3158 /* for configs without MMU or 32-bit */
3159 __weak const struct bpf_map_ops arena_map_ops;
3160 __weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
3161 {
3162 return 0;
3163 }
3164 __weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
3165 {
3166 return 0;
3167 }
3168
3169 #ifdef CONFIG_BPF_SYSCALL
3170 static int __init bpf_global_ma_init(void)
3171 {
3172 int ret;
3173
3174 ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
3175 bpf_global_ma_set = !ret;
3176 return ret;
3177 }
3178 late_initcall(bpf_global_ma_init);
3179 #endif
3180
3181 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
3182 EXPORT_SYMBOL(bpf_stats_enabled_key);
3183
3184 /* All definitions of tracepoints related to BPF. */
3185 #define CREATE_TRACE_POINTS
3186 #include <linux/bpf_trace.h>
3187
3188 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
3189 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
3190