// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_wait.h>
#include <linux/poll.h>

struct bpf_struct_ops_value {
	struct bpf_struct_ops_common_value common;
	char data[] ____cacheline_aligned_in_smp;
};

#define MAX_TRAMP_IMAGE_PAGES 8

struct bpf_struct_ops_map {
	struct bpf_map map;
	const struct bpf_struct_ops_desc *st_ops_desc;
	/* protect map_update */
	struct mutex lock;
	/* links has all the bpf_links that are populated
	 * to the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_link **links;
	/* ksyms for bpf trampolines */
	struct bpf_ksym **ksyms;
	u32 funcs_cnt;
	u32 image_pages_cnt;
	/* image_pages is an array of pages that hold all the trampolines
	 * that store the func args before calling the bpf_prog.
	 */
	void *image_pages[MAX_TRAMP_IMAGE_PAGES];
	/* The owner module's btf. */
	struct btf *btf;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) that is more useful
	 * to userspace than the kvalue. For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel's struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};

struct bpf_struct_ops_link {
	struct bpf_link link;
	struct bpf_map __rcu *map;
	wait_queue_head_t wait_hup;
};

static DEFINE_MUTEX(update_mutex);

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)
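/* The BTF value type of a struct_ops "foo" is thus named
 * "bpf_struct_ops_foo"; e.g. "tcp_congestion_ops" pairs with
 * "bpf_struct_ops_tcp_congestion_ops".
 */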

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_struct_ops_test_run,
#endif
};

BTF_ID_LIST(st_ops_ids)
BTF_ID(struct, module)
BTF_ID(struct, bpf_struct_ops_common_value)

enum {
	IDX_MODULE_ID,
	IDX_ST_OPS_COMMON_VALUE_ID,
};

extern struct btf *btf_vmlinux;

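/* A valid value type has exactly the two-member layout checked below.
 * A minimal sketch of a matching definition (names are illustrative):
 *
 *	struct bpf_struct_ops_foo {
 *		struct bpf_struct_ops_common_value common;
 *		struct foo data;
 *	};
 */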
static bool is_valid_value_type(struct btf *btf, s32 value_id,
				const struct btf_type *type,
				const char *value_name)
{
	const struct btf_type *common_value_type;
	const struct btf_member *member;
	const struct btf_type *vt, *mt;

	vt = btf_type_by_id(btf, value_id);
	if (btf_vlen(vt) != 2) {
		pr_warn("The number of %s's members should be 2, but we get %d\n",
			value_name, btf_vlen(vt));
		return false;
	}
	member = btf_type_member(vt);
	mt = btf_type_by_id(btf, member->type);
	common_value_type = btf_type_by_id(btf_vmlinux,
					   st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]);
	if (mt != common_value_type) {
		pr_warn("The first member of %s should be bpf_struct_ops_common_value\n",
			value_name);
		return false;
	}
	member++;
	mt = btf_type_by_id(btf, member->type);
	if (mt != type) {
		pr_warn("The second member of %s should be %s\n",
			value_name, btf_name_by_offset(btf, type->name_off));
		return false;
	}

	return true;
}

static void *bpf_struct_ops_image_alloc(void)
{
	void *image;
	int err;

	err = bpf_jit_charge_modmem(PAGE_SIZE);
	if (err)
		return ERR_PTR(err);
	image = arch_alloc_bpf_trampoline(PAGE_SIZE);
	if (!image) {
		bpf_jit_uncharge_modmem(PAGE_SIZE);
		return ERR_PTR(-ENOMEM);
	}

	return image;
}

void bpf_struct_ops_image_free(void *image)
{
	if (image) {
		arch_free_bpf_trampoline(image, PAGE_SIZE);
		bpf_jit_uncharge_modmem(PAGE_SIZE);
	}
}

#define MAYBE_NULL_SUFFIX "__nullable"
#define REFCOUNTED_SUFFIX "__ref"
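
/* For example (an illustrative stub, not taken from a real subsystem):
 *
 *	static int sample_ops__probe(struct sample_state *st__nullable)
 *	{
 *		return 0;
 *	}
 *
 * marks arg#0 of the "probe" member as possibly NULL, so the verifier
 * requires BPF progs to NULL-check it before dereferencing.
 */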

/* Prepare argument info for every nullable or refcounted argument of a
 * member of a struct_ops type.
 *
 * Initialize a struct bpf_struct_ops_arg_info according to type info of
 * the arguments of a stub function. (Check kCFI for more information about
 * stub functions.)
 *
 * Each member in the struct_ops type has a struct bpf_struct_ops_arg_info
 * to provide an array of struct bpf_ctx_arg_aux, which in turn provides
 * the information used by the verifier to check the arguments of the
 * BPF struct_ops program assigned to the member. Here, we only care about
 * the arguments that are marked as __nullable or __ref.
 *
 * The array of struct bpf_ctx_arg_aux is eventually assigned to
 * prog->aux->ctx_arg_info of BPF struct_ops programs and passed to the
 * verifier. (See check_struct_ops_btf_id())
 *
 * On success, arg_info->info will be the list of struct bpf_ctx_arg_aux.
 * On failure, arg_info is left untouched.
 */
static int prepare_arg_info(struct btf *btf,
			    const char *st_ops_name,
			    const char *member_name,
			    const struct btf_type *func_proto, void *stub_func_addr,
			    struct bpf_struct_ops_arg_info *arg_info)
{
	const struct btf_type *stub_func_proto, *pointed_type;
	bool is_nullable = false, is_refcounted = false;
	const struct btf_param *stub_args, *args;
	struct bpf_ctx_arg_aux *info, *info_buf;
	u32 nargs, arg_no, info_cnt = 0;
	char ksym[KSYM_SYMBOL_LEN];
	const char *stub_fname;
	const char *suffix;
	s32 stub_func_id;
	u32 arg_btf_id;
	int offset;

	stub_fname = kallsyms_lookup((unsigned long)stub_func_addr, NULL, NULL, NULL, ksym);
	if (!stub_fname) {
		pr_warn("Cannot find the stub function name for the %s in struct %s\n",
			member_name, st_ops_name);
		return -ENOENT;
	}

	stub_func_id = btf_find_by_name_kind(btf, stub_fname, BTF_KIND_FUNC);
	if (stub_func_id < 0) {
		pr_warn("Cannot find the stub function %s in btf\n", stub_fname);
		return -ENOENT;
	}

	stub_func_proto = btf_type_by_id(btf, stub_func_id);
	stub_func_proto = btf_type_by_id(btf, stub_func_proto->type);

	/* Check if the number of arguments of the stub function is the same
	 * as the number of arguments of the function pointer.
	 */
	nargs = btf_type_vlen(func_proto);
	if (nargs != btf_type_vlen(stub_func_proto)) {
		pr_warn("the number of arguments of the stub function %s does not match the number of arguments of the member %s of struct %s\n",
			stub_fname, member_name, st_ops_name);
		return -EINVAL;
	}

	if (!nargs)
		return 0;

	args = btf_params(func_proto);
	stub_args = btf_params(stub_func_proto);

	info_buf = kcalloc(nargs, sizeof(*info_buf), GFP_KERNEL);
	if (!info_buf)
		return -ENOMEM;

	/* Prepare info for every nullable or refcounted argument */
	info = info_buf;
	for (arg_no = 0; arg_no < nargs; arg_no++) {
		/* Skip arguments that are not suffixed with
		 * "__nullable" or "__ref".
		 */
		is_nullable = btf_param_match_suffix(btf, &stub_args[arg_no],
						     MAYBE_NULL_SUFFIX);
		is_refcounted = btf_param_match_suffix(btf, &stub_args[arg_no],
						       REFCOUNTED_SUFFIX);

		if (is_nullable)
			suffix = MAYBE_NULL_SUFFIX;
		else if (is_refcounted)
			suffix = REFCOUNTED_SUFFIX;
		else
			continue;

		/* Should be a pointer to struct */
		pointed_type = btf_type_resolve_ptr(btf,
						    args[arg_no].type,
						    &arg_btf_id);
		if (!pointed_type ||
		    !btf_type_is_struct(pointed_type)) {
			pr_warn("stub function %s has %s tagging to an unsupported type\n",
				stub_fname, suffix);
			goto err_out;
		}

		offset = btf_ctx_arg_offset(btf, func_proto, arg_no);
		if (offset < 0) {
			pr_warn("stub function %s has an invalid trampoline ctx offset for arg#%u\n",
				stub_fname, arg_no);
			goto err_out;
		}

		if (args[arg_no].type != stub_args[arg_no].type) {
			pr_warn("arg#%u type in stub function %s does not match with its original func_proto\n",
				arg_no, stub_fname);
			goto err_out;
		}

		/* Fill the information of the new argument */
		info->btf_id = arg_btf_id;
		info->btf = btf;
		info->offset = offset;
		if (is_nullable) {
			info->reg_type = PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
		} else if (is_refcounted) {
			info->reg_type = PTR_TRUSTED | PTR_TO_BTF_ID;
			info->refcounted = true;
		}

		info++;
		info_cnt++;
	}

	if (info_cnt) {
		arg_info->info = info_buf;
		arg_info->cnt = info_cnt;
	} else {
		kfree(info_buf);
	}

	return 0;

err_out:
	kfree(info_buf);

	return -EINVAL;
}

/* Clean up the arg_info in a struct bpf_struct_ops_desc. */
void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
{
	struct bpf_struct_ops_arg_info *arg_info;
	int i;

	arg_info = st_ops_desc->arg_info;
	for (i = 0; i < btf_type_vlen(st_ops_desc->type); i++)
		kfree(arg_info[i].info);

	kfree(arg_info);
}

static bool is_module_member(const struct btf *btf, u32 id)
{
	const struct btf_type *t;

	t = btf_type_resolve_ptr(btf, id, NULL);
	if (!t)
		return false;

	if (!__btf_type_is_struct(t) && !btf_type_is_fwd(t))
		return false;

	return !strcmp(btf_name_by_offset(btf, t->name_off), "module");
}

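/* A member is supported iff the subsystem provided a cfi stub for it. */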
int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
{
	void *func_ptr = *(void **)(st_ops->cfi_stubs + moff);

	return func_ptr ? 0 : -ENOTSUPP;
}

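/* Build the descriptor of one struct_ops type: find the struct and its
 * value type in @btf, validate every member, distill a func model for
 * each func ptr member, and prepare per-member argument info from the
 * cfi stubs.
 */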
int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
			     struct btf *btf,
			     struct bpf_verifier_log *log)
{
	struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
	struct bpf_struct_ops_arg_info *arg_info;
	const struct btf_member *member;
	const struct btf_type *t;
	s32 type_id, value_id;
	char value_name[128];
	const char *mname;
	int i, err;

	if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
	    sizeof(value_name)) {
		pr_warn("struct_ops name %s is too long\n",
			st_ops->name);
		return -EINVAL;
	}
	sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

	if (!st_ops->cfi_stubs) {
		pr_warn("struct_ops for %s has no cfi_stubs\n", st_ops->name);
		return -EINVAL;
	}

	type_id = btf_find_by_name_kind(btf, st_ops->name,
					BTF_KIND_STRUCT);
	if (type_id < 0) {
		pr_warn("Cannot find struct %s in %s\n",
			st_ops->name, btf_get_name(btf));
		return -EINVAL;
	}
	t = btf_type_by_id(btf, type_id);
	if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
		pr_warn("Cannot support #%u members in struct %s\n",
			btf_type_vlen(t), st_ops->name);
		return -EINVAL;
	}

	value_id = btf_find_by_name_kind(btf, value_name,
					 BTF_KIND_STRUCT);
	if (value_id < 0) {
		pr_warn("Cannot find struct %s in %s\n",
			value_name, btf_get_name(btf));
		return -EINVAL;
	}
	if (!is_valid_value_type(btf, value_id, t, value_name))
		return -EINVAL;

	arg_info = kcalloc(btf_type_vlen(t), sizeof(*arg_info),
			   GFP_KERNEL);
	if (!arg_info)
		return -ENOMEM;

	st_ops_desc->arg_info = arg_info;
	st_ops_desc->type = t;
	st_ops_desc->type_id = type_id;
	st_ops_desc->value_id = value_id;
	st_ops_desc->value_type = btf_type_by_id(btf, value_id);

	for_each_member(i, t, member) {
		const struct btf_type *func_proto, *ret_type;
		void **stub_func_addr;
		u32 moff;

		moff = __btf_member_bit_offset(t, member) / 8;
		mname = btf_name_by_offset(btf, member->name_off);
		if (!*mname) {
			pr_warn("anon member in struct %s is not supported\n",
				st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (__btf_member_bitfield_size(t, member)) {
			pr_warn("bit field member %s in struct %s is not supported\n",
				mname, st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (!st_ops_ids[IDX_MODULE_ID] && is_module_member(btf, member->type)) {
			pr_warn("'struct module' btf id not found. Is CONFIG_MODULES enabled? bpf_struct_ops '%s' needs module support.\n",
				st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		func_proto = btf_type_resolve_func_ptr(btf,
						       member->type,
						       NULL);

		/* The member is not a function pointer or
		 * the function pointer is not supported.
		 */
		if (!func_proto || bpf_struct_ops_supported(st_ops, moff))
			continue;

		if (func_proto->type) {
			ret_type = btf_type_resolve_ptr(btf, func_proto->type, NULL);
			if (ret_type && !__btf_type_is_struct(ret_type)) {
				pr_warn("func ptr %s in struct %s returns non-struct pointer, which is not supported\n",
					mname, st_ops->name);
				err = -EOPNOTSUPP;
				goto errout;
			}
		}

		if (btf_distill_func_proto(log, btf,
					   func_proto, mname,
					   &st_ops->func_models[i])) {
			pr_warn("Error in parsing func ptr %s in struct %s\n",
				mname, st_ops->name);
			err = -EINVAL;
			goto errout;
		}

		stub_func_addr = *(void **)(st_ops->cfi_stubs + moff);
		err = prepare_arg_info(btf, st_ops->name, mname,
				       func_proto, stub_func_addr,
				       arg_info + i);
		if (err)
			goto errout;
	}

	if (st_ops->init(btf)) {
		pr_warn("Error in init bpf_struct_ops %s\n",
			st_ops->name);
		err = -EINVAL;
		goto errout;
	}

	return 0;

errout:
	bpf_struct_ops_desc_release(st_ops_desc);

	return err;
}

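/* The map holds exactly one element at key 0, so iteration either yields
 * key 0 or reports the end.
 */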
static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}

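/* Fill @value with the user-space view of the map: the uvalue copy that
 * holds prog ids instead of trampoline addresses, plus the current state
 * and an approximate refcnt.
 */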
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;
	s64 refcnt;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->common.state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed. state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->common.state = state;

	/* This value offers the user space a general estimate of how
	 * many sockets are still utilizing this struct_ops for TCP
	 * congestion control. The number might not be exact, but it
	 * should sufficiently meet our present goals.
	 */
	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
	refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0));

	return 0;
}

static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->links[i])
			break;
		bpf_link_put(st_map->links[i]);
		st_map->links[i] = NULL;
	}
}

static void bpf_struct_ops_map_free_image(struct bpf_struct_ops_map *st_map)
{
	int i;

	for (i = 0; i < st_map->image_pages_cnt; i++)
		bpf_struct_ops_image_free(st_map->image_pages[i]);
	st_map->image_pages_cnt = 0;
}

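/* Reject @data if any padding hole between (or after) the members of @t
 * carries a non-zero byte.
 */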
static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = __btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf, member->type);
		mtype = btf_resolve_size(btf, mtype, &msize);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}

static void bpf_struct_ops_link_release(struct bpf_link *link)
{
}

static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
{
	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);

	kfree(tlink);
}

const struct bpf_link_ops bpf_struct_ops_link_lops = {
	.release = bpf_struct_ops_link_release,
	.dealloc = bpf_struct_ops_link_dealloc,
};

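/* Emit the trampoline of a single func ptr member at *_image + *_image_off.
 * A fresh page is charged and allocated only when the current page cannot
 * fit the trampoline and @allow_alloc permits it.
 */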
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *stub_func,
				      void **_image, u32 *_image_off,
				      bool allow_alloc)
{
	u32 image_off = *_image_off, flags = BPF_TRAMP_F_INDIRECT;
	void *image = *_image;
	int size;

	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;

	if (model->ret_size > 0)
		flags |= BPF_TRAMP_F_RET_FENTRY_RET;

	size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
	if (size <= 0)
		return size ? : -EFAULT;

	/* Allocate image buffer if necessary */
	if (!image || size > PAGE_SIZE - image_off) {
		if (!allow_alloc)
			return -E2BIG;

		image = bpf_struct_ops_image_alloc();
		if (IS_ERR(image))
			return PTR_ERR(image);
		image_off = 0;
	}

	size = arch_prepare_bpf_trampoline(NULL, image + image_off,
					   image + image_off + size,
					   model, flags, tlinks, stub_func);
	if (size <= 0) {
		if (image != *_image)
			bpf_struct_ops_image_free(image);
		return size ? : -EFAULT;
	}

	*_image = image;
	*_image_off = image_off + size;
	return 0;
}

static void bpf_struct_ops_ksym_init(const char *tname, const char *mname,
				     void *image, unsigned int size,
				     struct bpf_ksym *ksym)
{
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf__%s_%s", tname, mname);
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	bpf_image_ksym_init(image, size, ksym);
}

static void bpf_struct_ops_map_add_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		bpf_image_ksym_add(st_map->ksyms[i]);
	}
}

static void bpf_struct_ops_map_del_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		bpf_image_ksym_del(st_map->ksyms[i]);
	}
}

static void bpf_struct_ops_map_free_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		kfree(st_map->ksyms[i]);
		st_map->ksyms[i] = NULL;
	}
}

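/* A sketch of the update flow: copy the user-provided value, let the
 * subsystem's init_member() claim members first, turn every remaining
 * prog fd into a bpf_link plus trampoline, then either mark the map
 * READY (BPF_F_LINK) or register it right away via st_ops->reg().
 */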
static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
	const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_type *module_type;
	const struct btf_member *member;
	const struct btf_type *t = st_ops_desc->type;
	struct bpf_tramp_links *tlinks;
	void *udata, *kdata;
	int prog_fd, err;
	u32 i, trampoline_start, image_off = 0;
	void *cur_image = NULL, *image = NULL;
	struct bpf_link **plink;
	struct bpf_ksym **pksym;
	const char *tname, *mname;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
	if (err)
		return err;

	uvalue = value;
	err = check_zero_holes(st_map->btf, t, uvalue->data);
	if (err)
		return err;

	if (uvalue->common.state || refcount_read(&uvalue->common.refcnt))
		return -EINVAL;

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;

	plink = st_map->links;
	pksym = st_map->ksyms;
	tname = btf_name_by_offset(st_map->btf, t->name_off);
	module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		struct bpf_tramp_link *link;
		struct bpf_ksym *ksym;
		u32 moff;

		moff = __btf_member_bit_offset(t, member) / 8;
		mname = btf_name_by_offset(st_map->btf, member->name_off);
		ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
		if (ptype == module_type) {
			if (*(void **)(udata + moff))
				goto reset_unlock;
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zero-ed members
		 * here. Reject everything else.
		 */

		/* All non func ptr members must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(st_map->btf, member->type);
			mtype = btf_resolve_size(st_map->btf, mtype, &msize);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check as the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops_desc->type_id ||
		    prog->expected_attach_type != i) {
			bpf_prog_put(prog);
			err = -EINVAL;
			goto reset_unlock;
		}

		link = kzalloc(sizeof(*link), GFP_USER);
		if (!link) {
			bpf_prog_put(prog);
			err = -ENOMEM;
			goto reset_unlock;
		}
		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
			      &bpf_struct_ops_link_lops, prog);
		*plink++ = &link->link;

		ksym = kzalloc(sizeof(*ksym), GFP_USER);
		if (!ksym) {
			err = -ENOMEM;
			goto reset_unlock;
		}
		*pksym++ = ksym;

		trampoline_start = image_off;
		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
						&st_ops->func_models[i],
						*(void **)(st_ops->cfi_stubs + moff),
						&image, &image_off,
						st_map->image_pages_cnt < MAX_TRAMP_IMAGE_PAGES);
		if (err)
			goto reset_unlock;

		if (cur_image != image) {
			st_map->image_pages[st_map->image_pages_cnt++] = image;
			cur_image = image;
			trampoline_start = 0;
		}

		*(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset();

		/* put prog_id to udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;

		/* init ksym for this trampoline */
		bpf_struct_ops_ksym_init(tname, mname,
					 image + trampoline_start,
					 image_off - trampoline_start,
					 ksym);
	}

	if (st_ops->validate) {
		err = st_ops->validate(kdata);
		if (err)
			goto reset_unlock;
	}
	for (i = 0; i < st_map->image_pages_cnt; i++) {
		err = arch_protect_bpf_trampoline(st_map->image_pages[i],
						  PAGE_SIZE);
		if (err)
			goto reset_unlock;
	}

	if (st_map->map.map_flags & BPF_F_LINK) {
		err = 0;
		/* Let bpf_link handle registration & unregistration.
		 *
		 * Pair with smp_load_acquire() during lookup_elem().
		 */
		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY);
		goto unlock;
	}

	err = st_ops->reg(kdata, NULL);
	if (likely(!err)) {
		/* This refcnt increment on the map here after
		 * 'st_ops->reg()' is secure since the state of the
		 * map must be set to INIT at this moment, and thus
		 * bpf_struct_ops_map_delete_elem() can't unregister
		 * or transition it to TOBEFREE concurrently.
		 */
		bpf_map_inc(map);
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg(). Can happen if this struct_ops needs to be
	 * verified as a whole, after all init_member() calls. Can also happen if
	 * there was a race in registering the struct_ops (under the same name) to
	 * a sub-system through different struct_ops's maps.
	 */

reset_unlock:
	bpf_struct_ops_map_free_ksyms(st_map);
	bpf_struct_ops_map_free_image(st_map);
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);
unlock:
	kfree(tlinks);
	mutex_unlock(&st_map->lock);
	if (!err)
		bpf_struct_ops_map_add_ksyms(st_map);
	return err;
}

static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	if (st_map->map.map_flags & BPF_F_LINK)
		return -EOPNOTSUPP;

	prev_state = cmpxchg(&st_map->kvalue.common.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL);
		bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen. Treat it as not found. */
		return -ENOENT;
	}
}

static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(st_map->btf,
				  map->btf_vmlinux_value_type_id,
				  value, m);
		seq_putc(m, '\n');
	}

	kfree(value);
}

static void __bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->links)
		bpf_struct_ops_map_put_progs(st_map);
	if (st_map->ksyms)
		bpf_struct_ops_map_free_ksyms(st_map);
	bpf_map_area_free(st_map->links);
	bpf_map_area_free(st_map->ksyms);
	bpf_struct_ops_map_free_image(st_map);
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	/* st_ops->owner was acquired during map_alloc to implicitly hold
	 * the btf's refcnt. The acquire was only done when btf_is_module();
	 * st_map->btf cannot be NULL here.
	 */
	if (btf_is_module(st_map->btf))
		module_put(st_map->st_ops_desc->st_ops->owner);

	bpf_struct_ops_map_del_ksyms(st_map);

	/* The struct_ops's function may switch to another struct_ops.
	 *
	 * For example, bpf_tcp_cc_x->init() may switch to
	 * another tcp_cc_y by calling
	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
	 * and its refcount may reach 0, which then frees its
	 * trampoline image while tcp_cc_x is still running.
	 *
	 * A vanilla rcu gp is to wait for all bpf-tcp-cc progs
	 * to finish. bpf-tcp-cc progs are non-sleepable.
	 * A rcu_tasks gp is to wait for the last few insns
	 * in the trampoline image to finish before releasing
	 * the trampoline image.
	 */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks);

	__bpf_struct_ops_map_free(map);
}

static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) ||
	    !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}

static u32 count_func_ptrs(const struct btf *btf, const struct btf_type *t)
{
	int i;
	u32 count;
	const struct btf_member *member;

	count = 0;
	for_each_member(i, t, member)
		if (btf_type_resolve_func_ptr(btf, member->type, NULL))
			count++;
	return count;
}

static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops_desc *st_ops_desc;
	size_t st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct module *mod = NULL;
	struct bpf_map *map;
	struct btf *btf;
	int ret;

	if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) {
		/* The map holds btf for its whole life time. */
		btf = btf_get_by_fd(attr->value_type_btf_obj_fd);
		if (IS_ERR(btf))
			return ERR_CAST(btf);
		if (!btf_is_module(btf)) {
			btf_put(btf);
			return ERR_PTR(-EINVAL);
		}

		mod = btf_try_get_module(btf);
		/* mod holds a refcnt to btf. We don't need an extra refcnt
		 * here.
		 */
		btf_put(btf);
		if (!mod)
			return ERR_PTR(-EINVAL);
	} else {
		btf = bpf_get_btf_vmlinux();
		if (IS_ERR(btf))
			return ERR_CAST(btf);
		if (!btf)
			return ERR_PTR(-ENOTSUPP);
	}

	st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id);
	if (!st_ops_desc) {
		ret = -ENOTSUPP;
		goto errout;
	}

	vt = st_ops_desc->value_type;
	if (attr->value_size != vt->size) {
		ret = -EINVAL;
		goto errout;
	}

	t = st_ops_desc->type;

	st_map_size = sizeof(*st_map) +
		/* kvalue stores the
		 * struct bpf_struct_ops_tcp_congestion_ops
		 */
		(vt->size - sizeof(struct bpf_struct_ops_value));

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map) {
		ret = -ENOMEM;
		goto errout;
	}

	st_map->st_ops_desc = st_ops_desc;
	map = &st_map->map;

	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->funcs_cnt = count_func_ptrs(btf, t);
	st_map->links =
		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_link *),
				   NUMA_NO_NODE);

	st_map->ksyms =
		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_ksym *),
				   NUMA_NO_NODE);
	if (!st_map->uvalue || !st_map->links || !st_map->ksyms) {
		ret = -ENOMEM;
		goto errout_free;
	}
	st_map->btf = btf;

	mutex_init(&st_map->lock);
	bpf_map_init_from_attr(map, attr);

	return map;

errout_free:
	__bpf_struct_ops_map_free(map);
errout:
	module_put(mod);

	return ERR_PTR(ret);
}

static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
	const struct btf_type *vt = st_ops_desc->value_type;
	u64 usage;

	usage = sizeof(*st_map) +
			vt->size - sizeof(struct bpf_struct_ops_value);
	usage += vt->size;
	usage += st_map->funcs_cnt * sizeof(struct bpf_link *);
	usage += st_map->funcs_cnt * sizeof(struct bpf_ksym *);
	usage += PAGE_SIZE;
	return usage;
}

BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_mem_usage = bpf_struct_ops_map_mem_usage,
	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
};

1150 /* "const void *" because some subsystem is
1151 * passing a const (e.g. const struct tcp_congestion_ops *)
1152 */
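/* Hypothetical subsystem usage (names are illustrative):
 *
 *	if (!bpf_struct_ops_get(ops))	// pin the map while ops is in use
 *		return -EBUSY;
 *	...
 *	bpf_struct_ops_put(ops);	// drop the ref when done
 */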
bool bpf_struct_ops_get(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	map = __bpf_map_inc_not_zero(&st_map->map, false);
	return !IS_ERR(map);
}

void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	bpf_map_put(&st_map->map);
}

static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
		map->map_flags & BPF_F_LINK &&
		/* Pair with smp_store_release() during map_update */
		smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY;
}

static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_struct_ops_map *st_map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = (struct bpf_struct_ops_map *)
		rcu_dereference_protected(st_link->map, true);
	if (st_map) {
		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
		bpf_map_put(&st_map->map);
	}
	kfree(st_link);
}

static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
						struct seq_file *seq)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	if (map)
		seq_printf(seq, "map_id:\t%d\n", map->id);
	rcu_read_unlock();
}

static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
						  struct bpf_link_info *info)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	if (map)
		info->struct_ops.map_id = map->id;
	rcu_read_unlock();
	return 0;
}

static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
					  struct bpf_map *expected_old_map)
{
	struct bpf_struct_ops_map *st_map, *old_st_map;
	struct bpf_map *old_map;
	struct bpf_struct_ops_link *st_link;
	int err;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = container_of(new_map, struct bpf_struct_ops_map, map);

	if (!bpf_struct_ops_valid_to_reg(new_map))
		return -EINVAL;

	if (!st_map->st_ops_desc->st_ops->update)
		return -EOPNOTSUPP;

	mutex_lock(&update_mutex);

	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
	if (!old_map) {
		err = -ENOLINK;
		goto err_out;
	}
	if (expected_old_map && old_map != expected_old_map) {
		err = -EPERM;
		goto err_out;
	}

	old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
	/* The new and old struct_ops must be the same type. */
	if (st_map->st_ops_desc != old_st_map->st_ops_desc) {
		err = -EINVAL;
		goto err_out;
	}

	err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data, link);
	if (err)
		goto err_out;

	bpf_map_inc(new_map);
	rcu_assign_pointer(st_link->map, new_map);
	bpf_map_put(old_map);

err_out:
	mutex_unlock(&update_mutex);

	return err;
}

static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
{
	struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link);
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;

	mutex_lock(&update_mutex);

	map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
	if (!map) {
		mutex_unlock(&update_mutex);
		return 0;
	}
	st_map = container_of(map, struct bpf_struct_ops_map, map);

	st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);

	RCU_INIT_POINTER(st_link->map, NULL);
	/* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
	 * bpf_map_inc() in bpf_struct_ops_map_link_update().
	 */
	bpf_map_put(&st_map->map);

	mutex_unlock(&update_mutex);

	wake_up_interruptible_poll(&st_link->wait_hup, EPOLLHUP);

	return 0;
}

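/* Report EPOLLHUP once the link has been detached from its map, so that
 * pollers on the link fd can notice the struct_ops going away.
 */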
static __poll_t bpf_struct_ops_map_link_poll(struct file *file,
					     struct poll_table_struct *pts)
{
	struct bpf_struct_ops_link *st_link = file->private_data;

	poll_wait(file, &st_link->wait_hup, pts);

	return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP;
}

static const struct bpf_link_ops bpf_struct_ops_map_lops = {
	.dealloc = bpf_struct_ops_map_link_dealloc,
	.detach = bpf_struct_ops_map_link_detach,
	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
	.update_map = bpf_struct_ops_map_link_update,
	.poll = bpf_struct_ops_map_link_poll,
};

int bpf_struct_ops_link_create(union bpf_attr *attr)
{
	struct bpf_struct_ops_link *link = NULL;
	struct bpf_link_primer link_primer;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;
	int err;

	map = bpf_map_get(attr->link_create.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	st_map = (struct bpf_struct_ops_map *)map;

	if (!bpf_struct_ops_valid_to_reg(map)) {
		err = -EINVAL;
		goto err_out;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto err_out;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto err_out;

	init_waitqueue_head(&link->wait_hup);

	/* Hold the update_mutex such that the subsystem cannot
	 * do link->ops->detach() before the link is fully initialized.
	 */
	mutex_lock(&update_mutex);
	err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link);
	if (err) {
		mutex_unlock(&update_mutex);
		bpf_link_cleanup(&link_primer);
		link = NULL;
		goto err_out;
	}
	RCU_INIT_POINTER(link->map, map);
	mutex_unlock(&update_mutex);

	return bpf_link_settle(&link_primer);

err_out:
	bpf_map_put(map);
	kfree(link);
	return err;
}

void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	info->btf_vmlinux_id = btf_obj_id(st_map->btf);
}