Lines Matching +full:function +full:- +full:enumerator

1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
71 #pragma GCC diagnostic ignored "-Wformat-nonliteral"
116 if (err != -EPERM || geteuid() != 0) in pr_perm_msg()
133 pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n", in pr_perm_msg()
149 fd = -1; \
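The "fd = -1;" match above is the tail of libbpf's zclose() cleanup helper; a reconstructed sketch of the macro's shape (illustrative, not a verbatim quote of the upstream definition):

    #define zclose(fd) ({                   \
            int ___err = 0;                 \
            if ((fd) >= 0)                  \
                    ___err = close((fd));   \
            fd = -1;                        \
            ___err; })

Resetting the descriptor to -1 after closing it means callers that reuse the variable can never double-close a file descriptor.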
235 * program. For the entry-point (main) BPF program, this is always
236 * zero. For a sub-program, this gets reset before each of the main BPF
238 * whether the sub-program was already appended to the main program, and
256 * entry-point BPF programs, this includes the size of the main program
257 * itself plus all the used sub-programs, appended at the end
304 * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
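The kern_vdata comment above describes a kernel-side wrapper struct; an illustrative sketch of the relationship (field layout is hypothetical, the real definition lives in kernel sources):

    struct bpf_struct_ops_tcp_congestion_ops {
            /* ... kernel-internal bookkeeping fields ... */
            struct tcp_congestion_ops data; /* begins at kern_data_off */
    };

bpf_map__init_kern_struct_ops() allocates kern_vdata with the wrapper's size and copies each member of st_ops->data into the embedded "data" field, translating member offsets as it goes.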
476 #define obj_elf_valid(o) ((o)->efile.elf)
497 * it is possible that prog->instances.nr == -1. in bpf_program__unload()
499 if (prog->instances.nr > 0) { in bpf_program__unload()
500 for (i = 0; i < prog->instances.nr; i++) in bpf_program__unload()
501 zclose(prog->instances.fds[i]); in bpf_program__unload()
502 } else if (prog->instances.nr != -1) { in bpf_program__unload()
504 prog->instances.nr); in bpf_program__unload()
507 prog->instances.nr = -1; in bpf_program__unload()
508 zfree(&prog->instances.fds); in bpf_program__unload()
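Note on the sentinel above: instances.nr == -1 marks a program that was never loaded (or has already been cleaned up), while a non-negative value counts per-instance program FDs; resetting it to -1 after freeing the FD array makes repeated unload calls safe.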
510 zfree(&prog->func_info); in bpf_program__unload()
511 zfree(&prog->line_info); in bpf_program__unload()
519 if (prog->clear_priv) in bpf_program__exit()
520 prog->clear_priv(prog, prog->priv); in bpf_program__exit()
522 prog->priv = NULL; in bpf_program__exit()
523 prog->clear_priv = NULL; in bpf_program__exit()
526 zfree(&prog->name); in bpf_program__exit()
527 zfree(&prog->sec_name); in bpf_program__exit()
528 zfree(&prog->pin_name); in bpf_program__exit()
529 zfree(&prog->insns); in bpf_program__exit()
530 zfree(&prog->reloc_desc); in bpf_program__exit()
532 prog->nr_reloc = 0; in bpf_program__exit()
533 prog->insns_cnt = 0; in bpf_program__exit()
534 prog->sec_idx = -1; in bpf_program__exit()
541 name = p = strdup(prog->sec_name); in __bpf_program__pin_name()
550 return BPF_CLASS(insn->code) == BPF_JMP && in insn_is_subprog_call()
551 BPF_OP(insn->code) == BPF_CALL && in insn_is_subprog_call()
552 BPF_SRC(insn->code) == BPF_K && in insn_is_subprog_call()
553 insn->src_reg == BPF_PSEUDO_CALL && in insn_is_subprog_call()
554 insn->dst_reg == 0 && in insn_is_subprog_call()
555 insn->off == 0; in insn_is_subprog_call()
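For reference, the predicate above matches the bpf-to-bpf call instruction shape; an illustrative initializer (the imm field carries the callee's relative offset and is fixed up during relocation):

    struct bpf_insn subprog_call = {
            .code    = BPF_JMP | BPF_CALL,
            .src_reg = BPF_PSEUDO_CALL, /* distinguishes subprog calls from helper calls */
            .dst_reg = 0,
            .off     = 0,
            .imm     = 0,               /* relative callee offset, patched during relocation */
    };

Helper calls use the same BPF_JMP | BPF_CALL opcode but with src_reg == 0 and the helper ID in imm, which is why the src_reg check is the discriminating test.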
566 return -EINVAL; in bpf_object__init_prog()
570 prog->obj = obj; in bpf_object__init_prog()
572 prog->sec_idx = sec_idx; in bpf_object__init_prog()
573 prog->sec_insn_off = sec_off / BPF_INSN_SZ; in bpf_object__init_prog()
574 prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ; in bpf_object__init_prog()
576 prog->insns_cnt = prog->sec_insn_cnt; in bpf_object__init_prog()
578 prog->type = BPF_PROG_TYPE_UNSPEC; in bpf_object__init_prog()
579 prog->load = true; in bpf_object__init_prog()
581 prog->instances.fds = NULL; in bpf_object__init_prog()
582 prog->instances.nr = -1; in bpf_object__init_prog()
584 prog->sec_name = strdup(sec_name); in bpf_object__init_prog()
585 if (!prog->sec_name) in bpf_object__init_prog()
588 prog->name = strdup(name); in bpf_object__init_prog()
589 if (!prog->name) in bpf_object__init_prog()
592 prog->pin_name = __bpf_program__pin_name(prog); in bpf_object__init_prog()
593 if (!prog->pin_name) in bpf_object__init_prog()
596 prog->insns = malloc(insn_data_sz); in bpf_object__init_prog()
597 if (!prog->insns) in bpf_object__init_prog()
599 memcpy(prog->insns, insn_data, insn_data_sz); in bpf_object__init_prog()
605 return -ENOMEM; in bpf_object__init_prog()
613 void *data = sec_data->d_buf; in bpf_object__add_programs()
614 size_t sec_sz = sec_data->d_size, sec_off, prog_sz; in bpf_object__add_programs()
619 progs = obj->programs; in bpf_object__add_programs()
620 nr_progs = obj->nr_programs; in bpf_object__add_programs()
627 return -LIBBPF_ERRNO__FORMAT; in bpf_object__add_programs()
636 return -LIBBPF_ERRNO__FORMAT; in bpf_object__add_programs()
642 return -LIBBPF_ERRNO__FORMAT; in bpf_object__add_programs()
651 * In this case the original obj->programs in bpf_object__add_programs()
657 return -ENOMEM; in bpf_object__add_programs()
659 obj->programs = progs; in bpf_object__add_programs()
669 obj->nr_programs = nr_progs; in bpf_object__add_programs()
710 if (!strcmp(btf__name_by_offset(btf, m->name_off), name)) in find_member_by_name()
762 if (kern_data_member->type == kern_type_id) in find_struct_ops_kern_types()
768 return -EINVAL; in find_struct_ops_kern_types()
782 return map->def.type == BPF_MAP_TYPE_STRUCT_OPS; in bpf_map__is_struct_ops()
798 st_ops = map->st_ops; in bpf_map__init_kern_struct_ops()
799 type = st_ops->type; in bpf_map__init_kern_struct_ops()
800 tname = st_ops->tname; in bpf_map__init_kern_struct_ops()
809 map->name, st_ops->type_id, kern_type_id, kern_vtype_id); in bpf_map__init_kern_struct_ops()
811 map->def.value_size = kern_vtype->size; in bpf_map__init_kern_struct_ops()
812 map->btf_vmlinux_value_type_id = kern_vtype_id; in bpf_map__init_kern_struct_ops()
814 st_ops->kern_vdata = calloc(1, kern_vtype->size); in bpf_map__init_kern_struct_ops()
815 if (!st_ops->kern_vdata) in bpf_map__init_kern_struct_ops()
816 return -ENOMEM; in bpf_map__init_kern_struct_ops()
818 data = st_ops->data; in bpf_map__init_kern_struct_ops()
819 kern_data_off = kern_data_member->offset / 8; in bpf_map__init_kern_struct_ops()
820 kern_data = st_ops->kern_vdata + kern_data_off; in bpf_map__init_kern_struct_ops()
832 mname = btf__name_by_offset(btf, member->name_off); in bpf_map__init_kern_struct_ops()
836 map->name, mname); in bpf_map__init_kern_struct_ops()
837 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
840 kern_member_idx = kern_member - btf_members(kern_type); in bpf_map__init_kern_struct_ops()
844 map->name, mname); in bpf_map__init_kern_struct_ops()
845 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
848 moff = member->offset / 8; in bpf_map__init_kern_struct_ops()
849 kern_moff = kern_member->offset / 8; in bpf_map__init_kern_struct_ops()
854 mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id); in bpf_map__init_kern_struct_ops()
855 kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type, in bpf_map__init_kern_struct_ops()
857 if (BTF_INFO_KIND(mtype->info) != in bpf_map__init_kern_struct_ops()
858 BTF_INFO_KIND(kern_mtype->info)) { in bpf_map__init_kern_struct_ops()
860 map->name, mname, BTF_INFO_KIND(mtype->info), in bpf_map__init_kern_struct_ops()
861 BTF_INFO_KIND(kern_mtype->info)); in bpf_map__init_kern_struct_ops()
862 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
868 mtype = skip_mods_and_typedefs(btf, mtype->type, &mtype_id); in bpf_map__init_kern_struct_ops()
870 kern_mtype->type, in bpf_map__init_kern_struct_ops()
875 map->name, mname); in bpf_map__init_kern_struct_ops()
876 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
879 prog = st_ops->progs[i]; in bpf_map__init_kern_struct_ops()
882 map->name, mname); in bpf_map__init_kern_struct_ops()
886 prog->attach_btf_id = kern_type_id; in bpf_map__init_kern_struct_ops()
887 prog->expected_attach_type = kern_member_idx; in bpf_map__init_kern_struct_ops()
889 st_ops->kern_func_off[i] = kern_data_off + kern_moff; in bpf_map__init_kern_struct_ops()
892 map->name, mname, prog->name, moff, in bpf_map__init_kern_struct_ops()
902 map->name, mname, (ssize_t)msize, in bpf_map__init_kern_struct_ops()
904 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
908 map->name, mname, (unsigned int)msize, in bpf_map__init_kern_struct_ops()
922 for (i = 0; i < obj->nr_maps; i++) { in bpf_object__init_kern_struct_ops_maps()
923 map = &obj->maps[i]; in bpf_object__init_kern_struct_ops_maps()
928 err = bpf_map__init_kern_struct_ops(map, obj->btf, in bpf_object__init_kern_struct_ops_maps()
929 obj->btf_vmlinux); in bpf_object__init_kern_struct_ops_maps()
948 if (obj->efile.st_ops_shndx == -1) in bpf_object__init_struct_ops_maps()
951 btf = obj->btf; in bpf_object__init_struct_ops_maps()
957 return -EINVAL; in bpf_object__init_struct_ops_maps()
963 type = btf__type_by_id(obj->btf, vsi->type); in bpf_object__init_struct_ops_maps()
964 var_name = btf__name_by_offset(obj->btf, type->name_off); in bpf_object__init_struct_ops_maps()
966 type_id = btf__resolve_type(obj->btf, vsi->type); in bpf_object__init_struct_ops_maps()
969 vsi->type, STRUCT_OPS_SEC); in bpf_object__init_struct_ops_maps()
970 return -EINVAL; in bpf_object__init_struct_ops_maps()
973 type = btf__type_by_id(obj->btf, type_id); in bpf_object__init_struct_ops_maps()
974 tname = btf__name_by_offset(obj->btf, type->name_off); in bpf_object__init_struct_ops_maps()
977 return -ENOTSUP; in bpf_object__init_struct_ops_maps()
981 return -EINVAL; in bpf_object__init_struct_ops_maps()
988 map->sec_idx = obj->efile.st_ops_shndx; in bpf_object__init_struct_ops_maps()
989 map->sec_offset = vsi->offset; in bpf_object__init_struct_ops_maps()
990 map->name = strdup(var_name); in bpf_object__init_struct_ops_maps()
991 if (!map->name) in bpf_object__init_struct_ops_maps()
992 return -ENOMEM; in bpf_object__init_struct_ops_maps()
994 map->def.type = BPF_MAP_TYPE_STRUCT_OPS; in bpf_object__init_struct_ops_maps()
995 map->def.key_size = sizeof(int); in bpf_object__init_struct_ops_maps()
996 map->def.value_size = type->size; in bpf_object__init_struct_ops_maps()
997 map->def.max_entries = 1; in bpf_object__init_struct_ops_maps()
999 map->st_ops = calloc(1, sizeof(*map->st_ops)); in bpf_object__init_struct_ops_maps()
1000 if (!map->st_ops) in bpf_object__init_struct_ops_maps()
1001 return -ENOMEM; in bpf_object__init_struct_ops_maps()
1002 st_ops = map->st_ops; in bpf_object__init_struct_ops_maps()
1003 st_ops->data = malloc(type->size); in bpf_object__init_struct_ops_maps()
1004 st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs)); in bpf_object__init_struct_ops_maps()
1005 st_ops->kern_func_off = malloc(btf_vlen(type) * in bpf_object__init_struct_ops_maps()
1006 sizeof(*st_ops->kern_func_off)); in bpf_object__init_struct_ops_maps()
1007 if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off) in bpf_object__init_struct_ops_maps()
1008 return -ENOMEM; in bpf_object__init_struct_ops_maps()
1010 if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) { in bpf_object__init_struct_ops_maps()
1013 return -EINVAL; in bpf_object__init_struct_ops_maps()
1016 memcpy(st_ops->data, in bpf_object__init_struct_ops_maps()
1017 obj->efile.st_ops_data->d_buf + vsi->offset, in bpf_object__init_struct_ops_maps()
1018 type->size); in bpf_object__init_struct_ops_maps()
1019 st_ops->tname = tname; in bpf_object__init_struct_ops_maps()
1020 st_ops->type = type; in bpf_object__init_struct_ops_maps()
1021 st_ops->type_id = type_id; in bpf_object__init_struct_ops_maps()
1024 tname, type_id, var_name, vsi->offset); in bpf_object__init_struct_ops_maps()
1041 return ERR_PTR(-ENOMEM); in bpf_object__new()
1044 strcpy(obj->path, path); in bpf_object__new()
1046 strncpy(obj->name, obj_name, sizeof(obj->name) - 1); in bpf_object__new()
1047 obj->name[sizeof(obj->name) - 1] = 0; in bpf_object__new()
1050 strncpy(obj->name, basename((void *)path), in bpf_object__new()
1051 sizeof(obj->name) - 1); in bpf_object__new()
1052 end = strchr(obj->name, '.'); in bpf_object__new()
1057 obj->efile.fd = -1; in bpf_object__new()
1059 * Caller of this function should also call in bpf_object__new()
1064 obj->efile.obj_buf = obj_buf; in bpf_object__new()
1065 obj->efile.obj_buf_sz = obj_buf_sz; in bpf_object__new()
1066 obj->efile.maps_shndx = -1; in bpf_object__new()
1067 obj->efile.btf_maps_shndx = -1; in bpf_object__new()
1068 obj->efile.data_shndx = -1; in bpf_object__new()
1069 obj->efile.rodata_shndx = -1; in bpf_object__new()
1070 obj->efile.bss_shndx = -1; in bpf_object__new()
1071 obj->efile.st_ops_shndx = -1; in bpf_object__new()
1072 obj->kconfig_map_idx = -1; in bpf_object__new()
1073 obj->rodata_map_idx = -1; in bpf_object__new()
1075 obj->kern_version = get_kernel_version(); in bpf_object__new()
1076 obj->loaded = false; in bpf_object__new()
1078 INIT_LIST_HEAD(&obj->list); in bpf_object__new()
1079 list_add(&obj->list, &bpf_objects_list); in bpf_object__new()
1088 if (obj->efile.elf) { in bpf_object__elf_finish()
1089 elf_end(obj->efile.elf); in bpf_object__elf_finish()
1090 obj->efile.elf = NULL; in bpf_object__elf_finish()
1092 obj->efile.symbols = NULL; in bpf_object__elf_finish()
1093 obj->efile.data = NULL; in bpf_object__elf_finish()
1094 obj->efile.rodata = NULL; in bpf_object__elf_finish()
1095 obj->efile.bss = NULL; in bpf_object__elf_finish()
1096 obj->efile.st_ops_data = NULL; in bpf_object__elf_finish()
1098 zfree(&obj->efile.reloc_sects); in bpf_object__elf_finish()
1099 obj->efile.nr_reloc_sects = 0; in bpf_object__elf_finish()
1100 zclose(obj->efile.fd); in bpf_object__elf_finish()
1101 obj->efile.obj_buf = NULL; in bpf_object__elf_finish()
1102 obj->efile.obj_buf_sz = 0; in bpf_object__elf_finish()
1117 return -LIBBPF_ERRNO__LIBELF; in bpf_object__elf_init()
1120 if (obj->efile.obj_buf_sz > 0) { in bpf_object__elf_init()
1125 obj->efile.elf = elf_memory((char *)obj->efile.obj_buf, in bpf_object__elf_init()
1126 obj->efile.obj_buf_sz); in bpf_object__elf_init()
1128 obj->efile.fd = open(obj->path, O_RDONLY); in bpf_object__elf_init()
1129 if (obj->efile.fd < 0) { in bpf_object__elf_init()
1132 err = -errno; in bpf_object__elf_init()
1134 pr_warn("elf: failed to open %s: %s\n", obj->path, cp); in bpf_object__elf_init()
1138 obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL); in bpf_object__elf_init()
1141 if (!obj->efile.elf) { in bpf_object__elf_init()
1142 pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1143 err = -LIBBPF_ERRNO__LIBELF; in bpf_object__elf_init()
1147 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { in bpf_object__elf_init()
1148 pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1149 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1152 ep = &obj->efile.ehdr; in bpf_object__elf_init()
1154 if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) { in bpf_object__elf_init()
1156 obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1157 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1162 if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) { in bpf_object__elf_init()
1164 obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1165 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1169 if (ep->e_type != ET_REL || in bpf_object__elf_init()
1170 (ep->e_machine && ep->e_machine != EM_BPF)) { in bpf_object__elf_init()
1171 pr_warn("elf: %s is not a valid eBPF object file\n", obj->path); in bpf_object__elf_init()
1172 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1185 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB) in bpf_object__check_endianness()
1188 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB) in bpf_object__check_endianness()
1193 pr_warn("elf: endianness mismatch in %s.\n", obj->path); in bpf_object__check_endianness()
1194 return -LIBBPF_ERRNO__ENDIAN; in bpf_object__check_endianness()
1200 memcpy(obj->license, data, min(size, sizeof(obj->license) - 1)); in bpf_object__init_license()
1201 pr_debug("license of %s is %s\n", obj->path, obj->license); in bpf_object__init_license()
1211 pr_warn("invalid kver section in %s\n", obj->path); in bpf_object__init_kversion()
1212 return -LIBBPF_ERRNO__FORMAT; in bpf_object__init_kversion()
1215 obj->kern_version = kver; in bpf_object__init_kversion()
1216 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version); in bpf_object__init_kversion()
1231 int ret = -ENOENT; in bpf_object__section_size()
1235 return -EINVAL; in bpf_object__section_size()
1237 if (obj->efile.data) in bpf_object__section_size()
1238 *size = obj->efile.data->d_size; in bpf_object__section_size()
1240 if (obj->efile.bss) in bpf_object__section_size()
1241 *size = obj->efile.bss->d_size; in bpf_object__section_size()
1243 if (obj->efile.rodata) in bpf_object__section_size()
1244 *size = obj->efile.rodata->d_size; in bpf_object__section_size()
1246 if (obj->efile.st_ops_data) in bpf_object__section_size()
1247 *size = obj->efile.st_ops_data->d_size; in bpf_object__section_size()
1254 *size = data->d_size; in bpf_object__section_size()
1264 Elf_Data *symbols = obj->efile.symbols; in bpf_object__variable_offset()
1269 return -EINVAL; in bpf_object__variable_offset()
1271 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) { in bpf_object__variable_offset()
1284 return -EIO; in bpf_object__variable_offset()
1292 return -ENOENT; in bpf_object__variable_offset()
1301 if (obj->nr_maps < obj->maps_cap) in bpf_object__add_map()
1302 return &obj->maps[obj->nr_maps++]; in bpf_object__add_map()
1304 new_cap = max((size_t)4, obj->maps_cap * 3 / 2); in bpf_object__add_map()
1305 new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps)); in bpf_object__add_map()
1308 return ERR_PTR(-ENOMEM); in bpf_object__add_map()
1311 obj->maps_cap = new_cap; in bpf_object__add_map()
1312 obj->maps = new_maps; in bpf_object__add_map()
1315 memset(obj->maps + obj->nr_maps, 0, in bpf_object__add_map()
1316 (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps)); in bpf_object__add_map()
1318 * fill all fds with -1 so we won't close an incorrect fd (fd=0 is stdin) in bpf_object__add_map()
1321 for (i = obj->nr_maps; i < obj->maps_cap; i++) { in bpf_object__add_map()
1322 obj->maps[i].fd = -1; in bpf_object__add_map()
1323 obj->maps[i].inner_map_fd = -1; in bpf_object__add_map()
1326 return &obj->maps[obj->nr_maps++]; in bpf_object__add_map()
1334 map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries; in bpf_map_mmap_sz()
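The line above is the core of bpf_map_mmap_sz(); a minimal sketch of the whole helper, assuming (as mmap() requires) that the result is also rounded up to the system page size:

    static size_t bpf_map_mmap_sz(const struct bpf_map *map)
    {
            long page_sz = sysconf(_SC_PAGE_SIZE);
            size_t map_sz;

            /* each element occupies a multiple of 8 bytes in the mmap()-ed view */
            map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
            return roundup(map_sz, page_sz);
    }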
1345 int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, in internal_map_name()
1346 strlen(obj->name)); in internal_map_name()
1348 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name, in internal_map_name()
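A worked example of the truncation above: kernel map names are capped at BPF_OBJ_NAME_LEN (16 bytes including the terminating NUL), so pfx_len shrinks the object-name prefix to whatever room remains after the type suffix (".data", ".rodata", ".bss", ".kconfig"); an object named "test_object" would therefore get a .rodata map named something like "test_ob.rodata", keeping the combined name within the kernel's fixed-size name field.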
1371 map->libbpf_type = type; in bpf_object__init_internal_map()
1372 map->sec_idx = sec_idx; in bpf_object__init_internal_map()
1373 map->sec_offset = 0; in bpf_object__init_internal_map()
1374 map->name = internal_map_name(obj, type); in bpf_object__init_internal_map()
1375 if (!map->name) { in bpf_object__init_internal_map()
1377 return -ENOMEM; in bpf_object__init_internal_map()
1380 def = &map->def; in bpf_object__init_internal_map()
1381 def->type = BPF_MAP_TYPE_ARRAY; in bpf_object__init_internal_map()
1382 def->key_size = sizeof(int); in bpf_object__init_internal_map()
1383 def->value_size = data_sz; in bpf_object__init_internal_map()
1384 def->max_entries = 1; in bpf_object__init_internal_map()
1385 def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG in bpf_object__init_internal_map()
1387 def->map_flags |= BPF_F_MMAPABLE; in bpf_object__init_internal_map()
1390 map->name, map->sec_idx, map->sec_offset, def->map_flags); in bpf_object__init_internal_map()
1392 map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE, in bpf_object__init_internal_map()
1393 MAP_SHARED | MAP_ANONYMOUS, -1, 0); in bpf_object__init_internal_map()
1394 if (map->mmaped == MAP_FAILED) { in bpf_object__init_internal_map()
1395 err = -errno; in bpf_object__init_internal_map()
1396 map->mmaped = NULL; in bpf_object__init_internal_map()
1398 map->name, err); in bpf_object__init_internal_map()
1399 zfree(&map->name); in bpf_object__init_internal_map()
1404 memcpy(map->mmaped, data, data_sz); in bpf_object__init_internal_map()
1406 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name); in bpf_object__init_internal_map()
1415 * Populate obj->maps with libbpf internal maps. in bpf_object__init_global_data_maps()
1417 if (obj->efile.data_shndx >= 0) { in bpf_object__init_global_data_maps()
1419 obj->efile.data_shndx, in bpf_object__init_global_data_maps()
1420 obj->efile.data->d_buf, in bpf_object__init_global_data_maps()
1421 obj->efile.data->d_size); in bpf_object__init_global_data_maps()
1425 if (obj->efile.rodata_shndx >= 0) { in bpf_object__init_global_data_maps()
1427 obj->efile.rodata_shndx, in bpf_object__init_global_data_maps()
1428 obj->efile.rodata->d_buf, in bpf_object__init_global_data_maps()
1429 obj->efile.rodata->d_size); in bpf_object__init_global_data_maps()
1433 obj->rodata_map_idx = obj->nr_maps - 1; in bpf_object__init_global_data_maps()
1435 if (obj->efile.bss_shndx >= 0) { in bpf_object__init_global_data_maps()
1437 obj->efile.bss_shndx, in bpf_object__init_global_data_maps()
1439 obj->efile.bss->d_size); in bpf_object__init_global_data_maps()
1452 for (i = 0; i < obj->nr_extern; i++) { in find_extern_by_name()
1453 if (strcmp(obj->externs[i].name, name) == 0) in find_extern_by_name()
1454 return &obj->externs[i]; in find_extern_by_name()
1462 switch (ext->kcfg.type) { in set_kcfg_value_tri()
1466 ext->name, value); in set_kcfg_value_tri()
1467 return -EINVAL; in set_kcfg_value_tri()
1487 ext->name, value); in set_kcfg_value_tri()
1488 return -EINVAL; in set_kcfg_value_tri()
1490 ext->is_set = true; in set_kcfg_value_tri()
1499 if (ext->kcfg.type != KCFG_CHAR_ARR) { in set_kcfg_value_str()
1500 pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value); in set_kcfg_value_str()
1501 return -EINVAL; in set_kcfg_value_str()
1505 if (value[len - 1] != '"') { in set_kcfg_value_str()
1507 ext->name, value); in set_kcfg_value_str()
1508 return -EINVAL; in set_kcfg_value_str()
1512 len -= 2; in set_kcfg_value_str()
1513 if (len >= ext->kcfg.sz) { in set_kcfg_value_str()
1515 ext->name, value, len, ext->kcfg.sz - 1); in set_kcfg_value_str()
1516 len = ext->kcfg.sz - 1; in set_kcfg_value_str()
1520 ext->is_set = true; in set_kcfg_value_str()
1532 err = -errno; in parse_u64()
1538 return -EINVAL; in parse_u64()
1545 int bit_sz = ext->kcfg.sz * 8; in is_kcfg_value_in_range()
1547 if (ext->kcfg.sz == 8) in is_kcfg_value_in_range()
1550 /* Validate that the value stored in u64 fits in an integer of `ext->sz` in is_kcfg_value_in_range()
1555 * -2^(Y-1) <= X <= 2^(Y-1) - 1 in is_kcfg_value_in_range()
1556 * 0 <= X + 2^(Y-1) <= 2^Y - 1 in is_kcfg_value_in_range()
1557 * 0 <= X + 2^(Y-1) < 2^Y in is_kcfg_value_in_range()
1559 * For unsigned target integer, check that all the (64 - Y) bits are in is_kcfg_value_in_range()
1562 if (ext->kcfg.is_signed) in is_kcfg_value_in_range()
1563 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); in is_kcfg_value_in_range()
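A worked example of the signed-range trick described in the comments: for a 1-byte target, Y = 8 and the valid range is -128 <= X <= 127; adding 2^(Y-1) = 128 shifts that to 0 <= X + 128 < 256, so the single unsigned comparison in the return statement,

    v + (1ULL << 7) < (1ULL << 8)

covers both bounds at once (a negative X wraps around in the u64 addition and lands below 256 exactly when it was >= -128).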
1571 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) { in set_kcfg_value_num()
1573 ext->name, (unsigned long long)value); in set_kcfg_value_num()
1574 return -EINVAL; in set_kcfg_value_num()
1578 ext->name, (unsigned long long)value, ext->kcfg.sz); in set_kcfg_value_num()
1579 return -ERANGE; in set_kcfg_value_num()
1581 switch (ext->kcfg.sz) { in set_kcfg_value_num()
1587 return -EINVAL; in set_kcfg_value_num()
1589 ext->is_set = true; in set_kcfg_value_num()
1608 return -EINVAL; in bpf_object__process_kconfig_line()
1613 if (buf[len - 1] == '\n') in bpf_object__process_kconfig_line()
1614 buf[len - 1] = '\0'; in bpf_object__process_kconfig_line()
1620 return -EINVAL; in bpf_object__process_kconfig_line()
1624 if (!ext || ext->is_set) in bpf_object__process_kconfig_line()
1627 ext_val = data + ext->kcfg.data_off; in bpf_object__process_kconfig_line()
1642 ext->name, value); in bpf_object__process_kconfig_line()
1650 pr_debug("extern (kcfg) %s=%s\n", ext->name, value); in bpf_object__process_kconfig_line()
1662 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release); in bpf_object__read_kconfig_file()
1664 return -EINVAL; in bpf_object__read_kconfig_file()
1666 return -ENAMETOOLONG; in bpf_object__read_kconfig_file()
1675 return -ENOENT; in bpf_object__read_kconfig_file()
1701 err = -errno; in bpf_object__read_kconfig_mem()
1702 pr_warn("failed to open in-memory Kconfig: %d\n", err); in bpf_object__read_kconfig_mem()
1709 pr_warn("error parsing in-memory Kconfig line '%s': %d\n", in bpf_object__read_kconfig_mem()
1725 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__init_kconfig_map()
1726 ext = &obj->externs[i]; in bpf_object__init_kconfig_map()
1727 if (ext->type == EXT_KCFG) in bpf_object__init_kconfig_map()
1734 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; in bpf_object__init_kconfig_map()
1736 obj->efile.symbols_shndx, in bpf_object__init_kconfig_map()
1741 obj->kconfig_map_idx = obj->nr_maps - 1; in bpf_object__init_kconfig_map()
1748 Elf_Data *symbols = obj->efile.symbols; in bpf_object__init_user_maps()
1753 if (obj->efile.maps_shndx < 0) in bpf_object__init_user_maps()
1757 return -EINVAL; in bpf_object__init_user_maps()
1760 scn = elf_sec_by_idx(obj, obj->efile.maps_shndx); in bpf_object__init_user_maps()
1764 obj->path); in bpf_object__init_user_maps()
1765 return -EINVAL; in bpf_object__init_user_maps()
1775 nr_syms = symbols->d_size / sizeof(GElf_Sym); in bpf_object__init_user_maps()
1781 if (sym.st_shndx != obj->efile.maps_shndx) in bpf_object__init_user_maps()
1787 nr_maps, data->d_size, obj->path); in bpf_object__init_user_maps()
1789 if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) { in bpf_object__init_user_maps()
1791 obj->path); in bpf_object__init_user_maps()
1792 return -EINVAL; in bpf_object__init_user_maps()
1794 map_def_sz = data->d_size / nr_maps; in bpf_object__init_user_maps()
1796 /* Fill obj->maps using data in "maps" section. */ in bpf_object__init_user_maps()
1805 if (sym.st_shndx != obj->efile.maps_shndx) in bpf_object__init_user_maps()
1815 i, obj->path); in bpf_object__init_user_maps()
1816 return -LIBBPF_ERRNO__FORMAT; in bpf_object__init_user_maps()
1819 map->libbpf_type = LIBBPF_MAP_UNSPEC; in bpf_object__init_user_maps()
1820 map->sec_idx = sym.st_shndx; in bpf_object__init_user_maps()
1821 map->sec_offset = sym.st_value; in bpf_object__init_user_maps()
1823 map_name, map->sec_idx, map->sec_offset); in bpf_object__init_user_maps()
1824 if (sym.st_value + map_def_sz > data->d_size) { in bpf_object__init_user_maps()
1826 obj->path, map_name); in bpf_object__init_user_maps()
1827 return -EINVAL; in bpf_object__init_user_maps()
1830 map->name = strdup(map_name); in bpf_object__init_user_maps()
1831 if (!map->name) { in bpf_object__init_user_maps()
1833 return -ENOMEM; in bpf_object__init_user_maps()
1835 pr_debug("map %d is \"%s\"\n", i, map->name); in bpf_object__init_user_maps()
1836 def = (struct bpf_map_def *)(data->d_buf + sym.st_value); in bpf_object__init_user_maps()
1844 memcpy(&map->def, def, map_def_sz); in bpf_object__init_user_maps()
1857 pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n", in bpf_object__init_user_maps()
1858 obj->path, map_name); in bpf_object__init_user_maps()
1860 return -EINVAL; in bpf_object__init_user_maps()
1863 memcpy(&map->def, def, sizeof(struct bpf_map_def)); in bpf_object__init_user_maps()
1879 *res_id = t->type; in skip_mods_and_typedefs()
1880 t = btf__type_by_id(btf, t->type); in skip_mods_and_typedefs()
1895 t = skip_mods_and_typedefs(btf, t->type, res_id); in resolve_func_ptr()
1933 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); in get_map_field_int()
1934 const char *name = btf__name_by_offset(btf, m->name_off); in get_map_field_int()
1944 arr_t = btf__type_by_id(btf, t->type); in get_map_field_int()
1947 map_name, name, t->type); in get_map_field_int()
1956 *res = arr_info->nelems; in get_map_field_int()
1970 return -EINVAL; in build_map_pin_path()
1972 return -ENAMETOOLONG; in build_map_pin_path()
1991 const char *name = btf__name_by_offset(obj->btf, m->name_off); in parse_btf_map_def()
1994 pr_warn("map '%s': invalid field #%d.\n", map->name, i); in parse_btf_map_def()
1995 return -EINVAL; in parse_btf_map_def()
1998 if (!get_map_field_int(map->name, obj->btf, m, in parse_btf_map_def()
1999 &map->def.type)) in parse_btf_map_def()
2000 return -EINVAL; in parse_btf_map_def()
2002 map->name, map->def.type); in parse_btf_map_def()
2004 if (!get_map_field_int(map->name, obj->btf, m, in parse_btf_map_def()
2005 &map->def.max_entries)) in parse_btf_map_def()
2006 return -EINVAL; in parse_btf_map_def()
2008 map->name, map->def.max_entries); in parse_btf_map_def()
2010 if (!get_map_field_int(map->name, obj->btf, m, in parse_btf_map_def()
2011 &map->def.map_flags)) in parse_btf_map_def()
2012 return -EINVAL; in parse_btf_map_def()
2014 map->name, map->def.map_flags); in parse_btf_map_def()
2016 if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node)) in parse_btf_map_def()
2017 return -EINVAL; in parse_btf_map_def()
2018 pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node); in parse_btf_map_def()
2022 if (!get_map_field_int(map->name, obj->btf, m, &sz)) in parse_btf_map_def()
2023 return -EINVAL; in parse_btf_map_def()
2025 map->name, sz); in parse_btf_map_def()
2026 if (map->def.key_size && map->def.key_size != sz) { in parse_btf_map_def()
2028 map->name, map->def.key_size, sz); in parse_btf_map_def()
2029 return -EINVAL; in parse_btf_map_def()
2031 map->def.key_size = sz; in parse_btf_map_def()
2035 t = btf__type_by_id(obj->btf, m->type); in parse_btf_map_def()
2038 map->name, m->type); in parse_btf_map_def()
2039 return -EINVAL; in parse_btf_map_def()
2043 map->name, btf_kind_str(t)); in parse_btf_map_def()
2044 return -EINVAL; in parse_btf_map_def()
2046 sz = btf__resolve_size(obj->btf, t->type); in parse_btf_map_def()
2049 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2053 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2054 if (map->def.key_size && map->def.key_size != sz) { in parse_btf_map_def()
2056 map->name, map->def.key_size, (ssize_t)sz); in parse_btf_map_def()
2057 return -EINVAL; in parse_btf_map_def()
2059 map->def.key_size = sz; in parse_btf_map_def()
2060 map->btf_key_type_id = t->type; in parse_btf_map_def()
2064 if (!get_map_field_int(map->name, obj->btf, m, &sz)) in parse_btf_map_def()
2065 return -EINVAL; in parse_btf_map_def()
2067 map->name, sz); in parse_btf_map_def()
2068 if (map->def.value_size && map->def.value_size != sz) { in parse_btf_map_def()
2070 map->name, map->def.value_size, sz); in parse_btf_map_def()
2071 return -EINVAL; in parse_btf_map_def()
2073 map->def.value_size = sz; in parse_btf_map_def()
2077 t = btf__type_by_id(obj->btf, m->type); in parse_btf_map_def()
2080 map->name, m->type); in parse_btf_map_def()
2081 return -EINVAL; in parse_btf_map_def()
2085 map->name, btf_kind_str(t)); in parse_btf_map_def()
2086 return -EINVAL; in parse_btf_map_def()
2088 sz = btf__resolve_size(obj->btf, t->type); in parse_btf_map_def()
2091 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2095 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2096 if (map->def.value_size && map->def.value_size != sz) { in parse_btf_map_def()
2098 map->name, map->def.value_size, (ssize_t)sz); in parse_btf_map_def()
2099 return -EINVAL; in parse_btf_map_def()
2101 map->def.value_size = sz; in parse_btf_map_def()
2102 map->btf_value_type_id = t->type; in parse_btf_map_def()
2108 pr_warn("map '%s': multi-level inner maps not supported.\n", in parse_btf_map_def()
2109 map->name); in parse_btf_map_def()
2110 return -ENOTSUP; in parse_btf_map_def()
2112 if (i != vlen - 1) { in parse_btf_map_def()
2114 map->name, name); in parse_btf_map_def()
2115 return -EINVAL; in parse_btf_map_def()
2117 if (!bpf_map_type__is_map_in_map(map->def.type)) { in parse_btf_map_def()
2118 pr_warn("map '%s': should be map-in-map.\n", in parse_btf_map_def()
2119 map->name); in parse_btf_map_def()
2120 return -ENOTSUP; in parse_btf_map_def()
2122 if (map->def.value_size && map->def.value_size != 4) { in parse_btf_map_def()
2124 map->name, map->def.value_size); in parse_btf_map_def()
2125 return -EINVAL; in parse_btf_map_def()
2127 map->def.value_size = 4; in parse_btf_map_def()
2128 t = btf__type_by_id(obj->btf, m->type); in parse_btf_map_def()
2130 pr_warn("map '%s': map-in-map inner type [%d] not found.\n", in parse_btf_map_def()
2131 map->name, m->type); in parse_btf_map_def()
2132 return -EINVAL; in parse_btf_map_def()
2134 if (!btf_is_array(t) || btf_array(t)->nelems) { in parse_btf_map_def()
2135 pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n", in parse_btf_map_def()
2136 map->name); in parse_btf_map_def()
2137 return -EINVAL; in parse_btf_map_def()
2139 t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type, in parse_btf_map_def()
2142 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", in parse_btf_map_def()
2143 map->name, btf_kind_str(t)); in parse_btf_map_def()
2144 return -EINVAL; in parse_btf_map_def()
2146 t = skip_mods_and_typedefs(obj->btf, t->type, NULL); in parse_btf_map_def()
2148 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", in parse_btf_map_def()
2149 map->name, btf_kind_str(t)); in parse_btf_map_def()
2150 return -EINVAL; in parse_btf_map_def()
2153 map->inner_map = calloc(1, sizeof(*map->inner_map)); in parse_btf_map_def()
2154 if (!map->inner_map) in parse_btf_map_def()
2155 return -ENOMEM; in parse_btf_map_def()
2156 map->inner_map->sec_idx = obj->efile.btf_maps_shndx; in parse_btf_map_def()
2157 map->inner_map->name = malloc(strlen(map->name) + in parse_btf_map_def()
2159 if (!map->inner_map->name) in parse_btf_map_def()
2160 return -ENOMEM; in parse_btf_map_def()
2161 sprintf(map->inner_map->name, "%s.inner", map->name); in parse_btf_map_def()
2163 err = parse_btf_map_def(obj, map->inner_map, t, strict, in parse_btf_map_def()
2173 map->name); in parse_btf_map_def()
2174 return -EINVAL; in parse_btf_map_def()
2176 if (!get_map_field_int(map->name, obj->btf, m, &val)) in parse_btf_map_def()
2177 return -EINVAL; in parse_btf_map_def()
2179 map->name, val); in parse_btf_map_def()
2184 map->name, val); in parse_btf_map_def()
2185 return -EINVAL; in parse_btf_map_def()
2191 map->name); in parse_btf_map_def()
2198 map->name, name); in parse_btf_map_def()
2199 return -ENOTSUP; in parse_btf_map_def()
2202 map->name, name); in parse_btf_map_def()
2206 if (map->def.type == BPF_MAP_TYPE_UNSPEC) { in parse_btf_map_def()
2207 pr_warn("map '%s': map type isn't specified.\n", map->name); in parse_btf_map_def()
2208 return -EINVAL; in parse_btf_map_def()
2227 var = btf__type_by_id(obj->btf, vi->type); in bpf_object__init_user_btf_map()
2229 map_name = btf__name_by_offset(obj->btf, var->name_off); in bpf_object__init_user_btf_map()
2233 return -EINVAL; in bpf_object__init_user_btf_map()
2235 if ((__u64)vi->offset + vi->size > data->d_size) { in bpf_object__init_user_btf_map()
2237 return -EINVAL; in bpf_object__init_user_btf_map()
2242 return -EINVAL; in bpf_object__init_user_btf_map()
2244 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED && in bpf_object__init_user_btf_map()
2245 var_extra->linkage != BTF_VAR_STATIC) { in bpf_object__init_user_btf_map()
2247 map_name, var_extra->linkage); in bpf_object__init_user_btf_map()
2248 return -EOPNOTSUPP; in bpf_object__init_user_btf_map()
2251 def = skip_mods_and_typedefs(obj->btf, var->type, NULL); in bpf_object__init_user_btf_map()
2255 return -EINVAL; in bpf_object__init_user_btf_map()
2257 if (def->size > vi->size) { in bpf_object__init_user_btf_map()
2259 return -EINVAL; in bpf_object__init_user_btf_map()
2265 map->name = strdup(map_name); in bpf_object__init_user_btf_map()
2266 if (!map->name) { in bpf_object__init_user_btf_map()
2268 return -ENOMEM; in bpf_object__init_user_btf_map()
2270 map->libbpf_type = LIBBPF_MAP_UNSPEC; in bpf_object__init_user_btf_map()
2271 map->def.type = BPF_MAP_TYPE_UNSPEC; in bpf_object__init_user_btf_map()
2272 map->sec_idx = sec_idx; in bpf_object__init_user_btf_map()
2273 map->sec_offset = vi->offset; in bpf_object__init_user_btf_map()
2274 map->btf_var_idx = var_idx; in bpf_object__init_user_btf_map()
2276 map_name, map->sec_idx, map->sec_offset); in bpf_object__init_user_btf_map()
2291 if (obj->efile.btf_maps_shndx < 0) in bpf_object__init_user_btf_maps()
2294 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx); in bpf_object__init_user_btf_maps()
2298 MAPS_ELF_SEC, obj->path); in bpf_object__init_user_btf_maps()
2299 return -EINVAL; in bpf_object__init_user_btf_maps()
2302 nr_types = btf__get_nr_types(obj->btf); in bpf_object__init_user_btf_maps()
2304 t = btf__type_by_id(obj->btf, i); in bpf_object__init_user_btf_maps()
2307 name = btf__name_by_offset(obj->btf, t->name_off); in bpf_object__init_user_btf_maps()
2310 obj->efile.btf_maps_sec_btf_id = i; in bpf_object__init_user_btf_maps()
2317 return -ENOENT; in bpf_object__init_user_btf_maps()
2323 obj->efile.btf_maps_shndx, in bpf_object__init_user_btf_maps()
2386 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); in bpf_object__sanitize_btf()
2392 t->size = 1; in bpf_object__sanitize_btf()
2401 name = (char *)btf__name_by_offset(btf, t->name_off); in bpf_object__sanitize_btf()
2409 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen); in bpf_object__sanitize_btf()
2412 m->offset = v->offset * 8; in bpf_object__sanitize_btf()
2413 m->type = v->type; in bpf_object__sanitize_btf()
2415 vt = (void *)btf__type_by_id(btf, v->type); in bpf_object__sanitize_btf()
2416 m->name_off = vt->name_off; in bpf_object__sanitize_btf()
2421 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen); in bpf_object__sanitize_btf()
2422 t->size = sizeof(__u32); /* kernel enforced */ in bpf_object__sanitize_btf()
2425 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0); in bpf_object__sanitize_btf()
2428 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0); in bpf_object__sanitize_btf()
2435 return obj->efile.btf_maps_shndx >= 0 || in libbpf_needs_btf()
2436 obj->efile.st_ops_shndx >= 0 || in libbpf_needs_btf()
2437 obj->nr_extern > 0; in libbpf_needs_btf()
2442 return obj->efile.st_ops_shndx >= 0; in kernel_needs_btf()
2449 int err = -ENOENT; in bpf_object__init_btf()
2452 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); in bpf_object__init_btf()
2453 if (IS_ERR(obj->btf)) { in bpf_object__init_btf()
2454 err = PTR_ERR(obj->btf); in bpf_object__init_btf()
2455 obj->btf = NULL; in bpf_object__init_btf()
2460 /* enforce 8-byte pointers for BPF-targeted BTFs */ in bpf_object__init_btf()
2461 btf__set_pointer_size(obj->btf, 8); in bpf_object__init_btf()
2465 if (!obj->btf) { in bpf_object__init_btf()
2470 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, in bpf_object__init_btf()
2471 btf_ext_data->d_size); in bpf_object__init_btf()
2472 if (IS_ERR(obj->btf_ext)) { in bpf_object__init_btf()
2474 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext)); in bpf_object__init_btf()
2475 obj->btf_ext = NULL; in bpf_object__init_btf()
2491 if (!obj->btf) in bpf_object__finalize_btf()
2494 err = btf__finalize_data(obj, obj->btf); in bpf_object__finalize_btf()
2505 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || in libbpf_prog_needs_vmlinux_btf()
2506 prog->type == BPF_PROG_TYPE_LSM) in libbpf_prog_needs_vmlinux_btf()
2512 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd) in libbpf_prog_needs_vmlinux_btf()
2524 /* CO-RE relocations need kernel BTF */ in bpf_object__load_vmlinux_btf()
2525 if (obj->btf_ext && obj->btf_ext->core_relo_info.len) in bpf_object__load_vmlinux_btf()
2529 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__load_vmlinux_btf()
2532 ext = &obj->externs[i]; in bpf_object__load_vmlinux_btf()
2533 if (ext->type == EXT_KSYM && ext->ksym.type_id) { in bpf_object__load_vmlinux_btf()
2540 if (!prog->load) in bpf_object__load_vmlinux_btf()
2551 obj->btf_vmlinux = libbpf_find_kernel_btf(); in bpf_object__load_vmlinux_btf()
2552 if (IS_ERR(obj->btf_vmlinux)) { in bpf_object__load_vmlinux_btf()
2553 err = PTR_ERR(obj->btf_vmlinux); in bpf_object__load_vmlinux_btf()
2555 obj->btf_vmlinux = NULL; in bpf_object__load_vmlinux_btf()
2563 struct btf *kern_btf = obj->btf; in bpf_object__sanitize_and_load_btf()
2567 if (!obj->btf) in bpf_object__sanitize_and_load_btf()
2572 err = -EOPNOTSUPP; in bpf_object__sanitize_and_load_btf()
2585 raw_data = btf__get_raw_data(obj->btf, &sz); in bpf_object__sanitize_and_load_btf()
2590 /* enforce 8-byte pointers for BPF-targeted BTFs */ in bpf_object__sanitize_and_load_btf()
2591 btf__set_pointer_size(obj->btf, 8); in bpf_object__sanitize_and_load_btf()
2599 btf__set_fd(obj->btf, btf__fd(kern_btf)); in bpf_object__sanitize_and_load_btf()
2600 btf__set_fd(kern_btf, -1); in bpf_object__sanitize_and_load_btf()
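What the two fd-juggling lines above accomplish: when sanitization was needed, kern_btf is a temporary, modified copy that was actually loaded into the kernel; transferring its fd to obj->btf and resetting the copy's fd to -1 lets the copy be freed without closing the descriptor the object now owns.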
2620 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off); in elf_sym_str()
2623 off, obj->path, elf_errmsg(-1)); in elf_sym_str()
2634 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off); in elf_sec_str()
2637 off, obj->path, elf_errmsg(-1)); in elf_sec_str()
2648 scn = elf_getscn(obj->efile.elf, idx); in elf_sec_by_idx()
2651 idx, obj->path, elf_errmsg(-1)); in elf_sec_by_idx()
2660 Elf *elf = obj->efile.elf; in elf_sec_by_name()
2679 return -EINVAL; in elf_sec_hdr()
2683 elf_ndxscn(scn), obj->path, elf_errmsg(-1)); in elf_sec_hdr()
2684 return -EINVAL; in elf_sec_hdr()
2704 elf_ndxscn(scn), obj->path, elf_errmsg(-1)); in elf_sec_name()
2722 obj->path, elf_errmsg(-1)); in elf_sec_data()
2732 Elf_Data *symbols = obj->efile.symbols; in elf_sym_by_sec_off()
2733 size_t n = symbols->d_size / sizeof(GElf_Sym); in elf_sym_by_sec_off()
2739 if (sym->st_shndx != sec_idx || sym->st_value != off) in elf_sym_by_sec_off()
2741 if (GELF_ST_TYPE(sym->st_info) != sym_type) in elf_sym_by_sec_off()
2746 return -ENOENT; in elf_sym_by_sec_off()
2752 return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0; in is_sec_name_dwarf()
2758 if (hdr->sh_type == SHT_STRTAB) in ignore_elf_section()
2762 if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */) in ignore_elf_section()
2766 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 && in ignore_elf_section()
2774 if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) { in ignore_elf_section()
2775 name += sizeof(".rel") - 1; in ignore_elf_section()
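The sizeof(".rel") - 1 expression above is a compile-time strlen of the string literal; the idiom can be wrapped in a helper (hypothetical name, shown only to illustrate the pattern):

    /* compile-time prefix match over a string literal */
    #define str_has_pfx(str, pfx) \
            (strncmp(str, pfx, sizeof(pfx) - 1) == 0)

With it, is_sec_name_dwarf() reduces to str_has_pfx(name, ".debug_"), and the check above becomes str_has_pfx(name, ".rel") followed by skipping past the prefix.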
2794 if (a->sec_idx != b->sec_idx) in cmp_progs()
2795 return a->sec_idx < b->sec_idx ? -1 : 1; in cmp_progs()
2798 return a->sec_insn_off < b->sec_insn_off ? -1 : 1; in cmp_progs()
2803 Elf *elf = obj->efile.elf; in bpf_object__elf_collect()
2818 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2821 if (obj->efile.symbols) { in bpf_object__elf_collect()
2822 pr_warn("elf: multiple symbol tables in %s\n", obj->path); in bpf_object__elf_collect()
2823 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2828 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2830 obj->efile.symbols = data; in bpf_object__elf_collect()
2831 obj->efile.symbols_shndx = elf_ndxscn(scn); in bpf_object__elf_collect()
2832 obj->efile.strtabidx = sh.sh_link; in bpf_object__elf_collect()
2841 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2845 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2852 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2855 idx, name, (unsigned long)data->d_size, in bpf_object__elf_collect()
2860 err = bpf_object__init_license(obj, data->d_buf, data->d_size); in bpf_object__elf_collect()
2864 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size); in bpf_object__elf_collect()
2868 obj->efile.maps_shndx = idx; in bpf_object__elf_collect()
2870 obj->efile.btf_maps_shndx = idx; in bpf_object__elf_collect()
2877 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) { in bpf_object__elf_collect()
2880 obj->efile.text_shndx = idx; in bpf_object__elf_collect()
2885 obj->efile.data = data; in bpf_object__elf_collect()
2886 obj->efile.data_shndx = idx; in bpf_object__elf_collect()
2888 obj->efile.rodata = data; in bpf_object__elf_collect()
2889 obj->efile.rodata_shndx = idx; in bpf_object__elf_collect()
2891 obj->efile.st_ops_data = data; in bpf_object__elf_collect()
2892 obj->efile.st_ops_shndx = idx; in bpf_object__elf_collect()
2898 int nr_sects = obj->efile.nr_reloc_sects; in bpf_object__elf_collect()
2899 void *sects = obj->efile.reloc_sects; in bpf_object__elf_collect()
2913 sizeof(*obj->efile.reloc_sects)); in bpf_object__elf_collect()
2915 return -ENOMEM; in bpf_object__elf_collect()
2917 obj->efile.reloc_sects = sects; in bpf_object__elf_collect()
2918 obj->efile.nr_reloc_sects++; in bpf_object__elf_collect()
2920 obj->efile.reloc_sects[nr_sects].shdr = sh; in bpf_object__elf_collect()
2921 obj->efile.reloc_sects[nr_sects].data = data; in bpf_object__elf_collect()
2923 obj->efile.bss = data; in bpf_object__elf_collect()
2924 obj->efile.bss_shndx = idx; in bpf_object__elf_collect()
2931 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { in bpf_object__elf_collect()
2932 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path); in bpf_object__elf_collect()
2933 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2936 /* sort BPF programs by section index and in-section instruction offset in bpf_object__elf_collect()
2938 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); in bpf_object__elf_collect()
2945 int bind = GELF_ST_BIND(sym->st_info); in sym_is_extern()
2947 return sym->st_shndx == SHN_UNDEF && in sym_is_extern()
2949 GELF_ST_TYPE(sym->st_info) == STT_NOTYPE; in sym_is_extern()
2959 return -ESRCH; in find_extern_btf_id()
2968 var_name = btf__name_by_offset(btf, t->name_off); in find_extern_btf_id()
2972 if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN) in find_extern_btf_id()
2973 return -EINVAL; in find_extern_btf_id()
2978 return -ENOENT; in find_extern_btf_id()
2987 return -ESRCH; in find_extern_sec_btf_id()
2998 if (vs->type == ext_btf_id) in find_extern_sec_btf_id()
3003 return -ENOENT; in find_extern_sec_btf_id()
3013 name = btf__name_by_offset(btf, t->name_off); in find_kcfg_type()
3022 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN; in find_kcfg_type()
3025 if (t->size == 1) in find_kcfg_type()
3027 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) in find_kcfg_type()
3032 if (t->size != 4) in find_kcfg_type()
3038 if (btf_array(t)->nelems == 0) in find_kcfg_type()
3040 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) in find_kcfg_type()
3053 if (a->type != b->type) in cmp_externs()
3054 return a->type < b->type ? -1 : 1; in cmp_externs()
3056 if (a->type == EXT_KCFG) { in cmp_externs()
3058 if (a->kcfg.align != b->kcfg.align) in cmp_externs()
3059 return a->kcfg.align > b->kcfg.align ? -1 : 1; in cmp_externs()
3061 if (a->kcfg.sz != b->kcfg.sz) in cmp_externs()
3062 return a->kcfg.sz < b->kcfg.sz ? -1 : 1; in cmp_externs()
3066 return strcmp(a->name, b->name); in cmp_externs()
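The ordering established above is what keeps the later .kconfig layout compact: kcfg externs come out grouped together and sorted by decreasing alignment, so the roundup(off, ext->kcfg.align) in the data_off assignment further down in this listing inserts little or no padding between them.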
3095 if (!obj->efile.symbols) in bpf_object__collect_externs()
3098 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); in bpf_object__collect_externs()
3100 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_externs()
3108 if (!gelf_getsym(obj->efile.symbols, i, &sym)) in bpf_object__collect_externs()
3109 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_externs()
3116 ext = obj->externs; in bpf_object__collect_externs()
3117 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext)); in bpf_object__collect_externs()
3119 return -ENOMEM; in bpf_object__collect_externs()
3120 obj->externs = ext; in bpf_object__collect_externs()
3121 ext = &ext[obj->nr_extern]; in bpf_object__collect_externs()
3123 obj->nr_extern++; in bpf_object__collect_externs()
3125 ext->btf_id = find_extern_btf_id(obj->btf, ext_name); in bpf_object__collect_externs()
3126 if (ext->btf_id <= 0) { in bpf_object__collect_externs()
3128 ext_name, ext->btf_id); in bpf_object__collect_externs()
3129 return ext->btf_id; in bpf_object__collect_externs()
3131 t = btf__type_by_id(obj->btf, ext->btf_id); in bpf_object__collect_externs()
3132 ext->name = btf__name_by_offset(obj->btf, t->name_off); in bpf_object__collect_externs()
3133 ext->sym_idx = i; in bpf_object__collect_externs()
3134 ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK; in bpf_object__collect_externs()
3136 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); in bpf_object__collect_externs()
3137 if (ext->sec_btf_id <= 0) { in bpf_object__collect_externs()
3139 ext_name, ext->btf_id, ext->sec_btf_id); in bpf_object__collect_externs()
3140 return ext->sec_btf_id; in bpf_object__collect_externs()
3142 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id); in bpf_object__collect_externs()
3143 sec_name = btf__name_by_offset(obj->btf, sec->name_off); in bpf_object__collect_externs()
3147 ext->type = EXT_KCFG; in bpf_object__collect_externs()
3148 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type); in bpf_object__collect_externs()
3149 if (ext->kcfg.sz <= 0) { in bpf_object__collect_externs()
3151 ext_name, ext->kcfg.sz); in bpf_object__collect_externs()
3152 return ext->kcfg.sz; in bpf_object__collect_externs()
3154 ext->kcfg.align = btf__align_of(obj->btf, t->type); in bpf_object__collect_externs()
3155 if (ext->kcfg.align <= 0) { in bpf_object__collect_externs()
3157 ext_name, ext->kcfg.align); in bpf_object__collect_externs()
3158 return -EINVAL; in bpf_object__collect_externs()
3160 ext->kcfg.type = find_kcfg_type(obj->btf, t->type, in bpf_object__collect_externs()
3161 &ext->kcfg.is_signed); in bpf_object__collect_externs()
3162 if (ext->kcfg.type == KCFG_UNKNOWN) { in bpf_object__collect_externs()
3164 return -ENOTSUP; in bpf_object__collect_externs()
3168 ext->type = EXT_KSYM; in bpf_object__collect_externs()
3169 skip_mods_and_typedefs(obj->btf, t->type, in bpf_object__collect_externs()
3170 &ext->ksym.type_id); in bpf_object__collect_externs()
3173 return -ENOTSUP; in bpf_object__collect_externs()
3176 pr_debug("collected %d externs total\n", obj->nr_extern); in bpf_object__collect_externs()
3178 if (!obj->nr_extern) in bpf_object__collect_externs()
3182 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); in bpf_object__collect_externs()
3186 * pretending that each extern is an 8-byte variable in bpf_object__collect_externs()
3189 /* find existing 4-byte integer type in BTF to use for fake in bpf_object__collect_externs()
3192 int int_btf_id = find_int_btf_id(obj->btf); in bpf_object__collect_externs()
3194 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__collect_externs()
3195 ext = &obj->externs[i]; in bpf_object__collect_externs()
3196 if (ext->type != EXT_KSYM) in bpf_object__collect_externs()
3199 i, ext->sym_idx, ext->name); in bpf_object__collect_externs()
3208 vt = (void *)btf__type_by_id(obj->btf, vs->type); in bpf_object__collect_externs()
3209 ext_name = btf__name_by_offset(obj->btf, vt->name_off); in bpf_object__collect_externs()
3214 return -ESRCH; in bpf_object__collect_externs()
3216 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED; in bpf_object__collect_externs()
3217 vt->type = int_btf_id; in bpf_object__collect_externs()
3218 vs->offset = off; in bpf_object__collect_externs()
3219 vs->size = sizeof(int); in bpf_object__collect_externs()
3221 sec->size = off; in bpf_object__collect_externs()
3228 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__collect_externs()
3229 ext = &obj->externs[i]; in bpf_object__collect_externs()
3230 if (ext->type != EXT_KCFG) in bpf_object__collect_externs()
3233 ext->kcfg.data_off = roundup(off, ext->kcfg.align); in bpf_object__collect_externs()
3234 off = ext->kcfg.data_off + ext->kcfg.sz; in bpf_object__collect_externs()
3236 i, ext->sym_idx, ext->kcfg.data_off, ext->name); in bpf_object__collect_externs()
3238 sec->size = off; in bpf_object__collect_externs()
3243 t = btf__type_by_id(obj->btf, vs->type); in bpf_object__collect_externs()
3244 ext_name = btf__name_by_offset(obj->btf, t->name_off); in bpf_object__collect_externs()
3249 return -ESRCH; in bpf_object__collect_externs()
3251 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; in bpf_object__collect_externs()
3252 vs->offset = ext->kcfg.data_off; in bpf_object__collect_externs()
3265 if (pos->sec_name && !strcmp(pos->sec_name, title)) in bpf_object__find_program_by_title()
3274 /* For legacy reasons, libbpf supports entry-point BPF programs in prog_is_subprog()
3277 * must be subprograms called from entry-point BPF programs in in prog_is_subprog()
3280 * Similarly, if there is a function/program in .text and at least one in prog_is_subprog()
3284 * SEC()-designated BPF programs and .text entry-point BPF programs. in prog_is_subprog()
3286 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1; in prog_is_subprog()
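Concretely: in an object containing a SEC("xdp") program plus static helper functions compiled into .text, every .text function satisfies this condition and is treated as a subprogram; if .text holds the only program in the object, nr_programs == 1 fails the check and that program is kept as a legacy entry point.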
3298 if (!strcmp(prog->name, name)) in bpf_object__find_program_by_name()
3307 return shndx == obj->efile.data_shndx || in bpf_object__shndx_is_data()
3308 shndx == obj->efile.bss_shndx || in bpf_object__shndx_is_data()
3309 shndx == obj->efile.rodata_shndx; in bpf_object__shndx_is_data()
3315 return shndx == obj->efile.maps_shndx || in bpf_object__shndx_is_maps()
3316 shndx == obj->efile.btf_maps_shndx; in bpf_object__shndx_is_maps()
3322 if (shndx == obj->efile.data_shndx) in bpf_object__section_to_libbpf_map_type()
3324 else if (shndx == obj->efile.bss_shndx) in bpf_object__section_to_libbpf_map_type()
3326 else if (shndx == obj->efile.rodata_shndx) in bpf_object__section_to_libbpf_map_type()
3328 else if (shndx == obj->efile.symbols_shndx) in bpf_object__section_to_libbpf_map_type()
3339 struct bpf_insn *insn = &prog->insns[insn_idx]; in bpf_program__record_reloc()
3340 size_t map_idx, nr_maps = prog->obj->nr_maps; in bpf_program__record_reloc()
3341 struct bpf_object *obj = prog->obj; in bpf_program__record_reloc()
3342 __u32 shdr_idx = sym->st_shndx; in bpf_program__record_reloc()
3347 reloc_desc->processed = false; in bpf_program__record_reloc()
3349 /* sub-program call relocation */ in bpf_program__record_reloc()
3350 if (insn->code == (BPF_JMP | BPF_CALL)) { in bpf_program__record_reloc()
3351 if (insn->src_reg != BPF_PSEUDO_CALL) { in bpf_program__record_reloc()
3352 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name); in bpf_program__record_reloc()
3353 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3356 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) { in bpf_program__record_reloc()
3359 prog->name, sym_name, sym_sec_name); in bpf_program__record_reloc()
3360 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3362 if (sym->st_value % BPF_INSN_SZ) { in bpf_program__record_reloc()
3364 prog->name, sym_name, (size_t)sym->st_value); in bpf_program__record_reloc()
3365 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3367 reloc_desc->type = RELO_CALL; in bpf_program__record_reloc()
3368 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3369 reloc_desc->sym_off = sym->st_value; in bpf_program__record_reloc()
3373 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) { in bpf_program__record_reloc()
3375 prog->name, sym_name, insn_idx, insn->code); in bpf_program__record_reloc()
3376 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3380 int sym_idx = GELF_R_SYM(rel->r_info); in bpf_program__record_reloc()
3381 int i, n = obj->nr_extern; in bpf_program__record_reloc()
3385 ext = &obj->externs[i]; in bpf_program__record_reloc()
3386 if (ext->sym_idx == sym_idx) in bpf_program__record_reloc()
3391 prog->name, sym_name, sym_idx); in bpf_program__record_reloc()
3392 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3395 prog->name, i, ext->name, ext->sym_idx, insn_idx); in bpf_program__record_reloc()
3396 reloc_desc->type = RELO_EXTERN; in bpf_program__record_reloc()
3397 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3398 reloc_desc->sym_off = i; /* sym_off stores extern index */ in bpf_program__record_reloc()
3404 prog->name, sym_name, shdr_idx); in bpf_program__record_reloc()
3405 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3415 prog->name, sym_name, sym_sec_name); in bpf_program__record_reloc()
3416 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3419 map = &obj->maps[map_idx]; in bpf_program__record_reloc()
3420 if (map->libbpf_type != type || in bpf_program__record_reloc()
3421 map->sec_idx != sym->st_shndx || in bpf_program__record_reloc()
3422 map->sec_offset != sym->st_value) in bpf_program__record_reloc()
3425 prog->name, map_idx, map->name, map->sec_idx, in bpf_program__record_reloc()
3426 map->sec_offset, insn_idx); in bpf_program__record_reloc()
3431 prog->name, sym_sec_name, (size_t)sym->st_value); in bpf_program__record_reloc()
3432 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3434 reloc_desc->type = RELO_LD64; in bpf_program__record_reloc()
3435 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3436 reloc_desc->map_idx = map_idx; in bpf_program__record_reloc()
3437 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */ in bpf_program__record_reloc()
3444 prog->name, sym_sec_name); in bpf_program__record_reloc()
3445 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3448 map = &obj->maps[map_idx]; in bpf_program__record_reloc()
3449 if (map->libbpf_type != type) in bpf_program__record_reloc()
3452 prog->name, map_idx, map->name, map->sec_idx, in bpf_program__record_reloc()
3453 map->sec_offset, insn_idx); in bpf_program__record_reloc()
3458 prog->name, sym_sec_name); in bpf_program__record_reloc()
3459 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3462 reloc_desc->type = RELO_DATA; in bpf_program__record_reloc()
3463 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3464 reloc_desc->map_idx = map_idx; in bpf_program__record_reloc()
3465 reloc_desc->sym_off = sym->st_value; in bpf_program__record_reloc()
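/* Illustrative sketch (editorial; hypothetical names, not part of libbpf):
 * roughly how BPF C source constructs map to the reloc_desc types recorded
 * above:
 *
 *   struct { ... } my_map SEC(".maps");        // ld_imm64 map ref -> RELO_LD64
 *   int my_counter;                            // .bss/.data access -> RELO_DATA
 *   extern int LINUX_KERNEL_VERSION __kconfig; // extern           -> RELO_EXTERN
 *   static int helper(void) { ... }            // bpf_call insn    -> RELO_CALL
 *
 * For RELO_LD64, sym_off stays 0 (sym->st_value selects the map), while for
 * RELO_DATA sym_off records the variable's offset within its section.
 */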
3471 return insn_idx >= prog->sec_insn_off && in prog_contains_insn()
3472 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt; in prog_contains_insn()
3478 int l = 0, r = obj->nr_programs - 1, m; in find_prog_by_sec_insn()
3482 m = l + (r - l + 1) / 2; in find_prog_by_sec_insn()
3483 prog = &obj->programs[m]; in find_prog_by_sec_insn()
3485 if (prog->sec_idx < sec_idx || in find_prog_by_sec_insn()
3486 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx)) in find_prog_by_sec_insn()
3489 r = m - 1; in find_prog_by_sec_insn()
3494 prog = &obj->programs[l]; in find_prog_by_sec_insn()
3495 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx)) in find_prog_by_sec_insn()
3503 Elf_Data *symbols = obj->efile.symbols; in bpf_object__collect_prog_relos()
3505 size_t sec_idx = shdr->sh_info; in bpf_object__collect_prog_relos()
3514 relo_sec_name = elf_sec_str(obj, shdr->sh_name); in bpf_object__collect_prog_relos()
3517 return -EINVAL; in bpf_object__collect_prog_relos()
3521 nrels = shdr->sh_size / shdr->sh_entsize; in bpf_object__collect_prog_relos()
3526 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_prog_relos()
3531 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_prog_relos()
3536 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_prog_relos()
3541 * relocations against the section that contains a function; in bpf_object__collect_prog_relos()
3559 return -LIBBPF_ERRNO__RELOC; in bpf_object__collect_prog_relos()
3562 relos = libbpf_reallocarray(prog->reloc_desc, in bpf_object__collect_prog_relos()
3563 prog->nr_reloc + 1, sizeof(*relos)); in bpf_object__collect_prog_relos()
3565 return -ENOMEM; in bpf_object__collect_prog_relos()
3566 prog->reloc_desc = relos; in bpf_object__collect_prog_relos()
3569 insn_idx -= prog->sec_insn_off; in bpf_object__collect_prog_relos()
3570 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc], in bpf_object__collect_prog_relos()
3575 prog->nr_reloc++; in bpf_object__collect_prog_relos()
3582 struct bpf_map_def *def = &map->def; in bpf_map_find_btf_info()
3586 /* if it's a BTF-defined map, we don't need to search for type IDs. in bpf_map_find_btf_info()
3590 if (map->sec_idx == obj->efile.btf_maps_shndx || in bpf_map_find_btf_info()
3595 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size, in bpf_map_find_btf_info()
3596 def->value_size, &key_type_id, in bpf_map_find_btf_info()
3603 ret = btf__find_by_name(obj->btf, in bpf_map_find_btf_info()
3604 libbpf_type_to_btf_name[map->libbpf_type]); in bpf_map_find_btf_info()
3609 map->btf_key_type_id = key_type_id; in bpf_map_find_btf_info()
3610 map->btf_value_type_id = bpf_map__is_internal(map) ? in bpf_map_find_btf_info()
3628 return -errno; in bpf_map__reuse_fd()
3632 err = -errno; in bpf_map__reuse_fd()
3638 err = -errno; in bpf_map__reuse_fd()
3642 err = zclose(map->fd); in bpf_map__reuse_fd()
3644 err = -errno; in bpf_map__reuse_fd()
3647 free(map->name); in bpf_map__reuse_fd()
3649 map->fd = new_fd; in bpf_map__reuse_fd()
3650 map->name = new_name; in bpf_map__reuse_fd()
3651 map->def.type = info.type; in bpf_map__reuse_fd()
3652 map->def.key_size = info.key_size; in bpf_map__reuse_fd()
3653 map->def.value_size = info.value_size; in bpf_map__reuse_fd()
3654 map->def.max_entries = info.max_entries; in bpf_map__reuse_fd()
3655 map->def.map_flags = info.map_flags; in bpf_map__reuse_fd()
3656 map->btf_key_type_id = info.btf_key_type_id; in bpf_map__reuse_fd()
3657 map->btf_value_type_id = info.btf_value_type_id; in bpf_map__reuse_fd()
3658 map->reused = true; in bpf_map__reuse_fd()
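/* Usage sketch (editorial; pin path and map name are hypothetical):
 * bpf_map__reuse_fd() lets a freshly opened object adopt an already
 * existing map before load:
 *
 *   int fd = bpf_obj_get("/sys/fs/bpf/shared_map");
 *   struct bpf_map *m = bpf_object__find_map_by_name(obj, "shared_map");
 *   if (fd >= 0 && m)
 *       err = bpf_map__reuse_fd(m, fd);  // must happen before bpf_object__load()
 *
 * On success the map's definition and BTF type IDs are overwritten from the
 * kernel's map info, as done just above.
 */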
3671 return map->def.max_entries; in bpf_map__max_entries()
3676 if (map->fd >= 0) in bpf_map__set_max_entries()
3677 return -EBUSY; in bpf_map__set_max_entries()
3678 map->def.max_entries = max_entries; in bpf_map__set_max_entries()
3685 return -EINVAL; in bpf_map__resize()
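/* Usage sketch (editorial; map name is hypothetical): resizing is an
 * open-time operation; once the map has a valid fd,
 * bpf_map__set_max_entries() returns -EBUSY:
 *
 *   struct bpf_object *obj = bpf_object__open("prog.bpf.o");
 *   struct bpf_map *map = bpf_object__find_map_by_name(obj, "events");
 *   bpf_map__set_max_entries(map, 4 * libbpf_num_possible_cpus());
 *   bpf_object__load(obj);  // map is created with the new size
 */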
3717 return -ret; in bpf_object__probe_loading()
3773 ret = -errno; in probe_kern_global_data()
3776 __func__, cp, -ret); in probe_kern_global_data()
3883 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS) in probe_kern_exp_attach_type()
3901 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */ in probe_kern_probe_read_kernel()
3936 ret = -errno; in probe_prog_bind_map()
3939 __func__, cp, -ret); in probe_prog_bind_map()
3989 "BTF global function", probe_kern_btf_func_global,
4014 if (READ_ONCE(feat->res) == FEAT_UNKNOWN) { in kernel_supports()
4015 ret = feat->probe(); in kernel_supports()
4017 WRITE_ONCE(feat->res, FEAT_SUPPORTED); in kernel_supports()
4019 WRITE_ONCE(feat->res, FEAT_MISSING); in kernel_supports()
4021 pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret); in kernel_supports()
4022 WRITE_ONCE(feat->res, FEAT_MISSING); in kernel_supports()
4026 return READ_ONCE(feat->res) == FEAT_SUPPORTED; in kernel_supports()
4043 return (map_info.type == map->def.type && in map_is_reuse_compat()
4044 map_info.key_size == map->def.key_size && in map_is_reuse_compat()
4045 map_info.value_size == map->def.value_size && in map_is_reuse_compat()
4046 map_info.max_entries == map->def.max_entries && in map_is_reuse_compat()
4047 map_info.map_flags == map->def.map_flags); in map_is_reuse_compat()
4056 pin_fd = bpf_obj_get(map->pin_path); in bpf_object__reuse_map()
4058 err = -errno; in bpf_object__reuse_map()
4059 if (err == -ENOENT) { in bpf_object__reuse_map()
4061 map->pin_path); in bpf_object__reuse_map()
4065 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); in bpf_object__reuse_map()
4067 map->pin_path, cp); in bpf_object__reuse_map()
4073 map->pin_path); in bpf_object__reuse_map()
4075 return -EINVAL; in bpf_object__reuse_map()
4083 map->pinned = true; in bpf_object__reuse_map()
4084 pr_debug("reused pinned map at '%s'\n", map->pin_path); in bpf_object__reuse_map()
4092 enum libbpf_map_type map_type = map->libbpf_type; in bpf_object__populate_internal_map()
4096 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); in bpf_object__populate_internal_map()
4098 err = -errno; in bpf_object__populate_internal_map()
4101 map->name, cp); in bpf_object__populate_internal_map()
4105 /* Freeze .rodata and .kconfig maps as read-only from the syscall side. */ in bpf_object__populate_internal_map()
4107 err = bpf_map_freeze(map->fd); in bpf_object__populate_internal_map()
4109 err = -errno; in bpf_object__populate_internal_map()
4111 pr_warn("Error freezing map(%s) as read-only: %s\n", in bpf_object__populate_internal_map()
4112 map->name, cp); in bpf_object__populate_internal_map()
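/* Note (editorial): once frozen, syscall-side writes are rejected while BPF
 * programs keep read-only access; a hypothetical post-load update fails:
 *
 *   int key = 0;
 *   bpf_map_update_elem(bpf_map__fd(rodata_map), &key, buf, 0);
 *   // fails with errno == EPERM after bpf_map_freeze()
 */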
4124 struct bpf_map_def *def = &map->def; in bpf_object__create_map()
4129 create_attr.name = map->name; in bpf_object__create_map()
4130 create_attr.map_ifindex = map->map_ifindex; in bpf_object__create_map()
4131 create_attr.map_type = def->type; in bpf_object__create_map()
4132 create_attr.map_flags = def->map_flags; in bpf_object__create_map()
4133 create_attr.key_size = def->key_size; in bpf_object__create_map()
4134 create_attr.value_size = def->value_size; in bpf_object__create_map()
4135 create_attr.numa_node = map->numa_node; in bpf_object__create_map()
4137 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) { in bpf_object__create_map()
4143 map->name, nr_cpus); in bpf_object__create_map()
4146 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); in bpf_object__create_map()
4149 create_attr.max_entries = def->max_entries; in bpf_object__create_map()
4154 map->btf_vmlinux_value_type_id; in bpf_object__create_map()
4159 if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) { in bpf_object__create_map()
4160 create_attr.btf_fd = btf__fd(obj->btf); in bpf_object__create_map()
4161 create_attr.btf_key_type_id = map->btf_key_type_id; in bpf_object__create_map()
4162 create_attr.btf_value_type_id = map->btf_value_type_id; in bpf_object__create_map()
4165 if (bpf_map_type__is_map_in_map(def->type)) { in bpf_object__create_map()
4166 if (map->inner_map) { in bpf_object__create_map()
4169 err = bpf_object__create_map(obj, map->inner_map); in bpf_object__create_map()
4172 map->name, err); in bpf_object__create_map()
4175 map->inner_map_fd = bpf_map__fd(map->inner_map); in bpf_object__create_map()
4177 if (map->inner_map_fd >= 0) in bpf_object__create_map()
4178 create_attr.inner_map_fd = map->inner_map_fd; in bpf_object__create_map()
4181 map->fd = bpf_create_map_xattr(&create_attr); in bpf_object__create_map()
4182 if (map->fd < 0 && (create_attr.btf_key_type_id || in bpf_object__create_map()
4185 int err = -errno; in bpf_object__create_map()
4189 map->name, cp, err); in bpf_object__create_map()
4193 map->btf_key_type_id = 0; in bpf_object__create_map()
4194 map->btf_value_type_id = 0; in bpf_object__create_map()
4195 map->fd = bpf_create_map_xattr(&create_attr); in bpf_object__create_map()
4198 if (map->fd < 0) in bpf_object__create_map()
4199 return -errno; in bpf_object__create_map()
4201 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) { in bpf_object__create_map()
4202 bpf_map__destroy(map->inner_map); in bpf_object__create_map()
4203 zfree(&map->inner_map); in bpf_object__create_map()
4215 for (i = 0; i < map->init_slots_sz; i++) { in init_map_slots()
4216 if (!map->init_slots[i]) in init_map_slots()
4219 targ_map = map->init_slots[i]; in init_map_slots()
4221 err = bpf_map_update_elem(map->fd, &i, &fd, 0); in init_map_slots()
4223 err = -errno; in init_map_slots()
4225 map->name, i, targ_map->name, in init_map_slots()
4230 map->name, i, targ_map->name, fd); in init_map_slots()
4233 zfree(&map->init_slots); in init_map_slots()
4234 map->init_slots_sz = 0; in init_map_slots()
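/* Sketch of where init_slots come from (editorial; inner/outer map names are
 * hypothetical): a BTF-defined map-in-map with a static initializer, e.g.
 *
 *   struct {
 *       __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *       __uint(max_entries, 2);
 *       __uint(key_size, sizeof(int));
 *       __array(values, struct inner_map);
 *   } outer SEC(".maps") = {
 *       .values = { &inner_a, &inner_b },
 *   };
 *
 * Each non-NULL .values entry is recorded as init_slots[i] during relocation
 * collection and written into the outer map above, right after creation.
 */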
4247 for (i = 0; i < obj->nr_maps; i++) { in bpf_object__create_maps()
4248 map = &obj->maps[i]; in bpf_object__create_maps()
4250 if (map->pin_path) { in bpf_object__create_maps()
4254 map->name); in bpf_object__create_maps()
4259 if (map->fd >= 0) { in bpf_object__create_maps()
4261 map->name, map->fd); in bpf_object__create_maps()
4268 map->name, map->fd); in bpf_object__create_maps()
4273 zclose(map->fd); in bpf_object__create_maps()
4278 if (map->init_slots_sz) { in bpf_object__create_maps()
4281 zclose(map->fd); in bpf_object__create_maps()
4287 if (map->pin_path && !map->pinned) { in bpf_object__create_maps()
4290 pr_warn("map '%s': failed to auto-pin at '%s': %d\n", in bpf_object__create_maps()
4291 map->name, map->pin_path, err); in bpf_object__create_maps()
4292 zclose(map->fd); in bpf_object__create_maps()
4302 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err); in bpf_object__create_maps()
4305 zclose(obj->maps[j].fd); in bpf_object__create_maps()
4311 /* represents BPF CO-RE field or array element accessor */
4320 /* high-level spec: named fields and array indices only */
4324 /* CO-RE relocation kind */
4326 /* high-level spec length */
4328 /* raw, low-level spec: 1-to-1 with accessor spec string */
4347 /* not a flexible array if it's not inside a struct or has a non-zero size */ in is_flex_arr()
4348 if (!acc->name || arr->nelems > 0) in is_flex_arr()
4352 t = btf__type_by_id(btf, acc->type_id); in is_flex_arr()
4353 return acc->idx == btf_vlen(t) - 1; in is_flex_arr()
4415 * Turn bpf_core_relo into a low- and high-level spec representation,
4417 * field bit offset, specified by accessor string. Low-level spec captures
4419 * struct/union members. High-level one only captures semantically meaningful
4434 * int x = &s->a[3]; // access string = '0:1:2:3'
4436 * Low-level spec has 1:1 mapping with each element of access string (it's
4439 * High-level spec will capture only 3 points:
4440 * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
4441 * - field 'a' access (corresponds to '2' in low-level spec);
4442 * - array element #3 access (corresponds to '3' in low-level spec).
4444 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
4448 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access
4449 * string to specify the enumerator's value index that needs to be relocated.
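/* Worked example (editorial; assuming the local type from the discussion
 * above is the following):
 *
 *   struct sample {
 *       int __unimportant;
 *       struct {
 *           int __1;
 *           int __2;
 *           int a[7];
 *       };
 *   };
 *
 * For &s->a[3], access string "0:1:2:3" yields raw spec [0, 1, 2, 3],
 * a high-level spec of length 3 (pointer deref, field 'a', index 3), and
 * bit_offset = (4 + 8 + 3 * 4) * 8 = 192, i.e. byte offset 24.
 */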
4465 return -EINVAL; in bpf_core_parse_spec()
4468 spec->btf = btf; in bpf_core_parse_spec()
4469 spec->root_type_id = type_id; in bpf_core_parse_spec()
4470 spec->relo_kind = relo_kind; in bpf_core_parse_spec()
4472 /* type-based relocations don't have a field access string */ in bpf_core_parse_spec()
4475 return -EINVAL; in bpf_core_parse_spec()
4484 return -EINVAL; in bpf_core_parse_spec()
4485 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) in bpf_core_parse_spec()
4486 return -E2BIG; in bpf_core_parse_spec()
4488 spec->raw_spec[spec->raw_len++] = access_idx; in bpf_core_parse_spec()
4491 if (spec->raw_len == 0) in bpf_core_parse_spec()
4492 return -EINVAL; in bpf_core_parse_spec()
4496 return -EINVAL; in bpf_core_parse_spec()
4498 access_idx = spec->raw_spec[0]; in bpf_core_parse_spec()
4499 acc = &spec->spec[0]; in bpf_core_parse_spec()
4500 acc->type_id = id; in bpf_core_parse_spec()
4501 acc->idx = access_idx; in bpf_core_parse_spec()
4502 spec->len++; in bpf_core_parse_spec()
4505 if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t)) in bpf_core_parse_spec()
4506 return -EINVAL; in bpf_core_parse_spec()
4508 /* record enumerator name in the first accessor */ in bpf_core_parse_spec()
4509 acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off); in bpf_core_parse_spec()
4514 return -EINVAL; in bpf_core_parse_spec()
4519 spec->bit_offset = access_idx * sz * 8; in bpf_core_parse_spec()
4521 for (i = 1; i < spec->raw_len; i++) { in bpf_core_parse_spec()
4524 return -EINVAL; in bpf_core_parse_spec()
4526 access_idx = spec->raw_spec[i]; in bpf_core_parse_spec()
4527 acc = &spec->spec[spec->len]; in bpf_core_parse_spec()
4534 return -EINVAL; in bpf_core_parse_spec()
4537 spec->bit_offset += bit_offset; in bpf_core_parse_spec()
4540 if (m->name_off) { in bpf_core_parse_spec()
4541 name = btf__name_by_offset(btf, m->name_off); in bpf_core_parse_spec()
4543 return -EINVAL; in bpf_core_parse_spec()
4545 acc->type_id = id; in bpf_core_parse_spec()
4546 acc->idx = access_idx; in bpf_core_parse_spec()
4547 acc->name = name; in bpf_core_parse_spec()
4548 spec->len++; in bpf_core_parse_spec()
4551 id = m->type; in bpf_core_parse_spec()
4556 t = skip_mods_and_typedefs(btf, a->type, &id); in bpf_core_parse_spec()
4558 return -EINVAL; in bpf_core_parse_spec()
4560 flex = is_flex_arr(btf, acc - 1, a); in bpf_core_parse_spec()
4561 if (!flex && access_idx >= a->nelems) in bpf_core_parse_spec()
4562 return -EINVAL; in bpf_core_parse_spec()
4564 spec->spec[spec->len].type_id = id; in bpf_core_parse_spec()
4565 spec->spec[spec->len].idx = access_idx; in bpf_core_parse_spec()
4566 spec->len++; in bpf_core_parse_spec()
4571 spec->bit_offset += access_idx * sz * 8; in bpf_core_parse_spec()
4575 return -EINVAL; in bpf_core_parse_spec()
4592 * underscore is ignored by BPF CO-RE relocation during relocation matching.
4599 for (i = n - 5; i >= 0; i--) { in bpf_core_essential_name_len()
4614 free(cand_ids->data); in bpf_core_free_cands()
4631 return ERR_PTR(-EINVAL); in bpf_core_find_cands()
4633 local_name = btf__name_by_offset(local_btf, local_t->name_off); in bpf_core_find_cands()
4635 return ERR_PTR(-EINVAL); in bpf_core_find_cands()
4640 return ERR_PTR(-ENOMEM); in bpf_core_find_cands()
4648 targ_name = btf__name_by_offset(targ_btf, t->name_off); in bpf_core_find_cands()
4657 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s\n", in bpf_core_find_cands()
4660 new_ids = libbpf_reallocarray(cand_ids->data, in bpf_core_find_cands()
4661 cand_ids->len + 1, in bpf_core_find_cands()
4662 sizeof(*cand_ids->data)); in bpf_core_find_cands()
4664 err = -ENOMEM; in bpf_core_find_cands()
4667 cand_ids->data = new_ids; in bpf_core_find_cands()
4668 cand_ids->data[cand_ids->len++] = i; in bpf_core_find_cands()
4680 * - any two STRUCTs/UNIONs are compatible and can be mixed;
4681 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
4682 * - any two PTRs are always compatible;
4683 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
4685 * - for ENUMs, check sizes, names are ignored;
4686 * - for INT, size and signedness are ignored;
4687 * - for ARRAY, dimensionality is ignored, element types are checked for
4689 * - everything else should never be a target of a relocation.
4691 * more experience with using BPF CO-RE relocations.
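/* Illustrative pair (editorial; hypothetical types): under the rules above,
 * these two fields are compatible despite differing int size/signedness and
 * array dimensionality:
 *
 *   local:  struct s { unsigned int vals[4]; };
 *   target: struct s { long vals[16]; };
 *
 * Likewise 'enum e___v1' matches plain 'enum e', since the ___ flavor
 * suffix is ignored when comparing names.
 */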
4704 return -EINVAL; in bpf_core_fields_are_compat()
4720 local_type->name_off); in bpf_core_fields_are_compat()
4721 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off); in bpf_core_fields_are_compat()
4724 /* one of them is anonymous or both w/ same flavor-less names */ in bpf_core_fields_are_compat()
4730 /* just reject deprecated bitfield-like integers; all other in bpf_core_fields_are_compat()
4736 local_id = btf_array(local_type)->type; in bpf_core_fields_are_compat()
4737 targ_id = btf_array(targ_type)->type; in bpf_core_fields_are_compat()
4747 * Given a single high-level named field accessor in a local type, find the
4748 * corresponding high-level accessor for a target type. Along the way,
4749 * maintain the low-level spec for the target as well. Also keep updating the target
4777 return -EINVAL; in bpf_core_match_member()
4781 local_id = local_acc->type_id; in bpf_core_match_member()
4783 local_member = btf_members(local_type) + local_acc->idx; in bpf_core_match_member()
4784 local_name = btf__name_by_offset(local_btf, local_member->name_off); in bpf_core_match_member()
4794 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) in bpf_core_match_member()
4795 return -E2BIG; in bpf_core_match_member()
4798 spec->bit_offset += bit_offset; in bpf_core_match_member()
4799 spec->raw_spec[spec->raw_len++] = i; in bpf_core_match_member()
4801 targ_name = btf__name_by_offset(targ_btf, m->name_off); in bpf_core_match_member()
4805 targ_btf, m->type, in bpf_core_match_member()
4813 targ_acc = &spec->spec[spec->len++]; in bpf_core_match_member()
4814 targ_acc->type_id = targ_id; in bpf_core_match_member()
4815 targ_acc->idx = i; in bpf_core_match_member()
4816 targ_acc->name = targ_name; in bpf_core_match_member()
4818 *next_targ_id = m->type; in bpf_core_match_member()
4820 local_member->type, in bpf_core_match_member()
4821 targ_btf, m->type); in bpf_core_match_member()
4823 spec->len--; /* pop accessor */ in bpf_core_match_member()
4827 spec->bit_offset -= bit_offset; in bpf_core_match_member()
4828 spec->raw_len--; in bpf_core_match_member()
4835 * type-based CO-RE relocations and follow slightly different rules than
4836 * field-based relocations. This function assumes that root types were already
4837 * checked for name match. Beyond that initial root-level name check, names
4839 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
4842 * - for ENUMs, the size is ignored;
4843 * - for INT, size and signedness are ignored;
4844 * - for ARRAY, dimensionality is ignored, element types are checked for
4846 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
4847 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
4848 * - FUNC_PROTOs are compatible if they have compatible signature: same
4851 * more experience with using BPF CO-RE relocations.
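/* Illustrative pair (editorial; hypothetical typedefs): by the rules above
 * these two FUNC_PROTOs are compatible -- same arg count, with arg/return
 * types compatible after skipping CONST and following PTRs:
 *
 *   local:  typedef int  (*handler_t)(const struct ctx *c, unsigned int n);
 *   target: typedef long (*handler_t)(struct ctx *c, unsigned long n);
 */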
4866 depth--; in bpf_core_types_are_compat()
4868 return -EINVAL; in bpf_core_types_are_compat()
4873 return -EINVAL; in bpf_core_types_are_compat()
4886 /* just reject deprecated bitfield-like integers; all other in bpf_core_types_are_compat()
4891 local_id = local_type->type; in bpf_core_types_are_compat()
4892 targ_id = targ_type->type; in bpf_core_types_are_compat()
4895 local_id = btf_array(local_type)->type; in bpf_core_types_are_compat()
4896 targ_id = btf_array(targ_type)->type; in bpf_core_types_are_compat()
4909 skip_mods_and_typedefs(local_btf, local_p->type, &local_id); in bpf_core_types_are_compat()
4910 skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id); in bpf_core_types_are_compat()
4917 skip_mods_and_typedefs(local_btf, local_type->type, &local_id); in bpf_core_types_are_compat()
4918 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id); in bpf_core_types_are_compat()
4930 * target spec (high-level, low-level + bit offset).
4942 targ_spec->btf = targ_btf; in bpf_core_spec_match()
4943 targ_spec->root_type_id = targ_id; in bpf_core_spec_match()
4944 targ_spec->relo_kind = local_spec->relo_kind; in bpf_core_spec_match()
4946 if (core_relo_is_type_based(local_spec->relo_kind)) { in bpf_core_spec_match()
4947 return bpf_core_types_are_compat(local_spec->btf, in bpf_core_spec_match()
4948 local_spec->root_type_id, in bpf_core_spec_match()
4952 local_acc = &local_spec->spec[0]; in bpf_core_spec_match()
4953 targ_acc = &targ_spec->spec[0]; in bpf_core_spec_match()
4955 if (core_relo_is_enumval_based(local_spec->relo_kind)) { in bpf_core_spec_match()
4961 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id); in bpf_core_spec_match()
4965 local_essent_len = bpf_core_essential_name_len(local_acc->name); in bpf_core_spec_match()
4968 targ_name = btf__name_by_offset(targ_spec->btf, e->name_off); in bpf_core_spec_match()
4972 if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) { in bpf_core_spec_match()
4973 targ_acc->type_id = targ_id; in bpf_core_spec_match()
4974 targ_acc->idx = i; in bpf_core_spec_match()
4975 targ_acc->name = targ_name; in bpf_core_spec_match()
4976 targ_spec->len++; in bpf_core_spec_match()
4977 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; in bpf_core_spec_match()
4978 targ_spec->raw_len++; in bpf_core_spec_match()
4985 if (!core_relo_is_field_based(local_spec->relo_kind)) in bpf_core_spec_match()
4986 return -EINVAL; in bpf_core_spec_match()
4988 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) { in bpf_core_spec_match()
4989 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, in bpf_core_spec_match()
4992 return -EINVAL; in bpf_core_spec_match()
4994 if (local_acc->name) { in bpf_core_spec_match()
4995 matched = bpf_core_match_member(local_spec->btf, in bpf_core_spec_match()
5014 flex = is_flex_arr(targ_btf, targ_acc - 1, a); in bpf_core_spec_match()
5015 if (!flex && local_acc->idx >= a->nelems) in bpf_core_spec_match()
5017 if (!skip_mods_and_typedefs(targ_btf, a->type, in bpf_core_spec_match()
5019 return -EINVAL; in bpf_core_spec_match()
5023 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN) in bpf_core_spec_match()
5024 return -E2BIG; in bpf_core_spec_match()
5026 targ_acc->type_id = targ_id; in bpf_core_spec_match()
5027 targ_acc->idx = local_acc->idx; in bpf_core_spec_match()
5028 targ_acc->name = NULL; in bpf_core_spec_match()
5029 targ_spec->len++; in bpf_core_spec_match()
5030 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; in bpf_core_spec_match()
5031 targ_spec->raw_len++; in bpf_core_spec_match()
5036 targ_spec->bit_offset += local_acc->idx * sz * 8; in bpf_core_spec_match()
5059 if (relo->kind == BPF_FIELD_EXISTS) { in bpf_core_calc_field_relo()
5065 return -EUCLEAN; /* request instruction poisoning */ in bpf_core_calc_field_relo()
5067 acc = &spec->spec[spec->len - 1]; in bpf_core_calc_field_relo()
5068 t = btf__type_by_id(spec->btf, acc->type_id); in bpf_core_calc_field_relo()
5071 if (!acc->name) { in bpf_core_calc_field_relo()
5072 if (relo->kind == BPF_FIELD_BYTE_OFFSET) { in bpf_core_calc_field_relo()
5073 *val = spec->bit_offset / 8; in bpf_core_calc_field_relo()
5075 sz = btf__resolve_size(spec->btf, acc->type_id); in bpf_core_calc_field_relo()
5077 return -EINVAL; in bpf_core_calc_field_relo()
5079 *type_id = acc->type_id; in bpf_core_calc_field_relo()
5080 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) { in bpf_core_calc_field_relo()
5081 sz = btf__resolve_size(spec->btf, acc->type_id); in bpf_core_calc_field_relo()
5083 return -EINVAL; in bpf_core_calc_field_relo()
5087 prog->name, relo->kind, relo->insn_off / 8); in bpf_core_calc_field_relo()
5088 return -EINVAL; in bpf_core_calc_field_relo()
5095 m = btf_members(t) + acc->idx; in bpf_core_calc_field_relo()
5096 mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id); in bpf_core_calc_field_relo()
5097 bit_off = spec->bit_offset; in bpf_core_calc_field_relo()
5098 bit_sz = btf_member_bitfield_size(t, acc->idx); in bpf_core_calc_field_relo()
5102 byte_sz = mt->size; in bpf_core_calc_field_relo()
5105 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) { in bpf_core_calc_field_relo()
5107 /* bitfield can't be read with 64-bit read */ in bpf_core_calc_field_relo()
5109 prog->name, relo->kind, relo->insn_off / 8); in bpf_core_calc_field_relo()
5110 return -E2BIG; in bpf_core_calc_field_relo()
5116 sz = btf__resolve_size(spec->btf, field_type_id); in bpf_core_calc_field_relo()
5118 return -EINVAL; in bpf_core_calc_field_relo()
5120 byte_off = spec->bit_offset / 8; in bpf_core_calc_field_relo()
5131 switch (relo->kind) { in bpf_core_calc_field_relo()
5151 *val = 64 - (bit_off + bit_sz - byte_off * 8); in bpf_core_calc_field_relo()
5153 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8); in bpf_core_calc_field_relo()
5157 *val = 64 - bit_sz; in bpf_core_calc_field_relo()
5163 return -EOPNOTSUPP; in bpf_core_calc_field_relo()
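/* Worked example (editorial): for a hypothetical bitfield with bit_off = 36
 * and bit_sz = 4 in a 4-byte underlying int, the loop above settles on
 * byte_off = 4, byte_sz = 4, so on a little-endian host:
 *
 *   BYTE_OFFSET = 4
 *   BYTE_SIZE   = 4
 *   LSHIFT_U64  = 64 - (36 + 4 - 4 * 8) = 56
 *   RSHIFT_U64  = 64 - 4 = 60
 *
 * i.e. load 4 bytes at offset 4, shift left by 56 to drop unrelated high
 * bits, then shift right by 60 to extract the 4-bit value.
 */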
5175 /* type-based relos return zero when target type is not found */ in bpf_core_calc_type_relo()
5181 switch (relo->kind) { in bpf_core_calc_type_relo()
5183 *val = spec->root_type_id; in bpf_core_calc_type_relo()
5189 sz = btf__resolve_size(spec->btf, spec->root_type_id); in bpf_core_calc_type_relo()
5191 return -EINVAL; in bpf_core_calc_type_relo()
5197 return -EOPNOTSUPP; in bpf_core_calc_type_relo()
5210 switch (relo->kind) { in bpf_core_calc_enumval_relo()
5216 return -EUCLEAN; /* request instruction poisoning */ in bpf_core_calc_enumval_relo()
5217 t = btf__type_by_id(spec->btf, spec->spec[0].type_id); in bpf_core_calc_enumval_relo()
5218 e = btf_enum(t) + spec->spec[0].idx; in bpf_core_calc_enumval_relo()
5219 *val = e->val; in bpf_core_calc_enumval_relo()
5222 return -EOPNOTSUPP; in bpf_core_calc_enumval_relo()
5242 * memory loads of pointers and integers; this is necessary for 32-bit
5266 int err = -EOPNOTSUPP; in bpf_core_calc_relo()
5268 res->orig_val = 0; in bpf_core_calc_relo()
5269 res->new_val = 0; in bpf_core_calc_relo()
5270 res->poison = false; in bpf_core_calc_relo()
5271 res->validate = true; in bpf_core_calc_relo()
5272 res->fail_memsz_adjust = false; in bpf_core_calc_relo()
5273 res->orig_sz = res->new_sz = 0; in bpf_core_calc_relo()
5274 res->orig_type_id = res->new_type_id = 0; in bpf_core_calc_relo()
5276 if (core_relo_is_field_based(relo->kind)) { in bpf_core_calc_relo()
5278 &res->orig_val, &res->orig_sz, in bpf_core_calc_relo()
5279 &res->orig_type_id, &res->validate); in bpf_core_calc_relo()
5281 &res->new_val, &res->new_sz, in bpf_core_calc_relo()
5282 &res->new_type_id, NULL); in bpf_core_calc_relo()
5289 res->fail_memsz_adjust = false; in bpf_core_calc_relo()
5290 if (res->orig_sz != res->new_sz) { in bpf_core_calc_relo()
5293 orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id); in bpf_core_calc_relo()
5294 new_t = btf__type_by_id(targ_spec->btf, res->new_type_id); in bpf_core_calc_relo()
5298 * - reading a 32-bit kernel pointer, while on the BPF in bpf_core_calc_relo()
5299 * side pointers are always 64-bit; in this case in bpf_core_calc_relo()
5302 * zero-extended upper 32-bits; in bpf_core_calc_relo()
5303 * - reading unsigned integers, where in bpf_core_calc_relo()
5304 * zero-extension preserves the value correctly. in bpf_core_calc_relo()
5320 res->fail_memsz_adjust = true; in bpf_core_calc_relo()
5322 } else if (core_relo_is_type_based(relo->kind)) { in bpf_core_calc_relo()
5323 err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val); in bpf_core_calc_relo()
5324 err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val); in bpf_core_calc_relo()
5325 } else if (core_relo_is_enumval_based(relo->kind)) { in bpf_core_calc_relo()
5326 err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val); in bpf_core_calc_relo()
5327 err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val); in bpf_core_calc_relo()
5331 if (err == -EUCLEAN) { in bpf_core_calc_relo()
5333 res->poison = true; in bpf_core_calc_relo()
5335 } else if (err == -EOPNOTSUPP) { in bpf_core_calc_relo()
5337 pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n", in bpf_core_calc_relo()
5338 prog->name, relo_idx, core_relo_kind_str(relo->kind), in bpf_core_calc_relo()
5339 relo->kind, relo->insn_off / 8); in bpf_core_calc_relo()
5353 prog->name, relo_idx, insn_idx); in bpf_core_poison_insn()
5354 insn->code = BPF_JMP | BPF_CALL; in bpf_core_poison_insn()
5355 insn->dst_reg = 0; in bpf_core_poison_insn()
5356 insn->src_reg = 0; in bpf_core_poison_insn()
5357 insn->off = 0; in bpf_core_poison_insn()
5362 insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */ in bpf_core_poison_insn()
5367 return insn->code == (BPF_LD | BPF_IMM | BPF_DW); in is_ldimm64()
5372 switch (BPF_SIZE(insn->code)) { in insn_bpf_size_to_bytes()
5377 default: return -1; in insn_bpf_size_to_bytes()
5388 default: return -1; in insn_bytes_to_bpf_size()
5397 * Expected insn->imm value is determined using relocation kind and local
5398 * spec, and is checked before patching instruction. If actual insn->imm value
5404 * 3. rX = <imm64> (load with 64-bit immediate value);
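/* Sketch of a typical patch (editorial; offsets are hypothetical): if local
 * BTF places a field at byte offset 8 but the target kernel's BTF has it at
 * 16, a load
 *
 *   r2 = *(u32 *)(r1 + 8)    // insn->off == orig_val == 8
 *
 * is rewritten below into
 *
 *   r2 = *(u32 *)(r1 + 16)   // insn->off = new_val = 16
 *
 * after optionally validating that the old offset matches (res->validate).
 */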
5419 if (relo->insn_off % BPF_INSN_SZ) in bpf_core_patch_insn()
5420 return -EINVAL; in bpf_core_patch_insn()
5421 insn_idx = relo->insn_off / BPF_INSN_SZ; in bpf_core_patch_insn()
5423 * program's frame of reference; (sub-)program code is not yet in bpf_core_patch_insn()
5424 * relocated, so it's enough to just subtract in-section offset in bpf_core_patch_insn()
5426 insn_idx = insn_idx - prog->sec_insn_off; in bpf_core_patch_insn()
5427 insn = &prog->insns[insn_idx]; in bpf_core_patch_insn()
5428 class = BPF_CLASS(insn->code); in bpf_core_patch_insn()
5430 if (res->poison) { in bpf_core_patch_insn()
5441 orig_val = res->orig_val; in bpf_core_patch_insn()
5442 new_val = res->new_val; in bpf_core_patch_insn()
5447 if (BPF_SRC(insn->code) != BPF_K) in bpf_core_patch_insn()
5448 return -EINVAL; in bpf_core_patch_insn()
5449 if (res->validate && insn->imm != orig_val) { in bpf_core_patch_insn()
5450 pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n", in bpf_core_patch_insn()
5451 prog->name, relo_idx, in bpf_core_patch_insn()
5452 insn_idx, insn->imm, orig_val, new_val); in bpf_core_patch_insn()
5453 return -EINVAL; in bpf_core_patch_insn()
5455 orig_val = insn->imm; in bpf_core_patch_insn()
5456 insn->imm = new_val; in bpf_core_patch_insn()
5457 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n", in bpf_core_patch_insn()
5458 prog->name, relo_idx, insn_idx, in bpf_core_patch_insn()
5464 if (res->validate && insn->off != orig_val) { in bpf_core_patch_insn()
5465 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n", in bpf_core_patch_insn()
5466 prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val); in bpf_core_patch_insn()
5467 return -EINVAL; in bpf_core_patch_insn()
5471 prog->name, relo_idx, insn_idx, new_val); in bpf_core_patch_insn()
5472 return -ERANGE; in bpf_core_patch_insn()
5474 if (res->fail_memsz_adjust) { in bpf_core_patch_insn()
5477 prog->name, relo_idx, insn_idx); in bpf_core_patch_insn()
5481 orig_val = insn->off; in bpf_core_patch_insn()
5482 insn->off = new_val; in bpf_core_patch_insn()
5483 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n", in bpf_core_patch_insn()
5484 prog->name, relo_idx, insn_idx, orig_val, new_val); in bpf_core_patch_insn()
5486 if (res->new_sz != res->orig_sz) { in bpf_core_patch_insn()
5490 if (insn_bytes_sz != res->orig_sz) { in bpf_core_patch_insn()
5492 prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz); in bpf_core_patch_insn()
5493 return -EINVAL; in bpf_core_patch_insn()
5496 insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz); in bpf_core_patch_insn()
5499 prog->name, relo_idx, insn_idx, res->new_sz); in bpf_core_patch_insn()
5500 return -EINVAL; in bpf_core_patch_insn()
5503 insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code); in bpf_core_patch_insn()
5504 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n", in bpf_core_patch_insn()
5505 prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz); in bpf_core_patch_insn()
5513 insn_idx + 1 >= prog->insns_cnt || in bpf_core_patch_insn()
5517 prog->name, relo_idx, insn_idx); in bpf_core_patch_insn()
5518 return -EINVAL; in bpf_core_patch_insn()
5522 if (res->validate && imm != orig_val) { in bpf_core_patch_insn()
5523 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n", in bpf_core_patch_insn()
5524 prog->name, relo_idx, in bpf_core_patch_insn()
5527 return -EINVAL; in bpf_core_patch_insn()
5531 insn[1].imm = 0; /* currently only 32-bit values are supported */ in bpf_core_patch_insn()
5532 pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n", in bpf_core_patch_insn()
5533 prog->name, relo_idx, insn_idx, in bpf_core_patch_insn()
5539 prog->name, relo_idx, insn_idx, insn->code, in bpf_core_patch_insn()
5540 insn->src_reg, insn->dst_reg, insn->off, insn->imm); in bpf_core_patch_insn()
5541 return -EINVAL; in bpf_core_patch_insn()
5548 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
5549 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
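/* Hypothetical dump following this format (editorial), for the
 * 'struct sample' access used earlier:
 *
 *   [7] (struct sample) + 0:1:2:3 => 24@.a[3]
 *
 * where [7] is the BTF type ID, 0:1:2:3 the raw spec, 24 the byte offset,
 * and .a[3] the high-level field path.
 */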
5559 type_id = spec->root_type_id; in bpf_core_dump_spec()
5560 t = btf__type_by_id(spec->btf, type_id); in bpf_core_dump_spec()
5561 s = btf__name_by_offset(spec->btf, t->name_off); in bpf_core_dump_spec()
5565 if (core_relo_is_type_based(spec->relo_kind)) in bpf_core_dump_spec()
5568 if (core_relo_is_enumval_based(spec->relo_kind)) { in bpf_core_dump_spec()
5569 t = skip_mods_and_typedefs(spec->btf, type_id, NULL); in bpf_core_dump_spec()
5570 e = btf_enum(t) + spec->raw_spec[0]; in bpf_core_dump_spec()
5571 s = btf__name_by_offset(spec->btf, e->name_off); in bpf_core_dump_spec()
5573 libbpf_print(level, "::%s = %u", s, e->val); in bpf_core_dump_spec()
5577 if (core_relo_is_field_based(spec->relo_kind)) { in bpf_core_dump_spec()
5578 for (i = 0; i < spec->len; i++) { in bpf_core_dump_spec()
5579 if (spec->spec[i].name) in bpf_core_dump_spec()
5580 libbpf_print(level, ".%s", spec->spec[i].name); in bpf_core_dump_spec()
5581 else if (i > 0 || spec->spec[i].idx > 0) in bpf_core_dump_spec()
5582 libbpf_print(level, "[%u]", spec->spec[i].idx); in bpf_core_dump_spec()
5586 for (i = 0; i < spec->raw_len; i++) in bpf_core_dump_spec()
5587 libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]); in bpf_core_dump_spec()
5589 if (spec->bit_offset % 8) in bpf_core_dump_spec()
5591 spec->bit_offset / 8, spec->bit_offset % 8); in bpf_core_dump_spec()
5593 libbpf_print(level, " @ offset %u)", spec->bit_offset / 8); in bpf_core_dump_spec()
5614 * CO-RE relocate single instruction.
5627 * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
5639 * high-level spec accessors, meaning that all named fields should match,
5645 * ambiguity, CO-RE relocation will fail. This is necessary to accommodate
5655 * efficient memory-wise and not significantly worse (if not better)
5656 * CPU-wise compared to prebuilding a map from all local type names to
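/* Flavor sketch (editorial; type names are hypothetical): both local
 * definitions below are matched against the kernel's 'struct fs_struct',
 * because everything from the last triple underscore on is dropped for name
 * comparison:
 *
 *   struct fs_struct___old { ... };
 *   struct fs_struct___new { ... };
 *
 * A program can then probe the running kernel's layout (e.g. with
 * bpf_core_field_exists()) and read through whichever flavor matches.
 */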
5671 const void *type_key = u32_as_hash_key(relo->type_id); in bpf_core_apply_relo()
5680 local_id = relo->type_id; in bpf_core_apply_relo()
5683 return -EINVAL; in bpf_core_apply_relo()
5685 local_name = btf__name_by_offset(local_btf, local_type->name_off); in bpf_core_apply_relo()
5687 return -EINVAL; in bpf_core_apply_relo()
5689 spec_str = btf__name_by_offset(local_btf, relo->access_str_off); in bpf_core_apply_relo()
5691 return -EINVAL; in bpf_core_apply_relo()
5693 err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec); in bpf_core_apply_relo()
5696 prog->name, relo_idx, local_id, btf_kind_str(local_type), in bpf_core_apply_relo()
5699 return -EINVAL; in bpf_core_apply_relo()
5702 pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name, in bpf_core_apply_relo()
5703 relo_idx, core_relo_kind_str(relo->kind), relo->kind); in bpf_core_apply_relo()
5708 if (relo->kind == BPF_TYPE_ID_LOCAL) { in bpf_core_apply_relo()
5719 prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind); in bpf_core_apply_relo()
5720 return -EOPNOTSUPP; in bpf_core_apply_relo()
5727 prog->name, relo_idx, local_id, btf_kind_str(local_type), in bpf_core_apply_relo()
5738 for (i = 0, j = 0; i < cand_ids->len; i++) { in bpf_core_apply_relo()
5739 cand_id = cand_ids->data[i]; in bpf_core_apply_relo()
5743 prog->name, relo_idx, i); in bpf_core_apply_relo()
5749 pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name, in bpf_core_apply_relo()
5750 relo_idx, err == 0 ? "non-matching" : "matching", i); in bpf_core_apply_relo()
5769 prog->name, relo_idx, cand_spec.bit_offset, in bpf_core_apply_relo()
5771 return -EINVAL; in bpf_core_apply_relo()
5778 prog->name, relo_idx, in bpf_core_apply_relo()
5781 return -EINVAL; in bpf_core_apply_relo()
5784 cand_ids->data[j++] = cand_spec.root_type_id; in bpf_core_apply_relo()
5796 cand_ids->len = j; in bpf_core_apply_relo()
5811 prog->name, relo_idx); in bpf_core_apply_relo()
5824 prog->name, relo_idx, relo->insn_off, err); in bpf_core_apply_relo()
5825 return -EINVAL; in bpf_core_apply_relo()
5844 if (obj->btf_ext->core_relo_info.len == 0) in bpf_object__relocate_core()
5850 targ_btf = obj->btf_vmlinux; in bpf_object__relocate_core()
5862 seg = &obj->btf_ext->core_relo_info; in bpf_object__relocate_core()
5864 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); in bpf_object__relocate_core()
5866 err = -EINVAL; in bpf_object__relocate_core()
5872 * prog->sec_idx to do a proper search by section index and in bpf_object__relocate_core()
5876 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate_core()
5877 prog = &obj->programs[i]; in bpf_object__relocate_core()
5878 if (strcmp(prog->sec_name, sec_name) == 0) in bpf_object__relocate_core()
5883 return -ENOENT; in bpf_object__relocate_core()
5885 sec_idx = prog->sec_idx; in bpf_object__relocate_core()
5887 pr_debug("sec '%s': found %d CO-RE relocations\n", in bpf_object__relocate_core()
5888 sec_name, sec->num_info); in bpf_object__relocate_core()
5891 insn_idx = rec->insn_off / BPF_INSN_SZ; in bpf_object__relocate_core()
5894 pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n", in bpf_object__relocate_core()
5896 err = -EINVAL; in bpf_object__relocate_core()
5899 /* no need to apply CO-RE relocation if the program is in bpf_object__relocate_core()
5902 if (!prog->load) in bpf_object__relocate_core()
5905 err = bpf_core_apply_relo(prog, rec, i, obj->btf, in bpf_object__relocate_core()
5909 prog->name, i, err); in bpf_object__relocate_core()
5916 /* obj->btf_vmlinux is freed at the end of object load phase */ in bpf_object__relocate_core()
5917 if (targ_btf != obj->btf_vmlinux) in bpf_object__relocate_core()
5921 bpf_core_free_cands(entry->value); in bpf_object__relocate_core()
5929 * - map references;
5930 * - global variable references;
5931 * - extern references.
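/* Sketch (editorial): a global variable access compiles to a two-insn
 * ld_imm64 that RELO_DATA rewrites into a map value reference:
 *
 *   before: r1 = <placeholder imm64>                  // ld_imm64 pair
 *   after:  insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
 *           insn[0].imm     = fd of the .data/.bss/.rodata map;
 *           insn[1].imm     = variable's offset inside the map value;
 *
 * matching the RELO_DATA case below.
 */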
5938 for (i = 0; i < prog->nr_reloc; i++) { in bpf_object__relocate_data()
5939 struct reloc_desc *relo = &prog->reloc_desc[i]; in bpf_object__relocate_data()
5940 struct bpf_insn *insn = &prog->insns[relo->insn_idx]; in bpf_object__relocate_data()
5943 switch (relo->type) { in bpf_object__relocate_data()
5946 insn[0].imm = obj->maps[relo->map_idx].fd; in bpf_object__relocate_data()
5947 relo->processed = true; in bpf_object__relocate_data()
5951 insn[1].imm = insn[0].imm + relo->sym_off; in bpf_object__relocate_data()
5952 insn[0].imm = obj->maps[relo->map_idx].fd; in bpf_object__relocate_data()
5953 relo->processed = true; in bpf_object__relocate_data()
5956 ext = &obj->externs[relo->sym_off]; in bpf_object__relocate_data()
5957 if (ext->type == EXT_KCFG) { in bpf_object__relocate_data()
5959 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; in bpf_object__relocate_data()
5960 insn[1].imm = ext->kcfg.data_off; in bpf_object__relocate_data()
5962 if (ext->ksym.type_id) { /* typed ksyms */ in bpf_object__relocate_data()
5964 insn[0].imm = ext->ksym.vmlinux_btf_id; in bpf_object__relocate_data()
5966 insn[0].imm = (__u32)ext->ksym.addr; in bpf_object__relocate_data()
5967 insn[1].imm = ext->ksym.addr >> 32; in bpf_object__relocate_data()
5970 relo->processed = true; in bpf_object__relocate_data()
5977 prog->name, i, relo->type); in bpf_object__relocate_data()
5978 return -EINVAL; in bpf_object__relocate_data()
5999 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); in adjust_prog_btf_ext_info()
6001 return -EINVAL; in adjust_prog_btf_ext_info()
6002 if (strcmp(sec_name, prog->sec_name) != 0) in adjust_prog_btf_ext_info()
6008 if (insn_off < prog->sec_insn_off) in adjust_prog_btf_ext_info()
6010 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt) in adjust_prog_btf_ext_info()
6015 copy_end = rec + ext_info->rec_size; in adjust_prog_btf_ext_info()
6019 return -ENOENT; in adjust_prog_btf_ext_info()
6021 /* append func/line info of a given (sub-)program to the main in adjust_prog_btf_ext_info()
6024 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size; in adjust_prog_btf_ext_info()
6025 new_sz = old_sz + (copy_end - copy_start); in adjust_prog_btf_ext_info()
6028 return -ENOMEM; in adjust_prog_btf_ext_info()
6030 *prog_rec_cnt = new_sz / ext_info->rec_size; in adjust_prog_btf_ext_info()
6031 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start); in adjust_prog_btf_ext_info()
6033 /* Kernel instruction offsets are in units of 8-byte in adjust_prog_btf_ext_info()
6039 off_adj = prog->sub_insn_off - prog->sec_insn_off; in adjust_prog_btf_ext_info()
6042 for (; rec < rec_end; rec += ext_info->rec_size) { in adjust_prog_btf_ext_info()
6047 *prog_rec_sz = ext_info->rec_size; in adjust_prog_btf_ext_info()
6051 return -ENOENT; in adjust_prog_btf_ext_info()
6064 if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC)) in reloc_prog_func_and_line_info()
6070 if (main_prog != prog && !main_prog->func_info) in reloc_prog_func_and_line_info()
6073 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info, in reloc_prog_func_and_line_info()
6074 &main_prog->func_info, in reloc_prog_func_and_line_info()
6075 &main_prog->func_info_cnt, in reloc_prog_func_and_line_info()
6076 &main_prog->func_info_rec_size); in reloc_prog_func_and_line_info()
6078 if (err != -ENOENT) { in reloc_prog_func_and_line_info()
6079 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n", in reloc_prog_func_and_line_info()
6080 prog->name, err); in reloc_prog_func_and_line_info()
6083 if (main_prog->func_info) { in reloc_prog_func_and_line_info()
6088 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name); in reloc_prog_func_and_line_info()
6092 …pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext … in reloc_prog_func_and_line_info()
6093 prog->name); in reloc_prog_func_and_line_info()
6098 if (main_prog != prog && !main_prog->line_info) in reloc_prog_func_and_line_info()
6101 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info, in reloc_prog_func_and_line_info()
6102 &main_prog->line_info, in reloc_prog_func_and_line_info()
6103 &main_prog->line_info_cnt, in reloc_prog_func_and_line_info()
6104 &main_prog->line_info_rec_size); in reloc_prog_func_and_line_info()
6106 if (err != -ENOENT) { in reloc_prog_func_and_line_info()
6108 prog->name, err); in reloc_prog_func_and_line_info()
6111 if (main_prog->line_info) { in reloc_prog_func_and_line_info()
6116 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name); in reloc_prog_func_and_line_info()
6121 prog->name); in reloc_prog_func_and_line_info()
6131 if (insn_idx == relo->insn_idx) in cmp_relo_by_insn_idx()
6133 return insn_idx < relo->insn_idx ? -1 : 1; in cmp_relo_by_insn_idx()
6138 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc, in find_prog_insn_relo()
6139 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx); in find_prog_insn_relo()
6156 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) { in bpf_object__reloc_code()
6157 insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; in bpf_object__reloc_code()
6162 if (relo && relo->type != RELO_CALL) { in bpf_object__reloc_code()
6164 prog->name, insn_idx, relo->type); in bpf_object__reloc_code()
6165 return -LIBBPF_ERRNO__RELOC; in bpf_object__reloc_code()
6168 /* sub-program instruction index is a combination of in bpf_object__reloc_code()
6171 * call always has imm = -1, but for static functions in bpf_object__reloc_code()
6172 * relocation is against STT_SECTION and insn->imm in bpf_object__reloc_code()
6173 * points to the start of a static function in bpf_object__reloc_code()
6175 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1; in bpf_object__reloc_code()
6177 /* if subprogram call is to a static function within in bpf_object__reloc_code()
6180 * offset necessary, insn->imm is relative to in bpf_object__reloc_code()
6183 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1; in bpf_object__reloc_code()
6186 /* we enforce that sub-programs are in the .text section */ in bpf_object__reloc_code()
6187 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx); in bpf_object__reloc_code()
6189 pr_warn("prog '%s': no .text section found yet sub-program call exists\n", in bpf_object__reloc_code()
6190 prog->name); in bpf_object__reloc_code()
6191 return -LIBBPF_ERRNO__RELOC; in bpf_object__reloc_code()
6197 * - append it at the end of the main program's instruction block; in bpf_object__reloc_code()
6198 * - process it recursively, while the current program is put on hold; in bpf_object__reloc_code()
6199 * - if that subprogram calls some other not yet processed in bpf_object__reloc_code()
6204 if (subprog->sub_insn_off == 0) { in bpf_object__reloc_code()
6205 subprog->sub_insn_off = main_prog->insns_cnt; in bpf_object__reloc_code()
6207 new_cnt = main_prog->insns_cnt + subprog->insns_cnt; in bpf_object__reloc_code()
6208 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns)); in bpf_object__reloc_code()
6210 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name); in bpf_object__reloc_code()
6211 return -ENOMEM; in bpf_object__reloc_code()
6213 main_prog->insns = insns; in bpf_object__reloc_code()
6214 main_prog->insns_cnt = new_cnt; in bpf_object__reloc_code()
6216 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns, in bpf_object__reloc_code()
6217 subprog->insns_cnt * sizeof(*insns)); in bpf_object__reloc_code()
6219 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n", in bpf_object__reloc_code()
6220 main_prog->name, subprog->insns_cnt, subprog->name); in bpf_object__reloc_code()
6227 /* main_prog->insns memory could have been re-allocated, so in bpf_object__reloc_code()
6230 insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; in bpf_object__reloc_code()
6236 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1; in bpf_object__reloc_code()
6239 relo->processed = true; in bpf_object__reloc_code()
6242 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off); in bpf_object__reloc_code()
6249 * Relocate sub-program calls.
6251 * Algorithm operates as follows. Each entry-point BPF program (referred to as
6252 * main prog) is processed separately. For each subprog (non-entry functions,
6261 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6277 * subprog->sub_insn_off as zero at all times and won't be appended to current
6286 * +--------+ +-------+
6288 * +--+---+ +--+-+-+ +---+--+
6290 * +--+---+ +------+ +---+--+
6293 * +---+-------+ +------+----+
6295 * +-----------+ +-----------+
6300 * +-----------+------+
6302 * +-----------+------+
6307 * +-----------+------+------+
6309 * +-----------+------+------+
6318 * +-----------+------+
6320 * +-----------+------+
6323 * +-----------+------+------+
6325 * +-----------+------+------+
6338 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate_calls()
6339 subprog = &obj->programs[i]; in bpf_object__relocate_calls()
6343 subprog->sub_insn_off = 0; in bpf_object__relocate_calls()
6344 for (j = 0; j < subprog->nr_reloc; j++) in bpf_object__relocate_calls()
6345 if (subprog->reloc_desc[j].type == RELO_CALL) in bpf_object__relocate_calls()
6346 subprog->reloc_desc[j].processed = false; in bpf_object__relocate_calls()
6364 if (obj->btf_ext) { in bpf_object__relocate()
6367 pr_warn("failed to perform CO-RE relocations: %d\n", in bpf_object__relocate()
6372 /* relocate data references first for all programs and sub-programs, in bpf_object__relocate()
6374 * subprogram processing won't need to re-calculate any of them in bpf_object__relocate()
6376 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate()
6377 prog = &obj->programs[i]; in bpf_object__relocate()
6381 prog->name, err); in bpf_object__relocate()
6390 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate()
6391 prog = &obj->programs[i]; in bpf_object__relocate()
6392 /* sub-program's sub-calls are relocated within the context of in bpf_object__relocate()
6401 prog->name, err); in bpf_object__relocate()
6406 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate()
6407 prog = &obj->programs[i]; in bpf_object__relocate()
6408 zfree(&prog->reloc_desc); in bpf_object__relocate()
6409 prog->nr_reloc = 0; in bpf_object__relocate()
6433 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf) in bpf_object__collect_map_relos()
6434 return -EINVAL; in bpf_object__collect_map_relos()
6435 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id); in bpf_object__collect_map_relos()
6437 return -EINVAL; in bpf_object__collect_map_relos()
6439 symbols = obj->efile.symbols; in bpf_object__collect_map_relos()
6440 nrels = shdr->sh_size / shdr->sh_entsize; in bpf_object__collect_map_relos()
6444 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_map_relos()
6449 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_map_relos()
6452 if (sym.st_shndx != obj->efile.btf_maps_shndx) { in bpf_object__collect_map_relos()
6453 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", in bpf_object__collect_map_relos()
6455 return -LIBBPF_ERRNO__RELOC; in bpf_object__collect_map_relos()
6462 for (j = 0; j < obj->nr_maps; j++) { in bpf_object__collect_map_relos()
6463 map = &obj->maps[j]; in bpf_object__collect_map_relos()
6464 if (map->sec_idx != obj->efile.btf_maps_shndx) in bpf_object__collect_map_relos()
6467 vi = btf_var_secinfos(sec) + map->btf_var_idx; in bpf_object__collect_map_relos()
6468 if (vi->offset <= rel.r_offset && in bpf_object__collect_map_relos()
6469 rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size) in bpf_object__collect_map_relos()
6472 if (j == obj->nr_maps) { in bpf_object__collect_map_relos()
6475 return -EINVAL; in bpf_object__collect_map_relos()
6478 if (!bpf_map_type__is_map_in_map(map->def.type)) in bpf_object__collect_map_relos()
6479 return -EINVAL; in bpf_object__collect_map_relos()
6480 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && in bpf_object__collect_map_relos()
6481 map->def.key_size != sizeof(int)) { in bpf_object__collect_map_relos()
6482 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", in bpf_object__collect_map_relos()
6483 i, map->name, sizeof(int)); in bpf_object__collect_map_relos()
6484 return -EINVAL; in bpf_object__collect_map_relos()
6489 return -ESRCH; in bpf_object__collect_map_relos()
6491 var = btf__type_by_id(obj->btf, vi->type); in bpf_object__collect_map_relos()
6492 def = skip_mods_and_typedefs(obj->btf, var->type, NULL); in bpf_object__collect_map_relos()
6494 return -EINVAL; in bpf_object__collect_map_relos()
6495 member = btf_members(def) + btf_vlen(def) - 1; in bpf_object__collect_map_relos()
6496 mname = btf__name_by_offset(obj->btf, member->name_off); in bpf_object__collect_map_relos()
6498 return -EINVAL; in bpf_object__collect_map_relos()
6500 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8; in bpf_object__collect_map_relos()
6501 if (rel.r_offset - vi->offset < moff) in bpf_object__collect_map_relos()
6502 return -EINVAL; in bpf_object__collect_map_relos()
6504 moff = rel.r_offset - vi->offset - moff; in bpf_object__collect_map_relos()
6509 return -EINVAL; in bpf_object__collect_map_relos()
6511 if (moff >= map->init_slots_sz) { in bpf_object__collect_map_relos()
6513 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz); in bpf_object__collect_map_relos()
6515 return -ENOMEM; in bpf_object__collect_map_relos()
6516 map->init_slots = tmp; in bpf_object__collect_map_relos()
6517 memset(map->init_slots + map->init_slots_sz, 0, in bpf_object__collect_map_relos()
6518 (new_sz - map->init_slots_sz) * host_ptr_sz); in bpf_object__collect_map_relos()
6519 map->init_slots_sz = new_sz; in bpf_object__collect_map_relos()
6521 map->init_slots[moff] = targ_map; in bpf_object__collect_map_relos()
6524 i, map->name, moff, name); in bpf_object__collect_map_relos()
6535 if (a->insn_idx != b->insn_idx) in cmp_relocs()
6536 return a->insn_idx < b->insn_idx ? -1 : 1; in cmp_relocs()
6539 if (a->type != b->type) in cmp_relocs()
6540 return a->type < b->type ? -1 : 1; in cmp_relocs()
6549 for (i = 0; i < obj->efile.nr_reloc_sects; i++) { in bpf_object__collect_relos()
6550 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr; in bpf_object__collect_relos()
6551 Elf_Data *data = obj->efile.reloc_sects[i].data; in bpf_object__collect_relos()
6552 int idx = shdr->sh_info; in bpf_object__collect_relos()
6554 if (shdr->sh_type != SHT_REL) { in bpf_object__collect_relos()
6556 return -LIBBPF_ERRNO__INTERNAL; in bpf_object__collect_relos()
6559 if (idx == obj->efile.st_ops_shndx) in bpf_object__collect_relos()
6561 else if (idx == obj->efile.btf_maps_shndx) in bpf_object__collect_relos()
6569 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__collect_relos()
6570 struct bpf_program *p = &obj->programs[i]; in bpf_object__collect_relos()
6572 if (!p->nr_reloc) in bpf_object__collect_relos()
6575 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); in bpf_object__collect_relos()
6582 if (BPF_CLASS(insn->code) == BPF_JMP && in insn_is_helper_call()
6583 BPF_OP(insn->code) == BPF_CALL && in insn_is_helper_call()
6584 BPF_SRC(insn->code) == BPF_K && in insn_is_helper_call()
6585 insn->src_reg == 0 && in insn_is_helper_call()
6586 insn->dst_reg == 0) { in insn_is_helper_call()
6587 *func_id = insn->imm; in insn_is_helper_call()
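/* Sketch (editorial): this matches plain helper calls such as
 *
 *   call bpf_probe_read_kernel  // BPF_JMP | BPF_CALL, src_reg == 0,
 *                               // imm == BPF_FUNC_probe_read_kernel
 *
 * as opposed to BPF_PSEUDO_CALL subprogram calls; sanitization below then
 * downgrades the _kernel/_user probe variants on older kernels.
 */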
6595 struct bpf_insn *insn = prog->insns; in bpf_object__sanitize_prog()
6599 for (i = 0; i < prog->insns_cnt; i++, insn++) { in bpf_object__sanitize_prog()
6611 insn->imm = BPF_FUNC_probe_read; in bpf_object__sanitize_prog()
6616 insn->imm = BPF_FUNC_probe_read_str; in bpf_object__sanitize_prog()
6636 return -EINVAL; in load_program()
6639 load_attr.prog_type = prog->type; in load_program()
6641 if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def && in load_program()
6642 prog->sec_def->is_exp_attach_type_optional) in load_program()
6645 load_attr.expected_attach_type = prog->expected_attach_type; in load_program()
6647 load_attr.name = prog->name; in load_program()
6651 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || in load_program()
6652 prog->type == BPF_PROG_TYPE_LSM) { in load_program()
6653 load_attr.attach_btf_id = prog->attach_btf_id; in load_program()
6654 } else if (prog->type == BPF_PROG_TYPE_TRACING || in load_program()
6655 prog->type == BPF_PROG_TYPE_EXT) { in load_program()
6656 load_attr.attach_prog_fd = prog->attach_prog_fd; in load_program()
6657 load_attr.attach_btf_id = prog->attach_btf_id; in load_program()
6660 load_attr.prog_ifindex = prog->prog_ifindex; in load_program()
6663 btf_fd = bpf_object__btf_fd(prog->obj); in load_program()
6666 load_attr.func_info = prog->func_info; in load_program()
6667 load_attr.func_info_rec_size = prog->func_info_rec_size; in load_program()
6668 load_attr.func_info_cnt = prog->func_info_cnt; in load_program()
6669 load_attr.line_info = prog->line_info; in load_program()
6670 load_attr.line_info_rec_size = prog->line_info_rec_size; in load_program()
6671 load_attr.line_info_cnt = prog->line_info_cnt; in load_program()
6673 load_attr.log_level = prog->log_level; in load_program()
6674 load_attr.prog_flags = prog->prog_flags; in load_program()
6680 return -ENOMEM; in load_program()
6691 if (prog->obj->rodata_map_idx >= 0 && in load_program()
6694 &prog->obj->maps[prog->obj->rodata_map_idx]; in load_program()
6699 prog->name, cp); in load_program()
6716 ret = errno ? -errno : -LIBBPF_ERRNO__LOAD; in load_program()
6722 ret = -LIBBPF_ERRNO__VERIFY; in load_program()
6723 pr_warn("-- BEGIN DUMP LOG ---\n"); in load_program()
6725 pr_warn("-- END LOG --\n"); in load_program()
6729 ret = -LIBBPF_ERRNO__PROG2BIG; in load_program()
6739 ret = -LIBBPF_ERRNO__PROGTYPE; in load_program()
6755 if (prog->obj->loaded) { in bpf_program__load()
6756 pr_warn("prog '%s': can't load after object was loaded\n", prog->name); in bpf_program__load()
6757 return -EINVAL; in bpf_program__load()
6760 if ((prog->type == BPF_PROG_TYPE_TRACING || in bpf_program__load()
6761 prog->type == BPF_PROG_TYPE_LSM || in bpf_program__load()
6762 prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { in bpf_program__load()
6766 prog->attach_btf_id = btf_id; in bpf_program__load()
6769 if (prog->instances.nr < 0 || !prog->instances.fds) { in bpf_program__load()
6770 if (prog->preprocessor) { in bpf_program__load()
6772 prog->name); in bpf_program__load()
6773 return -LIBBPF_ERRNO__INTERNAL; in bpf_program__load()
6776 prog->instances.fds = malloc(sizeof(int)); in bpf_program__load()
6777 if (!prog->instances.fds) { in bpf_program__load()
6779 return -ENOMEM; in bpf_program__load()
6781 prog->instances.nr = 1; in bpf_program__load()
6782 prog->instances.fds[0] = -1; in bpf_program__load()
6785 if (!prog->preprocessor) { in bpf_program__load()
6786 if (prog->instances.nr != 1) { in bpf_program__load()
6788 prog->name, prog->instances.nr); in bpf_program__load()
6790 err = load_program(prog, prog->insns, prog->insns_cnt, in bpf_program__load()
6793 prog->instances.fds[0] = fd; in bpf_program__load()
6797 for (i = 0; i < prog->instances.nr; i++) { in bpf_program__load()
6799 bpf_program_prep_t preprocessor = prog->preprocessor; in bpf_program__load()
6802 err = preprocessor(prog, i, prog->insns, in bpf_program__load()
6803 prog->insns_cnt, &result); in bpf_program__load()
6806 i, prog->name); in bpf_program__load()
6812 i, prog->name); in bpf_program__load()
6813 prog->instances.fds[i] = -1; in bpf_program__load()
6815 *result.pfd = -1; in bpf_program__load()
6823 i, prog->name); in bpf_program__load()
6829 prog->instances.fds[i] = fd; in bpf_program__load()
6833 pr_warn("failed to load program '%s'\n", prog->name); in bpf_program__load()
6834 zfree(&prog->insns); in bpf_program__load()
6835 prog->insns_cnt = 0; in bpf_program__load()
6846 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__load_progs()
6847 prog = &obj->programs[i]; in bpf_object__load_progs()
6853 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__load_progs()
6854 prog = &obj->programs[i]; in bpf_object__load_progs()
6857 if (!prog->load) { in bpf_object__load_progs()
6858 pr_debug("prog '%s': skipped loading\n", prog->name); in bpf_object__load_progs()
6861 prog->log_level |= log_level; in bpf_object__load_progs()
6862 err = bpf_program__load(prog, obj->license, obj->kern_version); in bpf_object__load_progs()
6884 return ERR_PTR(-LIBBPF_ERRNO__LIBELF); in __bpf_object__open()
6888 return ERR_PTR(-EINVAL); in __bpf_object__open()
6893 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", in __bpf_object__open()
6908 obj->kconfig = strdup(kconfig); in __bpf_object__open()
6909 if (!obj->kconfig) in __bpf_object__open()
6910 return ERR_PTR(-ENOMEM); in __bpf_object__open()
6925 prog->sec_def = find_sec_def(prog->sec_name); in __bpf_object__open()
6926 if (!prog->sec_def) in __bpf_object__open()
6930 if (prog->sec_def->is_sleepable) in __bpf_object__open()
6931 prog->prog_flags |= BPF_F_SLEEPABLE; in __bpf_object__open()
6932 bpf_program__set_type(prog, prog->sec_def->prog_type); in __bpf_object__open()
6934 prog->sec_def->expected_attach_type); in __bpf_object__open()
6936 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING || in __bpf_object__open()
6937 prog->sec_def->prog_type == BPF_PROG_TYPE_EXT) in __bpf_object__open()
6938 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); in __bpf_object__open()
6955 if (!attr->file) in __bpf_object__open_xattr()
6958 pr_debug("loading %s\n", attr->file); in __bpf_object__open_xattr()
6959 return __bpf_object__open(attr->file, NULL, 0, &opts); in __bpf_object__open_xattr()
6981 return ERR_PTR(-EINVAL); in bpf_object__open_file()
6993 return ERR_PTR(-EINVAL); in bpf_object__open_mem()
7004 /* wrong default, but backwards-compatible */ in bpf_object__open_buffer()
7008 /* returning NULL is wrong, but backwards-compatible */ in bpf_object__open_buffer()
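/*
 * Editor's usage sketch for the open APIs above ("prog.bpf.o" and "my_obj"
 * are made-up names). In this libbpf version the open functions return
 * ERR_PTR-encoded pointers, so errors are read back with libbpf_get_error():
 */
#include <bpf/libbpf.h>

static struct bpf_object *open_example(void)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
		.object_name = "my_obj",	/* hypothetical override */
	);
	struct bpf_object *obj;

	obj = bpf_object__open_file("prog.bpf.o", &opts);
	if (libbpf_get_error(obj))
		return NULL;
	return obj;
}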
7020 return -EINVAL; in bpf_object__unload()
7022 for (i = 0; i < obj->nr_maps; i++) { in bpf_object__unload()
7023 zclose(obj->maps[i].fd); in bpf_object__unload()
7024 if (obj->maps[i].st_ops) in bpf_object__unload()
7025 zfree(&obj->maps[i].st_ops->kern_vdata); in bpf_object__unload()
7028 for (i = 0; i < obj->nr_programs; i++) in bpf_object__unload()
7029 bpf_program__unload(&obj->programs[i]); in bpf_object__unload()
7043 return -ENOTSUP; in bpf_object__sanitize_maps()
7046 m->def.map_flags ^= BPF_F_MMAPABLE; in bpf_object__sanitize_maps()
7062 err = -errno; in bpf_object__read_kallsyms_file()
7074 err = -EINVAL; in bpf_object__read_kallsyms_file()
7079 if (!ext || ext->type != EXT_KSYM) in bpf_object__read_kallsyms_file()
7082 if (ext->is_set && ext->ksym.addr != sym_addr) { in bpf_object__read_kallsyms_file()
7084 sym_name, ext->ksym.addr, sym_addr); in bpf_object__read_kallsyms_file()
7085 err = -EINVAL; in bpf_object__read_kallsyms_file()
7088 if (!ext->is_set) { in bpf_object__read_kallsyms_file()
7089 ext->is_set = true; in bpf_object__read_kallsyms_file()
7090 ext->ksym.addr = sym_addr; in bpf_object__read_kallsyms_file()
7105 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_ksyms_btf_id()
7111 ext = &obj->externs[i]; in bpf_object__resolve_ksyms_btf_id()
7112 if (ext->type != EXT_KSYM || !ext->ksym.type_id) in bpf_object__resolve_ksyms_btf_id()
7115 id = btf__find_by_name_kind(obj->btf_vmlinux, ext->name, in bpf_object__resolve_ksyms_btf_id()
7119 ext->name); in bpf_object__resolve_ksyms_btf_id()
7120 return -ESRCH; in bpf_object__resolve_ksyms_btf_id()
7124 local_type_id = ext->ksym.type_id; in bpf_object__resolve_ksyms_btf_id()
7127 targ_var = btf__type_by_id(obj->btf_vmlinux, id); in bpf_object__resolve_ksyms_btf_id()
7128 targ_var_name = btf__name_by_offset(obj->btf_vmlinux, in bpf_object__resolve_ksyms_btf_id()
7129 targ_var->name_off); in bpf_object__resolve_ksyms_btf_id()
7130 targ_type = skip_mods_and_typedefs(obj->btf_vmlinux, in bpf_object__resolve_ksyms_btf_id()
7131 targ_var->type, in bpf_object__resolve_ksyms_btf_id()
7134 ret = bpf_core_types_are_compat(obj->btf, local_type_id, in bpf_object__resolve_ksyms_btf_id()
7135 obj->btf_vmlinux, targ_type_id); in bpf_object__resolve_ksyms_btf_id()
7140 local_type = btf__type_by_id(obj->btf, local_type_id); in bpf_object__resolve_ksyms_btf_id()
7141 local_name = btf__name_by_offset(obj->btf, in bpf_object__resolve_ksyms_btf_id()
7142 local_type->name_off); in bpf_object__resolve_ksyms_btf_id()
7143 targ_name = btf__name_by_offset(obj->btf_vmlinux, in bpf_object__resolve_ksyms_btf_id()
7144 targ_type->name_off); in bpf_object__resolve_ksyms_btf_id()
7147 ext->name, local_type_id, in bpf_object__resolve_ksyms_btf_id()
7150 return -EINVAL; in bpf_object__resolve_ksyms_btf_id()
7153 ext->is_set = true; in bpf_object__resolve_ksyms_btf_id()
7154 ext->ksym.vmlinux_btf_id = id; in bpf_object__resolve_ksyms_btf_id()
7156 ext->name, id, btf_kind_str(targ_var), targ_var_name); in bpf_object__resolve_ksyms_btf_id()
7170 if (obj->nr_extern == 0) in bpf_object__resolve_externs()
7173 if (obj->kconfig_map_idx >= 0) in bpf_object__resolve_externs()
7174 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped; in bpf_object__resolve_externs()
7176 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_externs()
7177 ext = &obj->externs[i]; in bpf_object__resolve_externs()
7179 if (ext->type == EXT_KCFG && in bpf_object__resolve_externs()
7180 strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { in bpf_object__resolve_externs()
7181 void *ext_val = kcfg_data + ext->kcfg.data_off; in bpf_object__resolve_externs()
7186 return -EINVAL; in bpf_object__resolve_externs()
7191 pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver); in bpf_object__resolve_externs()
7192 } else if (ext->type == EXT_KCFG && in bpf_object__resolve_externs()
7193 strncmp(ext->name, "CONFIG_", 7) == 0) { in bpf_object__resolve_externs()
7195 } else if (ext->type == EXT_KSYM) { in bpf_object__resolve_externs()
7196 if (ext->ksym.type_id) in bpf_object__resolve_externs()
7201 pr_warn("unrecognized extern '%s'\n", ext->name); in bpf_object__resolve_externs()
7202 return -EINVAL; in bpf_object__resolve_externs()
7208 return -EINVAL; in bpf_object__resolve_externs()
7210 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_externs()
7211 ext = &obj->externs[i]; in bpf_object__resolve_externs()
7212 if (ext->type == EXT_KCFG && !ext->is_set) { in bpf_object__resolve_externs()
7221 return -EINVAL; in bpf_object__resolve_externs()
7226 return -EINVAL; in bpf_object__resolve_externs()
7231 return -EINVAL; in bpf_object__resolve_externs()
7233 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_externs()
7234 ext = &obj->externs[i]; in bpf_object__resolve_externs()
7236 if (!ext->is_set && !ext->is_weak) { in bpf_object__resolve_externs()
7237 pr_warn("extern %s (strong) not resolved\n", ext->name); in bpf_object__resolve_externs()
7238 return -ESRCH; in bpf_object__resolve_externs()
7239 } else if (!ext->is_set) { in bpf_object__resolve_externs()
7241 ext->name); in bpf_object__resolve_externs()
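/*
 * Editor's sketch of the BPF-program-side declarations that the extern
 * resolution above services (assumes the __kconfig/__weak/__ksym macros
 * from <bpf/bpf_helpers.h>; CONFIG_HZ is just an example symbol):
 */
extern unsigned int LINUX_KERNEL_VERSION __kconfig;	/* special kcfg extern */
extern int CONFIG_HZ __kconfig __weak;			/* weak: left 0 if unset */
/* with vmlinux.h in scope, a BTF-typed ksym extern would look like:
 *	extern const struct rq runqueues __ksym;
 */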
7254 return -EINVAL; in bpf_object__load_xattr()
7255 obj = attr->obj; in bpf_object__load_xattr()
7257 return -EINVAL; in bpf_object__load_xattr()
7259 if (obj->loaded) { in bpf_object__load_xattr()
7260 pr_warn("object '%s': load can't be attempted twice\n", obj->name); in bpf_object__load_xattr()
7261 return -EINVAL; in bpf_object__load_xattr()
7266 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); in bpf_object__load_xattr()
7271 err = err ? : bpf_object__relocate(obj, attr->target_btf_path); in bpf_object__load_xattr()
7272 err = err ? : bpf_object__load_progs(obj, attr->log_level); in bpf_object__load_xattr()
7274 btf__free(obj->btf_vmlinux); in bpf_object__load_xattr()
7275 obj->btf_vmlinux = NULL; in bpf_object__load_xattr()
7277 obj->loaded = true; /* whether the load succeeded or not */ in bpf_object__load_xattr()
7284 /* unpin any maps that were auto-pinned during load */ in bpf_object__load_xattr()
7285 for (i = 0; i < obj->nr_maps; i++) in bpf_object__load_xattr()
7286 if (obj->maps[i].pinned && !obj->maps[i].reused) in bpf_object__load_xattr()
7287 bpf_map__unpin(&obj->maps[i], NULL); in bpf_object__load_xattr()
7290 pr_warn("failed to load object '%s'\n", obj->path); in bpf_object__load_xattr()
7311 return -ENOMEM; in make_parent_dir()
7315 err = -errno; in make_parent_dir()
7319 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); in make_parent_dir()
7333 return -EINVAL; in check_path()
7337 return -ENOMEM; in check_path()
7343 err = -errno; in check_path()
7349 err = -EINVAL; in check_path()
7371 return -EINVAL; in bpf_program__pin_instance()
7374 if (instance < 0 || instance >= prog->instances.nr) { in bpf_program__pin_instance()
7376 instance, prog->name, prog->instances.nr); in bpf_program__pin_instance()
7377 return -EINVAL; in bpf_program__pin_instance()
7380 if (bpf_obj_pin(prog->instances.fds[instance], path)) { in bpf_program__pin_instance()
7381 err = -errno; in bpf_program__pin_instance()
7402 return -EINVAL; in bpf_program__unpin_instance()
7405 if (instance < 0 || instance >= prog->instances.nr) { in bpf_program__unpin_instance()
7407 instance, prog->name, prog->instances.nr); in bpf_program__unpin_instance()
7408 return -EINVAL; in bpf_program__unpin_instance()
7413 return -errno; in bpf_program__unpin_instance()
7433 return -EINVAL; in bpf_program__pin()
7436 if (prog->instances.nr <= 0) { in bpf_program__pin()
7437 pr_warn("no instances of prog %s to pin\n", prog->name); in bpf_program__pin()
7438 return -EINVAL; in bpf_program__pin()
7441 if (prog->instances.nr == 1) { in bpf_program__pin()
7446 for (i = 0; i < prog->instances.nr; i++) { in bpf_program__pin()
7452 err = -EINVAL; in bpf_program__pin()
7455 err = -ENAMETOOLONG; in bpf_program__pin()
7467 for (i = i - 1; i >= 0; i--) { in bpf_program__pin()
7495 return -EINVAL; in bpf_program__unpin()
7498 if (prog->instances.nr <= 0) { in bpf_program__unpin()
7499 pr_warn("no instances of prog %s to pin\n", prog->name); in bpf_program__unpin()
7500 return -EINVAL; in bpf_program__unpin()
7503 if (prog->instances.nr == 1) { in bpf_program__unpin()
7508 for (i = 0; i < prog->instances.nr; i++) { in bpf_program__unpin()
7514 return -EINVAL; in bpf_program__unpin()
7516 return -ENAMETOOLONG; in bpf_program__unpin()
7525 return -errno; in bpf_program__unpin()
7537 return -EINVAL; in bpf_map__pin()
7540 if (map->pin_path) { in bpf_map__pin()
7541 if (path && strcmp(path, map->pin_path)) { in bpf_map__pin()
7543 bpf_map__name(map), map->pin_path, path); in bpf_map__pin()
7544 return -EINVAL; in bpf_map__pin()
7545 } else if (map->pinned) { in bpf_map__pin()
7546 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", in bpf_map__pin()
7547 bpf_map__name(map), map->pin_path); in bpf_map__pin()
7554 return -EINVAL; in bpf_map__pin()
7555 } else if (map->pinned) { in bpf_map__pin()
7557 return -EEXIST; in bpf_map__pin()
7560 map->pin_path = strdup(path); in bpf_map__pin()
7561 if (!map->pin_path) { in bpf_map__pin()
7562 err = -errno; in bpf_map__pin()
7567 err = make_parent_dir(map->pin_path); in bpf_map__pin()
7571 err = check_path(map->pin_path); in bpf_map__pin()
7575 if (bpf_obj_pin(map->fd, map->pin_path)) { in bpf_map__pin()
7576 err = -errno; in bpf_map__pin()
7580 map->pinned = true; in bpf_map__pin()
7581 pr_debug("pinned map '%s'\n", map->pin_path); in bpf_map__pin()
7586 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); in bpf_map__pin()
7597 return -EINVAL; in bpf_map__unpin()
7600 if (map->pin_path) { in bpf_map__unpin()
7601 if (path && strcmp(path, map->pin_path)) { in bpf_map__unpin()
7603 bpf_map__name(map), map->pin_path, path); in bpf_map__unpin()
7604 return -EINVAL; in bpf_map__unpin()
7606 path = map->pin_path; in bpf_map__unpin()
7610 return -EINVAL; in bpf_map__unpin()
7619 return -errno; in bpf_map__unpin()
7621 map->pinned = false; in bpf_map__unpin()
7634 return -errno; in bpf_map__set_pin_path()
7637 free(map->pin_path); in bpf_map__set_pin_path()
7638 map->pin_path = new; in bpf_map__set_pin_path()
7644 return map->pin_path; in bpf_map__get_pin_path()
7649 return map->pinned; in bpf_map__is_pinned()
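/*
 * Editor's pinning sketch ("/sys/fs/bpf/my_map" is a made-up path): a pin
 * path set before load lets libbpf auto-pin (and later reuse) the map,
 * while bpf_map__pin()/bpf_map__unpin() manage it explicitly.
 */
static int pin_example(struct bpf_map *map)
{
	int err;

	err = bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
	if (err)
		return err;
	/* after a successful load, bpf_map__is_pinned(map) reports true */
	return 0;
}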
7658 return -ENOENT; in bpf_object__pin_maps()
7660 if (!obj->loaded) { in bpf_object__pin_maps()
7662 return -ENOENT; in bpf_object__pin_maps()
7675 err = -EINVAL; in bpf_object__pin_maps()
7678 err = -ENAMETOOLONG; in bpf_object__pin_maps()
7682 } else if (!map->pin_path) { in bpf_object__pin_maps()
7695 if (!map->pin_path) in bpf_object__pin_maps()
7710 return -ENOENT; in bpf_object__unpin_maps()
7722 return -EINVAL; in bpf_object__unpin_maps()
7724 return -ENAMETOOLONG; in bpf_object__unpin_maps()
7726 } else if (!map->pin_path) { in bpf_object__unpin_maps()
7744 return -ENOENT; in bpf_object__pin_programs()
7746 if (!obj->loaded) { in bpf_object__pin_programs()
7748 return -ENOENT; in bpf_object__pin_programs()
7756 prog->pin_name); in bpf_object__pin_programs()
7758 err = -EINVAL; in bpf_object__pin_programs()
7761 err = -ENAMETOOLONG; in bpf_object__pin_programs()
7778 prog->pin_name); in bpf_object__pin_programs()
7796 return -ENOENT; in bpf_object__unpin_programs()
7803 prog->pin_name); in bpf_object__unpin_programs()
7805 return -EINVAL; in bpf_object__unpin_programs()
7807 return -ENAMETOOLONG; in bpf_object__unpin_programs()
7836 if (map->clear_priv) in bpf_map__destroy()
7837 map->clear_priv(map, map->priv); in bpf_map__destroy()
7838 map->priv = NULL; in bpf_map__destroy()
7839 map->clear_priv = NULL; in bpf_map__destroy()
7841 if (map->inner_map) { in bpf_map__destroy()
7842 bpf_map__destroy(map->inner_map); in bpf_map__destroy()
7843 zfree(&map->inner_map); in bpf_map__destroy()
7846 zfree(&map->init_slots); in bpf_map__destroy()
7847 map->init_slots_sz = 0; in bpf_map__destroy()
7849 if (map->mmaped) { in bpf_map__destroy()
7850 munmap(map->mmaped, bpf_map_mmap_sz(map)); in bpf_map__destroy()
7851 map->mmaped = NULL; in bpf_map__destroy()
7854 if (map->st_ops) { in bpf_map__destroy()
7855 zfree(&map->st_ops->data); in bpf_map__destroy()
7856 zfree(&map->st_ops->progs); in bpf_map__destroy()
7857 zfree(&map->st_ops->kern_func_off); in bpf_map__destroy()
7858 zfree(&map->st_ops); in bpf_map__destroy()
7861 zfree(&map->name); in bpf_map__destroy()
7862 zfree(&map->pin_path); in bpf_map__destroy()
7864 if (map->fd >= 0) in bpf_map__destroy()
7865 zclose(map->fd); in bpf_map__destroy()
7875 if (obj->clear_priv) in bpf_object__close()
7876 obj->clear_priv(obj, obj->priv); in bpf_object__close()
7880 btf__free(obj->btf); in bpf_object__close()
7881 btf_ext__free(obj->btf_ext); in bpf_object__close()
7883 for (i = 0; i < obj->nr_maps; i++) in bpf_object__close()
7884 bpf_map__destroy(&obj->maps[i]); in bpf_object__close()
7886 zfree(&obj->kconfig); in bpf_object__close()
7887 zfree(&obj->externs); in bpf_object__close()
7888 obj->nr_extern = 0; in bpf_object__close()
7890 zfree(&obj->maps); in bpf_object__close()
7891 obj->nr_maps = 0; in bpf_object__close()
7893 if (obj->programs && obj->nr_programs) { in bpf_object__close()
7894 for (i = 0; i < obj->nr_programs; i++) in bpf_object__close()
7895 bpf_program__exit(&obj->programs[i]); in bpf_object__close()
7897 zfree(&obj->programs); in bpf_object__close()
7899 list_del(&obj->list); in bpf_object__close()
7916 if (&next->list == &bpf_objects_list) in bpf_object__next()
7924 return obj ? obj->name : ERR_PTR(-EINVAL); in bpf_object__name()
7929 return obj ? obj->kern_version : 0; in bpf_object__kversion()
7934 return obj ? obj->btf : NULL; in bpf_object__btf()
7939 return obj->btf ? btf__fd(obj->btf) : -1; in bpf_object__btf_fd()
7945 if (obj->priv && obj->clear_priv) in bpf_object__set_priv()
7946 obj->clear_priv(obj, obj->priv); in bpf_object__set_priv()
7948 obj->priv = priv; in bpf_object__set_priv()
7949 obj->clear_priv = clear_priv; in bpf_object__set_priv()
7955 return obj ? obj->priv : ERR_PTR(-EINVAL); in bpf_object__priv()
7962 size_t nr_programs = obj->nr_programs; in __bpf_program__iter()
7970 return forward ? &obj->programs[0] : in __bpf_program__iter()
7971 &obj->programs[nr_programs - 1]; in __bpf_program__iter()
7973 if (p->obj != obj) { in __bpf_program__iter()
7978 idx = (p - obj->programs) + (forward ? 1 : -1); in __bpf_program__iter()
7979 if (idx >= obj->nr_programs || idx < 0) in __bpf_program__iter()
7981 return &obj->programs[idx]; in __bpf_program__iter()
8011 if (prog->priv && prog->clear_priv) in bpf_program__set_priv()
8012 prog->clear_priv(prog, prog->priv); in bpf_program__set_priv()
8014 prog->priv = priv; in bpf_program__set_priv()
8015 prog->clear_priv = clear_priv; in bpf_program__set_priv()
8021 return prog ? prog->priv : ERR_PTR(-EINVAL); in bpf_program__priv()
8026 prog->prog_ifindex = ifindex; in bpf_program__set_ifindex()
8031 return prog->name; in bpf_program__name()
8036 return prog->sec_name; in bpf_program__section_name()
8043 title = prog->sec_name; in bpf_program__title()
8048 return ERR_PTR(-ENOMEM); in bpf_program__title()
8057 return prog->load; in bpf_program__autoload()
8062 if (prog->obj->loaded) in bpf_program__set_autoload()
8063 return -EINVAL; in bpf_program__set_autoload()
8065 prog->load = autoload; in bpf_program__set_autoload()
8076 return prog->insns_cnt * BPF_INSN_SZ; in bpf_program__size()
8085 return -EINVAL; in bpf_program__set_prep()
8087 if (prog->instances.nr > 0 || prog->instances.fds) { in bpf_program__set_prep()
8088 pr_warn("Can't set pre-processor after loading\n"); in bpf_program__set_prep()
8089 return -EINVAL; in bpf_program__set_prep()
8095 return -ENOMEM; in bpf_program__set_prep()
8098 /* fill all fd with -1 */ in bpf_program__set_prep()
8099 memset(instances_fds, -1, sizeof(int) * nr_instances); in bpf_program__set_prep()
8101 prog->instances.nr = nr_instances; in bpf_program__set_prep()
8102 prog->instances.fds = instances_fds; in bpf_program__set_prep()
8103 prog->preprocessor = prep; in bpf_program__set_prep()
8112 return -EINVAL; in bpf_program__nth_fd()
8114 if (n >= prog->instances.nr || n < 0) { in bpf_program__nth_fd()
8116 n, prog->name, prog->instances.nr); in bpf_program__nth_fd()
8117 return -EINVAL; in bpf_program__nth_fd()
8120 fd = prog->instances.fds[n]; in bpf_program__nth_fd()
8123 n, prog->name); in bpf_program__nth_fd()
8124 return -ENOENT; in bpf_program__nth_fd()
8132 return prog->type; in bpf_program__get_type()
8137 prog->type = type; in bpf_program__set_type()
8143 return prog ? (prog->type == type) : false; in bpf_program__is_type()
8150 return -EINVAL; \
8177 return prog->expected_attach_type; in bpf_program__get_expected_attach_type()
8183 prog->expected_attach_type = type; in bpf_program__set_expected_attach_type()
8190 .len = sizeof(string) - 1, \
8220 .len = sizeof(sec_pfx) - 1, \
8435 return -EINVAL; in libbpf_prog_type_by_name()
8439 *prog_type = sec_def->prog_type; in libbpf_prog_type_by_name()
8440 *expected_attach_type = sec_def->expected_attach_type; in libbpf_prog_type_by_name()
8451 return -ESRCH; in libbpf_prog_type_by_name()
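/*
 * Editor's usage sketch for the section-name lookup above: callers map an
 * ELF section name string to program/attach types like this.
 */
static int types_for_section(void)
{
	enum bpf_prog_type prog_type;
	enum bpf_attach_type attach_type;

	return libbpf_prog_type_by_name("tracepoint/sched/sched_switch",
					&prog_type, &attach_type);
}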
8460 for (i = 0; i < obj->nr_maps; i++) { in find_struct_ops_map_by_offset()
8461 map = &obj->maps[i]; in find_struct_ops_map_by_offset()
8464 if (map->sec_offset <= offset && in find_struct_ops_map_by_offset()
8465 offset - map->sec_offset < map->def.value_size) in find_struct_ops_map_by_offset()
8472 /* Collect relocations from the ELF and populate st_ops->progs[] */
8490 symbols = obj->efile.symbols; in bpf_object__collect_st_ops_relos()
8491 btf = obj->btf; in bpf_object__collect_st_ops_relos()
8492 nrels = shdr->sh_size / shdr->sh_entsize; in bpf_object__collect_st_ops_relos()
8496 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_st_ops_relos()
8502 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_st_ops_relos()
8510 return -EINVAL; in bpf_object__collect_st_ops_relos()
8513 moff = rel.r_offset - map->sec_offset; in bpf_object__collect_st_ops_relos()
8515 st_ops = map->st_ops; in bpf_object__collect_st_ops_relos()
8516 …pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %z… in bpf_object__collect_st_ops_relos()
8517 map->name, in bpf_object__collect_st_ops_relos()
8521 map->sec_offset, sym.st_name, name); in bpf_object__collect_st_ops_relos()
8524 pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n", in bpf_object__collect_st_ops_relos()
8525 map->name, (size_t)rel.r_offset, shdr_idx); in bpf_object__collect_st_ops_relos()
8526 return -LIBBPF_ERRNO__RELOC; in bpf_object__collect_st_ops_relos()
8530 map->name, (unsigned long long)sym.st_value); in bpf_object__collect_st_ops_relos()
8531 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_st_ops_relos()
8535 member = find_member_by_offset(st_ops->type, moff * 8); in bpf_object__collect_st_ops_relos()
8538 map->name, moff); in bpf_object__collect_st_ops_relos()
8539 return -EINVAL; in bpf_object__collect_st_ops_relos()
8541 member_idx = member - btf_members(st_ops->type); in bpf_object__collect_st_ops_relos()
8542 name = btf__name_by_offset(btf, member->name_off); in bpf_object__collect_st_ops_relos()
8544 if (!resolve_func_ptr(btf, member->type, NULL)) { in bpf_object__collect_st_ops_relos()
8546 map->name, name); in bpf_object__collect_st_ops_relos()
8547 return -EINVAL; in bpf_object__collect_st_ops_relos()
8553 map->name, shdr_idx, name); in bpf_object__collect_st_ops_relos()
8554 return -EINVAL; in bpf_object__collect_st_ops_relos()
8557 if (prog->type == BPF_PROG_TYPE_UNSPEC) { in bpf_object__collect_st_ops_relos()
8560 sec_def = find_sec_def(prog->sec_name); in bpf_object__collect_st_ops_relos()
8562 sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) { in bpf_object__collect_st_ops_relos()
8564 prog->type = sec_def->prog_type; in bpf_object__collect_st_ops_relos()
8568 prog->type = BPF_PROG_TYPE_STRUCT_OPS; in bpf_object__collect_st_ops_relos()
8569 prog->attach_btf_id = st_ops->type_id; in bpf_object__collect_st_ops_relos()
8570 prog->expected_attach_type = member_idx; in bpf_object__collect_st_ops_relos()
8571 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS || in bpf_object__collect_st_ops_relos()
8572 prog->attach_btf_id != st_ops->type_id || in bpf_object__collect_st_ops_relos()
8573 prog->expected_attach_type != member_idx) { in bpf_object__collect_st_ops_relos()
8576 st_ops->progs[member_idx] = prog; in bpf_object__collect_st_ops_relos()
8583 map->name, prog->name, prog->sec_name, prog->type, in bpf_object__collect_st_ops_relos()
8584 prog->attach_btf_id, prog->expected_attach_type, name); in bpf_object__collect_st_ops_relos()
8585 return -EINVAL; in bpf_object__collect_st_ops_relos()
8606 return -ENAMETOOLONG; in find_btf_by_prefix_kind()
8642 return -EINVAL; in libbpf_find_vmlinux_btf_id()
8655 int err = -EINVAL; in libbpf_find_prog_btf_id()
8661 return -EINVAL; in libbpf_find_prog_btf_id()
8663 info = &info_linear->info; in libbpf_find_prog_btf_id()
8664 if (!info->btf_id) { in libbpf_find_prog_btf_id()
8668 if (btf__get_from_id(info->btf_id, &btf)) { in libbpf_find_prog_btf_id()
8685 enum bpf_attach_type attach_type = prog->expected_attach_type; in libbpf_find_attach_btf_id()
8686 __u32 attach_prog_fd = prog->attach_prog_fd; in libbpf_find_attach_btf_id()
8687 const char *name = prog->sec_name; in libbpf_find_attach_btf_id()
8691 return -EINVAL; in libbpf_find_attach_btf_id()
8702 err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux, in libbpf_find_attach_btf_id()
8708 return -ESRCH; in libbpf_find_attach_btf_id()
8718 return -EINVAL; in libbpf_attach_type_by_name()
8724 return -EINVAL; in libbpf_attach_type_by_name()
8735 return -EINVAL; in libbpf_attach_type_by_name()
8740 return map ? map->fd : -EINVAL; in bpf_map__fd()
8745 return map ? &map->def : ERR_PTR(-EINVAL); in bpf_map__def()
8750 return map ? map->name : NULL; in bpf_map__name()
8755 return map->def.type; in bpf_map__type()
8760 if (map->fd >= 0) in bpf_map__set_type()
8761 return -EBUSY; in bpf_map__set_type()
8762 map->def.type = type; in bpf_map__set_type()
8768 return map->def.map_flags; in bpf_map__map_flags()
8773 if (map->fd >= 0) in bpf_map__set_map_flags()
8774 return -EBUSY; in bpf_map__set_map_flags()
8775 map->def.map_flags = flags; in bpf_map__set_map_flags()
8781 return map->numa_node; in bpf_map__numa_node()
8786 if (map->fd >= 0) in bpf_map__set_numa_node()
8787 return -EBUSY; in bpf_map__set_numa_node()
8788 map->numa_node = numa_node; in bpf_map__set_numa_node()
8794 return map->def.key_size; in bpf_map__key_size()
8799 if (map->fd >= 0) in bpf_map__set_key_size()
8800 return -EBUSY; in bpf_map__set_key_size()
8801 map->def.key_size = size; in bpf_map__set_key_size()
8807 return map->def.value_size; in bpf_map__value_size()
8812 if (map->fd >= 0) in bpf_map__set_value_size()
8813 return -EBUSY; in bpf_map__set_value_size()
8814 map->def.value_size = size; in bpf_map__set_value_size()
8820 return map ? map->btf_key_type_id : 0; in bpf_map__btf_key_type_id()
8825 return map ? map->btf_value_type_id : 0; in bpf_map__btf_value_type_id()
8832 return -EINVAL; in bpf_map__set_priv()
8834 if (map->priv) { in bpf_map__set_priv()
8835 if (map->clear_priv) in bpf_map__set_priv()
8836 map->clear_priv(map, map->priv); in bpf_map__set_priv()
8839 map->priv = priv; in bpf_map__set_priv()
8840 map->clear_priv = clear_priv; in bpf_map__set_priv()
8846 return map ? map->priv : ERR_PTR(-EINVAL); in bpf_map__priv()
8852 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG || in bpf_map__set_initial_value()
8853 size != map->def.value_size || map->fd >= 0) in bpf_map__set_initial_value()
8854 return -EINVAL; in bpf_map__set_initial_value()
8856 memcpy(map->mmaped, data, size); in bpf_map__set_initial_value()
8862 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; in bpf_map__is_offload_neutral()
8867 return map->libbpf_type != LIBBPF_MAP_UNSPEC; in bpf_map__is_internal()
8872 return map->map_ifindex; in bpf_map__ifindex()
8877 if (map->fd >= 0) in bpf_map__set_ifindex()
8878 return -EBUSY; in bpf_map__set_ifindex()
8879 map->map_ifindex = ifindex; in bpf_map__set_ifindex()
8885 if (!bpf_map_type__is_map_in_map(map->def.type)) { in bpf_map__set_inner_map_fd()
8887 return -EINVAL; in bpf_map__set_inner_map_fd()
8889 if (map->inner_map_fd != -1) { in bpf_map__set_inner_map_fd()
8891 return -EINVAL; in bpf_map__set_inner_map_fd()
8893 map->inner_map_fd = fd; in bpf_map__set_inner_map_fd()
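/*
 * Editor's map-in-map sketch: before load, an inner-map FD created with
 * the low-level API (bpf_create_map() from <bpf/bpf.h>) can be installed
 * as the template for an outer map via the setter above.
 */
static int set_inner(struct bpf_map *outer)
{
	int inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				      sizeof(int), 1 /* max_entries */, 0);

	if (inner_fd < 0)
		return inner_fd;
	return bpf_map__set_inner_map_fd(outer, inner_fd);
}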
8903 if (!obj || !obj->maps) in __bpf_map__iter()
8906 s = obj->maps; in __bpf_map__iter()
8907 e = obj->maps + obj->nr_maps; in __bpf_map__iter()
8915 idx = (m - obj->maps) + i; in __bpf_map__iter()
8916 if (idx >= obj->nr_maps || idx < 0) in __bpf_map__iter()
8918 return &obj->maps[idx]; in __bpf_map__iter()
8925 return obj->maps; in bpf_map__next()
8934 if (!obj->nr_maps) in bpf_map__prev()
8936 return obj->maps + obj->nr_maps - 1; in bpf_map__prev()
8939 return __bpf_map__iter(next, obj, -1); in bpf_map__prev()
8948 if (pos->name && !strcmp(pos->name, name)) in bpf_object__find_map_by_name()
8963 return ERR_PTR(-ENOTSUP); in bpf_object__find_map_by_offset()
8994 return -EINVAL; in bpf_prog_load_xattr()
8995 if (!attr->file) in bpf_prog_load_xattr()
8996 return -EINVAL; in bpf_prog_load_xattr()
8998 open_attr.file = attr->file; in bpf_prog_load_xattr()
8999 open_attr.prog_type = attr->prog_type; in bpf_prog_load_xattr()
9003 return -ENOENT; in bpf_prog_load_xattr()
9006 enum bpf_attach_type attach_type = attr->expected_attach_type; in bpf_prog_load_xattr()
9009 * attr->prog_type, if specified, as an override to whatever in bpf_prog_load_xattr()
9012 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) { in bpf_prog_load_xattr()
9013 bpf_program__set_type(prog, attr->prog_type); in bpf_prog_load_xattr()
9023 return -EINVAL; in bpf_prog_load_xattr()
9026 prog->prog_ifindex = attr->ifindex; in bpf_prog_load_xattr()
9027 prog->log_level = attr->log_level; in bpf_prog_load_xattr()
9028 prog->prog_flags |= attr->prog_flags; in bpf_prog_load_xattr()
9035 map->map_ifindex = attr->ifindex; in bpf_prog_load_xattr()
9041 return -ENOENT; in bpf_prog_load_xattr()
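/*
 * Editor's one-shot load sketch for the convenience wrapper above
 * ("prog.bpf.o" is a made-up file name):
 */
static int quick_load(struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_prog_load_attr attr = {
		.file = "prog.bpf.o",
		.prog_type = BPF_PROG_TYPE_XDP,	/* overrides section guess */
	};

	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
}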
9059 int fd; /* hook FD, -1 if not applicable */
9081 link->disconnected = true; in bpf_link__disconnect()
9091 if (!link->disconnected && link->detach) in bpf_link__destroy()
9092 err = link->detach(link); in bpf_link__destroy()
9093 if (link->destroy) in bpf_link__destroy()
9094 link->destroy(link); in bpf_link__destroy()
9095 if (link->pin_path) in bpf_link__destroy()
9096 free(link->pin_path); in bpf_link__destroy()
9104 return link->fd; in bpf_link__fd()
9109 return link->pin_path; in bpf_link__pin_path()
9114 return close(link->fd); in bpf_link__detach_fd()
9124 fd = -errno; in bpf_link__open()
9132 return ERR_PTR(-ENOMEM); in bpf_link__open()
9134 link->detach = &bpf_link__detach_fd; in bpf_link__open()
9135 link->fd = fd; in bpf_link__open()
9137 link->pin_path = strdup(path); in bpf_link__open()
9138 if (!link->pin_path) { in bpf_link__open()
9140 return ERR_PTR(-ENOMEM); in bpf_link__open()
9148 return bpf_link_detach(link->fd) ? -errno : 0; in bpf_link__detach()
9155 if (link->pin_path) in bpf_link__pin()
9156 return -EBUSY; in bpf_link__pin()
9164 link->pin_path = strdup(path); in bpf_link__pin()
9165 if (!link->pin_path) in bpf_link__pin()
9166 return -ENOMEM; in bpf_link__pin()
9168 if (bpf_obj_pin(link->fd, link->pin_path)) { in bpf_link__pin()
9169 err = -errno; in bpf_link__pin()
9170 zfree(&link->pin_path); in bpf_link__pin()
9174 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); in bpf_link__pin()
9182 if (!link->pin_path) in bpf_link__unpin()
9183 return -EINVAL; in bpf_link__unpin()
9185 err = unlink(link->pin_path); in bpf_link__unpin()
9187 return -errno; in bpf_link__unpin()
9189 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); in bpf_link__unpin()
9190 zfree(&link->pin_path); in bpf_link__unpin()
9198 err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0); in bpf_link__detach_perf_event()
9200 err = -errno; in bpf_link__detach_perf_event()
9202 close(link->fd); in bpf_link__detach_perf_event()
9215 prog->name, pfd); in bpf_program__attach_perf_event()
9216 return ERR_PTR(-EINVAL); in bpf_program__attach_perf_event()
9221 prog->name); in bpf_program__attach_perf_event()
9222 return ERR_PTR(-EINVAL); in bpf_program__attach_perf_event()
9227 return ERR_PTR(-ENOMEM); in bpf_program__attach_perf_event()
9228 link->detach = &bpf_link__detach_perf_event; in bpf_program__attach_perf_event()
9229 link->fd = pfd; in bpf_program__attach_perf_event()
9232 err = -errno; in bpf_program__attach_perf_event()
9235 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); in bpf_program__attach_perf_event()
9236 if (err == -EPROTO) in bpf_program__attach_perf_event()
9238 prog->name, pfd); in bpf_program__attach_perf_event()
9242 err = -errno; in bpf_program__attach_perf_event()
9245 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); in bpf_program__attach_perf_event()
9252 * this function is expected to parse an integer in the range of [0, 2^31-1] from
9264 err = -errno; in parse_uint_from_file()
9271 err = err == EOF ? -EIO : -errno; in parse_uint_from_file()
9343 pid < 0 ? -1 : pid /* pid */, in perf_event_open_probe()
9344 pid == -1 ? 0 : -1 /* cpu */, in perf_event_open_probe()
9345 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); in perf_event_open_probe()
9347 err = -errno; in perf_event_open_probe()
9365 0 /* offset */, -1 /* pid */); in bpf_program__attach_kprobe()
9368 prog->name, retprobe ? "kretprobe" : "kprobe", func_name, in bpf_program__attach_kprobe()
9377 prog->name, retprobe ? "kretprobe" : "kprobe", func_name, in bpf_program__attach_kprobe()
9390 func_name = prog->sec_name + sec->len; in attach_kprobe()
9391 retprobe = strcmp(sec->sec, "kretprobe/") == 0; in attach_kprobe()
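/*
 * Editor's attach sketch ("do_sys_open" is only an example symbol): manual
 * kprobe attachment mirrors what the SEC("kprobe/...") handler above does.
 */
static struct bpf_link *attach_open_kprobe(struct bpf_program *prog)
{
	struct bpf_link *link;

	link = bpf_program__attach_kprobe(prog, false /* !retprobe */,
					  "do_sys_open");
	if (libbpf_get_error(link))
		return NULL;
	return link;
}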
9409 prog->name, retprobe ? "uretprobe" : "uprobe", in bpf_program__attach_uprobe()
9419 prog->name, retprobe ? "uretprobe" : "uprobe", in bpf_program__attach_uprobe()
9437 return -errno; in determine_tracepoint_id()
9441 return -E2BIG; in determine_tracepoint_id()
9465 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */, in perf_event_open_tracepoint()
9466 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); in perf_event_open_tracepoint()
9468 err = -errno; in perf_event_open_tracepoint()
9488 prog->name, tp_category, tp_name, in bpf_program__attach_tracepoint()
9497 prog->name, tp_category, tp_name, in bpf_program__attach_tracepoint()
9510 sec_name = strdup(prog->sec_name); in attach_tp()
9512 return ERR_PTR(-ENOMEM); in attach_tp()
9515 tp_cat = sec_name + sec->len; in attach_tp()
9518 link = ERR_PTR(-EINVAL); in attach_tp()
9539 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_raw_tracepoint()
9540 return ERR_PTR(-EINVAL); in bpf_program__attach_raw_tracepoint()
9545 return ERR_PTR(-ENOMEM); in bpf_program__attach_raw_tracepoint()
9546 link->detach = &bpf_link__detach_fd; in bpf_program__attach_raw_tracepoint()
9550 pfd = -errno; in bpf_program__attach_raw_tracepoint()
9553 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); in bpf_program__attach_raw_tracepoint()
9556 link->fd = pfd; in bpf_program__attach_raw_tracepoint()
9563 const char *tp_name = prog->sec_name + sec->len; in attach_raw_tp()
9577 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_btf_id()
9578 return ERR_PTR(-EINVAL); in bpf_program__attach_btf_id()
9583 return ERR_PTR(-ENOMEM); in bpf_program__attach_btf_id()
9584 link->detach = &bpf_link__detach_fd; in bpf_program__attach_btf_id()
9588 pfd = -errno; in bpf_program__attach_btf_id()
9591 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); in bpf_program__attach_btf_id()
9594 link->fd = pfd; in bpf_program__attach_btf_id()
9639 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_fd()
9640 return ERR_PTR(-EINVAL); in bpf_program__attach_fd()
9645 return ERR_PTR(-ENOMEM); in bpf_program__attach_fd()
9646 link->detach = &bpf_link__detach_fd; in bpf_program__attach_fd()
9651 link_fd = -errno; in bpf_program__attach_fd()
9654 prog->name, target_name, in bpf_program__attach_fd()
9658 link->fd = link_fd; in bpf_program__attach_fd()
9688 prog->name); in bpf_program__attach_freplace()
9689 return ERR_PTR(-EINVAL); in bpf_program__attach_freplace()
9692 if (prog->type != BPF_PROG_TYPE_EXT) { in bpf_program__attach_freplace()
9694 prog->name); in bpf_program__attach_freplace()
9695 return ERR_PTR(-EINVAL); in bpf_program__attach_freplace()
9723 return ERR_PTR(-EINVAL); in bpf_program__attach_iter()
9730 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_iter()
9731 return ERR_PTR(-EINVAL); in bpf_program__attach_iter()
9736 return ERR_PTR(-ENOMEM); in bpf_program__attach_iter()
9737 link->detach = &bpf_link__detach_fd; in bpf_program__attach_iter()
9742 link_fd = -errno; in bpf_program__attach_iter()
9745 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); in bpf_program__attach_iter()
9748 link->fd = link_fd; in bpf_program__attach_iter()
9756 sec_def = find_sec_def(prog->sec_name); in bpf_program__attach()
9757 if (!sec_def || !sec_def->attach_fn) in bpf_program__attach()
9758 return ERR_PTR(-ESRCH); in bpf_program__attach()
9760 return sec_def->attach_fn(sec_def, prog); in bpf_program__attach()
9767 if (bpf_map_delete_elem(link->fd, &zero)) in bpf_link__detach_struct_ops()
9768 return -errno; in bpf_link__detach_struct_ops()
9780 if (!bpf_map__is_struct_ops(map) || map->fd == -1) in bpf_map__attach_struct_ops()
9781 return ERR_PTR(-EINVAL); in bpf_map__attach_struct_ops()
9785 return ERR_PTR(-EINVAL); in bpf_map__attach_struct_ops()
9787 st_ops = map->st_ops; in bpf_map__attach_struct_ops()
9788 for (i = 0; i < btf_vlen(st_ops->type); i++) { in bpf_map__attach_struct_ops()
9789 struct bpf_program *prog = st_ops->progs[i]; in bpf_map__attach_struct_ops()
9797 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i]; in bpf_map__attach_struct_ops()
9801 err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0); in bpf_map__attach_struct_ops()
9803 err = -errno; in bpf_map__attach_struct_ops()
9808 link->detach = bpf_link__detach_struct_ops; in bpf_map__attach_struct_ops()
9809 link->fd = map->fd; in bpf_map__attach_struct_ops()
9821 __u64 data_tail = header->data_tail; in bpf_perf_event_read_simple()
9828 ehdr = base + (data_tail & (mmap_size - 1)); in bpf_perf_event_read_simple()
9829 ehdr_size = ehdr->size; in bpf_perf_event_read_simple()
9833 size_t len_first = base + mmap_size - copy_start; in bpf_perf_event_read_simple()
9834 size_t len_secnd = ehdr_size - len_first; in bpf_perf_event_read_simple()
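/*
 * Editor's worked example of the wrap-around math above: mmap_size is a
 * power of two, so `data_tail & (mmap_size - 1)` is the tail's offset
 * inside the ring. With mmap_size = 4096 and data_tail = 5000, the record
 * starts at 5000 & 4095 = 904; a record that runs past the end of the
 * buffer is copied out in two pieces (len_first up to the end, len_secnd
 * from the start).
 */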
9868 /* sample_cb and lost_cb are higher-level common-case callbacks */
9907 if (cpu_buf->base && in perf_buffer__free_cpu_buf()
9908 munmap(cpu_buf->base, pb->mmap_size + pb->page_size)) in perf_buffer__free_cpu_buf()
9909 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu); in perf_buffer__free_cpu_buf()
9910 if (cpu_buf->fd >= 0) { in perf_buffer__free_cpu_buf()
9911 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0); in perf_buffer__free_cpu_buf()
9912 close(cpu_buf->fd); in perf_buffer__free_cpu_buf()
9914 free(cpu_buf->buf); in perf_buffer__free_cpu_buf()
9924 if (pb->cpu_bufs) { in perf_buffer__free()
9925 for (i = 0; i < pb->cpu_cnt; i++) { in perf_buffer__free()
9926 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; in perf_buffer__free()
9931 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key); in perf_buffer__free()
9934 free(pb->cpu_bufs); in perf_buffer__free()
9936 if (pb->epoll_fd >= 0) in perf_buffer__free()
9937 close(pb->epoll_fd); in perf_buffer__free()
9938 free(pb->events); in perf_buffer__free()
9952 return ERR_PTR(-ENOMEM); in perf_buffer__open_cpu_buf()
9954 cpu_buf->pb = pb; in perf_buffer__open_cpu_buf()
9955 cpu_buf->cpu = cpu; in perf_buffer__open_cpu_buf()
9956 cpu_buf->map_key = map_key; in perf_buffer__open_cpu_buf()
9958 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu, in perf_buffer__open_cpu_buf()
9959 -1, PERF_FLAG_FD_CLOEXEC); in perf_buffer__open_cpu_buf()
9960 if (cpu_buf->fd < 0) { in perf_buffer__open_cpu_buf()
9961 err = -errno; in perf_buffer__open_cpu_buf()
9967 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size, in perf_buffer__open_cpu_buf()
9969 cpu_buf->fd, 0); in perf_buffer__open_cpu_buf()
9970 if (cpu_buf->base == MAP_FAILED) { in perf_buffer__open_cpu_buf()
9971 cpu_buf->base = NULL; in perf_buffer__open_cpu_buf()
9972 err = -errno; in perf_buffer__open_cpu_buf()
9978 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) { in perf_buffer__open_cpu_buf()
9979 err = -errno; in perf_buffer__open_cpu_buf()
10008 p.sample_cb = opts ? opts->sample_cb : NULL; in perf_buffer__new()
10009 p.lost_cb = opts ? opts->lost_cb : NULL; in perf_buffer__new()
10010 p.ctx = opts ? opts->ctx : NULL; in perf_buffer__new()
10021 p.attr = opts->attr; in perf_buffer__new_raw()
10022 p.event_cb = opts->event_cb; in perf_buffer__new_raw()
10023 p.ctx = opts->ctx; in perf_buffer__new_raw()
10024 p.cpu_cnt = opts->cpu_cnt; in perf_buffer__new_raw()
10025 p.cpus = opts->cpus; in perf_buffer__new_raw()
10026 p.map_keys = opts->map_keys; in perf_buffer__new_raw()
10042 if (page_cnt & (page_cnt - 1)) { in __perf_buffer__new()
10045 return ERR_PTR(-EINVAL); in __perf_buffer__new()
10048 /* best-effort sanity checks */ in __perf_buffer__new()
10053 err = -errno; in __perf_buffer__new()
10055 * -EBADFD, -EFAULT, or -E2BIG on real error in __perf_buffer__new()
10057 if (err != -EINVAL) { in __perf_buffer__new()
10068 return ERR_PTR(-EINVAL); in __perf_buffer__new()
10074 return ERR_PTR(-ENOMEM); in __perf_buffer__new()
10076 pb->event_cb = p->event_cb; in __perf_buffer__new()
10077 pb->sample_cb = p->sample_cb; in __perf_buffer__new()
10078 pb->lost_cb = p->lost_cb; in __perf_buffer__new()
10079 pb->ctx = p->ctx; in __perf_buffer__new()
10081 pb->page_size = getpagesize(); in __perf_buffer__new()
10082 pb->mmap_size = pb->page_size * page_cnt; in __perf_buffer__new()
10083 pb->map_fd = map_fd; in __perf_buffer__new()
10085 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); in __perf_buffer__new()
10086 if (pb->epoll_fd < 0) { in __perf_buffer__new()
10087 err = -errno; in __perf_buffer__new()
10093 if (p->cpu_cnt > 0) { in __perf_buffer__new()
10094 pb->cpu_cnt = p->cpu_cnt; in __perf_buffer__new()
10096 pb->cpu_cnt = libbpf_num_possible_cpus(); in __perf_buffer__new()
10097 if (pb->cpu_cnt < 0) { in __perf_buffer__new()
10098 err = pb->cpu_cnt; in __perf_buffer__new()
10101 if (map.max_entries && map.max_entries < pb->cpu_cnt) in __perf_buffer__new()
10102 pb->cpu_cnt = map.max_entries; in __perf_buffer__new()
10105 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events)); in __perf_buffer__new()
10106 if (!pb->events) { in __perf_buffer__new()
10107 err = -ENOMEM; in __perf_buffer__new()
10111 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs)); in __perf_buffer__new()
10112 if (!pb->cpu_bufs) { in __perf_buffer__new()
10113 err = -ENOMEM; in __perf_buffer__new()
10124 for (i = 0, j = 0; i < pb->cpu_cnt; i++) { in __perf_buffer__new()
10128 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i; in __perf_buffer__new()
10129 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i; in __perf_buffer__new()
10134 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu])) in __perf_buffer__new()
10137 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key); in __perf_buffer__new()
10143 pb->cpu_bufs[j] = cpu_buf; in __perf_buffer__new()
10145 err = bpf_map_update_elem(pb->map_fd, &map_key, in __perf_buffer__new()
10146 &cpu_buf->fd, 0); in __perf_buffer__new()
10148 err = -errno; in __perf_buffer__new()
10149 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n", in __perf_buffer__new()
10150 cpu, map_key, cpu_buf->fd, in __perf_buffer__new()
10155 pb->events[j].events = EPOLLIN; in __perf_buffer__new()
10156 pb->events[j].data.ptr = cpu_buf; in __perf_buffer__new()
10157 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd, in __perf_buffer__new()
10158 &pb->events[j]) < 0) { in __perf_buffer__new()
10159 err = -errno; in __perf_buffer__new()
10161 cpu, cpu_buf->fd, in __perf_buffer__new()
10167 pb->cpu_cnt = j; in __perf_buffer__new()
10196 struct perf_buffer *pb = cpu_buf->pb; in perf_buffer__process_record()
10200 if (pb->event_cb) in perf_buffer__process_record()
10201 return pb->event_cb(pb->ctx, cpu_buf->cpu, e); in perf_buffer__process_record()
10203 switch (e->type) { in perf_buffer__process_record()
10207 if (pb->sample_cb) in perf_buffer__process_record()
10208 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size); in perf_buffer__process_record()
10214 if (pb->lost_cb) in perf_buffer__process_record()
10215 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost); in perf_buffer__process_record()
10219 pr_warn("unknown perf sample type %d\n", e->type); in perf_buffer__process_record()
10230 ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size, in perf_buffer__process_records()
10231 pb->page_size, &cpu_buf->buf, in perf_buffer__process_records()
10232 &cpu_buf->buf_size, in perf_buffer__process_records()
10241 return pb->epoll_fd; in perf_buffer__epoll_fd()
10248 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms); in perf_buffer__poll()
10250 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr; in perf_buffer__poll()
10258 return cnt < 0 ? -errno : cnt; in perf_buffer__poll()
10266 return pb->cpu_cnt; in perf_buffer__buffer_cnt()
10278 if (buf_idx >= pb->cpu_cnt) in perf_buffer__buffer_fd()
10279 return -EINVAL; in perf_buffer__buffer_fd()
10281 cpu_buf = pb->cpu_bufs[buf_idx]; in perf_buffer__buffer_fd()
10283 return -ENOENT; in perf_buffer__buffer_fd()
10285 return cpu_buf->fd; in perf_buffer__buffer_fd()
10293 * - 0 on success;
10294 * - <0 on failure.
10300 if (buf_idx >= pb->cpu_cnt) in perf_buffer__consume_buffer()
10301 return -EINVAL; in perf_buffer__consume_buffer()
10303 cpu_buf = pb->cpu_bufs[buf_idx]; in perf_buffer__consume_buffer()
10305 return -ENOENT; in perf_buffer__consume_buffer()
10314 for (i = 0; i < pb->cpu_cnt; i++) { in perf_buffer__consume()
10315 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; in perf_buffer__consume()
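/*
 * Editor's consumer-loop sketch for the perf_buffer API above
 * (handle_sample is a made-up callback; 8 pages per CPU is arbitrary):
 */
#include <bpf/libbpf.h>

static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* process one sample taken from the given CPU's ring */
}

static int consume_loop(int map_fd)
{
	struct perf_buffer_opts pb_opts = { .sample_cb = handle_sample };
	struct perf_buffer *pb;
	int err = 0;

	pb = perf_buffer__new(map_fd, 8 /* pages per CPU ring */, &pb_opts);
	if (libbpf_get_error(pb))
		return -1;
	while (err >= 0)
		err = perf_buffer__poll(pb, 100 /* timeout, ms */);
	perf_buffer__free(pb);
	return err;
}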
10333 * < 0: fixed size of -size_offset
10341 -1,
10346 -1,
10351 -(int)sizeof(__u32),
10356 -(int)sizeof(__u64),
10361 -(int)sizeof(__u32),
10381 -(int)sizeof(__u8) * BPF_TAG_SIZE,
10393 return -(int)offset; in bpf_prog_info_read_offset_u32()
10403 return -(int)offset; in bpf_prog_info_read_offset_u64()
10435 return ERR_PTR(-EINVAL); in bpf_program__get_prog_info_linear()
10441 return ERR_PTR(-EFAULT); in bpf_program__get_prog_info_linear()
10453 if (info_len < desc->array_offset + sizeof(__u32) || in bpf_program__get_prog_info_linear()
10454 info_len < desc->count_offset + sizeof(__u32) || in bpf_program__get_prog_info_linear()
10455 (desc->size_offset > 0 && info_len < desc->size_offset)) in bpf_program__get_prog_info_linear()
10463 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); in bpf_program__get_prog_info_linear()
10464 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); in bpf_program__get_prog_info_linear()
10473 return ERR_PTR(-ENOMEM); in bpf_program__get_prog_info_linear()
10475 /* step 4: fill data to info_linear->info */ in bpf_program__get_prog_info_linear()
10476 info_linear->arrays = arrays; in bpf_program__get_prog_info_linear()
10477 memset(&info_linear->info, 0, sizeof(info)); in bpf_program__get_prog_info_linear()
10478 ptr = info_linear->data; in bpf_program__get_prog_info_linear()
10488 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); in bpf_program__get_prog_info_linear()
10489 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); in bpf_program__get_prog_info_linear()
10490 bpf_prog_info_set_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10491 desc->count_offset, count); in bpf_program__get_prog_info_linear()
10492 bpf_prog_info_set_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10493 desc->size_offset, size); in bpf_program__get_prog_info_linear()
10494 bpf_prog_info_set_offset_u64(&info_linear->info, in bpf_program__get_prog_info_linear()
10495 desc->array_offset, in bpf_program__get_prog_info_linear()
10501 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len); in bpf_program__get_prog_info_linear()
10505 return ERR_PTR(-EFAULT); in bpf_program__get_prog_info_linear()
10517 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset); in bpf_program__get_prog_info_linear()
10518 v2 = bpf_prog_info_read_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10519 desc->count_offset); in bpf_program__get_prog_info_linear()
10523 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset); in bpf_program__get_prog_info_linear()
10524 v2 = bpf_prog_info_read_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10525 desc->size_offset); in bpf_program__get_prog_info_linear()
10531 info_linear->info_len = sizeof(struct bpf_prog_info); in bpf_program__get_prog_info_linear()
10532 info_linear->data_len = data_len; in bpf_program__get_prog_info_linear()
10545 if ((info_linear->arrays & (1UL << i)) == 0) in bpf_program__bpil_addr_to_offs()
10549 addr = bpf_prog_info_read_offset_u64(&info_linear->info, in bpf_program__bpil_addr_to_offs()
10550 desc->array_offset); in bpf_program__bpil_addr_to_offs()
10551 offs = addr - ptr_to_u64(info_linear->data); in bpf_program__bpil_addr_to_offs()
10552 bpf_prog_info_set_offset_u64(&info_linear->info, in bpf_program__bpil_addr_to_offs()
10553 desc->array_offset, offs); in bpf_program__bpil_addr_to_offs()
10565 if ((info_linear->arrays & (1UL << i)) == 0) in bpf_program__bpil_offs_to_addr()
10569 offs = bpf_prog_info_read_offset_u64(&info_linear->info, in bpf_program__bpil_offs_to_addr()
10570 desc->array_offset); in bpf_program__bpil_offs_to_addr()
10571 addr = offs + ptr_to_u64(info_linear->data); in bpf_program__bpil_offs_to_addr()
10572 bpf_prog_info_set_offset_u64(&info_linear->info, in bpf_program__bpil_offs_to_addr()
10573 desc->array_offset, addr); in bpf_program__bpil_offs_to_addr()
10584 return -EINVAL; in bpf_program__set_attach_target()
10591 prog->expected_attach_type); in bpf_program__set_attach_target()
10596 prog->attach_btf_id = btf_id; in bpf_program__set_attach_target()
10597 prog->attach_prog_fd = attach_prog_fd; in bpf_program__set_attach_target()
10603 int err = 0, n, len, start, end = -1; in parse_cpu_mask_str()
10609 /* Each substring separated by ',' has format \d+-\d+ or \d+ */ in parse_cpu_mask_str()
10615 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len); in parse_cpu_mask_str()
10618 err = -EINVAL; in parse_cpu_mask_str()
10626 err = -EINVAL; in parse_cpu_mask_str()
10631 err = -ENOMEM; in parse_cpu_mask_str()
10635 memset(tmp + *mask_sz, 0, start - *mask_sz); in parse_cpu_mask_str()
10636 memset(tmp + start, 1, end - start + 1); in parse_cpu_mask_str()
10642 return -EINVAL; in parse_cpu_mask_str()
10658 err = -errno; in parse_cpu_mask_file()
10665 err = len ? -errno : -EINVAL; in parse_cpu_mask_file()
10671 return -E2BIG; in parse_cpu_mask_file()
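/*
 * Editor's behavior note for the parsers above (internal helpers, shown
 * for illustration only): "0-3,7" expands to a bool array with entries
 * 0..3 and 7 set; parse_cpu_mask_file() applies the same grammar to files
 * such as /sys/devices/system/cpu/online.
 */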
10708 .object_name = s->name, in bpf_object__open_skeleton()
10713 /* Attempt to preserve opts->object_name, unless overridden by user in bpf_object__open_skeleton()
10721 if (!opts->object_name) in bpf_object__open_skeleton()
10722 skel_opts.object_name = s->name; in bpf_object__open_skeleton()
10725 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts); in bpf_object__open_skeleton()
10728 s->name, PTR_ERR(obj)); in bpf_object__open_skeleton()
10732 *s->obj = obj; in bpf_object__open_skeleton()
10734 for (i = 0; i < s->map_cnt; i++) { in bpf_object__open_skeleton()
10735 struct bpf_map **map = s->maps[i].map; in bpf_object__open_skeleton()
10736 const char *name = s->maps[i].name; in bpf_object__open_skeleton()
10737 void **mmaped = s->maps[i].mmaped; in bpf_object__open_skeleton()
10742 return -ESRCH; in bpf_object__open_skeleton()
10745 /* externs shouldn't be pre-setup from user code */ in bpf_object__open_skeleton()
10746 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG) in bpf_object__open_skeleton()
10747 *mmaped = (*map)->mmaped; in bpf_object__open_skeleton()
10750 for (i = 0; i < s->prog_cnt; i++) { in bpf_object__open_skeleton()
10751 struct bpf_program **prog = s->progs[i].prog; in bpf_object__open_skeleton()
10752 const char *name = s->progs[i].name; in bpf_object__open_skeleton()
10757 return -ESRCH; in bpf_object__open_skeleton()
10768 err = bpf_object__load(*s->obj); in bpf_object__load_skeleton()
10770 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err); in bpf_object__load_skeleton()
10774 for (i = 0; i < s->map_cnt; i++) { in bpf_object__load_skeleton()
10775 struct bpf_map *map = *s->maps[i].map; in bpf_object__load_skeleton()
10778 void **mmaped = s->maps[i].mmaped; in bpf_object__load_skeleton()
10783 if (!(map->def.map_flags & BPF_F_MMAPABLE)) { in bpf_object__load_skeleton()
10788 if (map->def.map_flags & BPF_F_RDONLY_PROG) in bpf_object__load_skeleton()
10793 /* Remap anonymous mmap()-ed "map initialization image" as in bpf_object__load_skeleton()
10794 * a BPF map-backed mmap()-ed memory, but preserving the same in bpf_object__load_skeleton()
10803 *mmaped = mmap(map->mmaped, mmap_sz, prot, in bpf_object__load_skeleton()
10806 err = -errno; in bpf_object__load_skeleton()
10808 pr_warn("failed to re-mmap() map '%s': %d\n", in bpf_object__load_skeleton()
10821 for (i = 0; i < s->prog_cnt; i++) { in bpf_object__attach_skeleton()
10822 struct bpf_program *prog = *s->progs[i].prog; in bpf_object__attach_skeleton()
10823 struct bpf_link **link = s->progs[i].link; in bpf_object__attach_skeleton()
10826 if (!prog->load) in bpf_object__attach_skeleton()
10829 sec_def = find_sec_def(prog->sec_name); in bpf_object__attach_skeleton()
10830 if (!sec_def || !sec_def->attach_fn) in bpf_object__attach_skeleton()
10833 *link = sec_def->attach_fn(sec_def, prog); in bpf_object__attach_skeleton()
10835 pr_warn("failed to auto-attach program '%s': %ld\n", in bpf_object__attach_skeleton()
10848 for (i = 0; i < s->prog_cnt; i++) { in bpf_object__detach_skeleton()
10849 struct bpf_link **link = s->progs[i].link; in bpf_object__detach_skeleton()
10858 if (s->progs) in bpf_object__destroy_skeleton()
10860 if (s->obj) in bpf_object__destroy_skeleton()
10861 bpf_object__close(*s->obj); in bpf_object__destroy_skeleton()
10862 free(s->maps); in bpf_object__destroy_skeleton()
10863 free(s->progs); in bpf_object__destroy_skeleton()
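/*
 * Editor's skeleton-lifecycle sketch: the helpers above back the code that
 * `bpftool gen skeleton` emits. The myprog_bpf__*() names below are
 * hypothetical generated identifiers, not part of libbpf.
 */
static int skel_example(void)
{
	struct myprog_bpf *skel;	/* hypothetical generated type */
	int err;

	skel = myprog_bpf__open();
	if (!skel)
		return -1;
	err = myprog_bpf__load(skel);
	if (!err)
		err = myprog_bpf__attach(skel);
	myprog_bpf__destroy(skel);	/* detaches links, closes the object */
	return err;
}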