libbpf: Tie struct_ops programs to kernel BTF ids, not to local ids

Enforce the following existing limitation on struct_ops programs based
on kernel BTF id instead of program-local BTF id:

    struct_ops BPF prog can be re-used between multiple .struct_ops &
    .struct_ops.link as long as it's the same struct_ops struct
    definition and the same function pointer field

This allows reusing same BPF program for versioned struct_ops map
definitions, e.g.:

    SEC("struct_ops/test")
    int BPF_PROG(foo) { ... }

    struct some_ops___v1 { int (*test)(void); };
    struct some_ops___v2 { int (*test)(void); };

    SEC(".struct_ops.link") struct some_ops___v1 a = { .test = foo };
    SEC(".struct_ops.link") struct some_ops___v2 b = { .test = foo };

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240306104529.6453-3-eddyz87@gmail.com
This commit is contained in:
Eduard Zingerman 2024-03-06 12:45:16 +02:00 committed by Andrii Nakryiko
parent a2a5172cf1
commit d9ab2f76ef
1 changed file with 26 additions and 23 deletions

View File

@@ -1146,8 +1146,32 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
if (mod_btf)
	prog->attach_btf_obj_fd = mod_btf->fd;
prog->attach_btf_id = kern_type_id;
prog->expected_attach_type = kern_member_idx;
/* if we haven't yet processed this BPF program, record proper
* attach_btf_id and member_idx
*/
if (!prog->attach_btf_id) {
prog->attach_btf_id = kern_type_id;
prog->expected_attach_type = kern_member_idx;
}
/* struct_ops BPF prog can be re-used between multiple
* .struct_ops & .struct_ops.link as long as it's the
* same struct_ops struct definition and the same
* function pointer field
*/
if (prog->attach_btf_id != kern_type_id) {
pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n",
map->name, mname, prog->name, prog->sec_name, prog->type,
prog->attach_btf_id, kern_type_id);
return -EINVAL;
}
if (prog->expected_attach_type != kern_member_idx) {
pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n",
map->name, mname, prog->name, prog->sec_name, prog->type,
prog->expected_attach_type, kern_member_idx);
return -EINVAL;
}
st_ops->kern_func_off[i] = kern_data_off + kern_moff;
@@ -9428,27 +9452,6 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
	return -EINVAL;
}
/* if we haven't yet processed this BPF program, record proper
* attach_btf_id and member_idx
*/
if (!prog->attach_btf_id) {
prog->attach_btf_id = st_ops->type_id;
prog->expected_attach_type = member_idx;
}
/* struct_ops BPF prog can be re-used between multiple
* .struct_ops & .struct_ops.link as long as it's the
* same struct_ops struct definition and the same
* function pointer field
*/
if (prog->attach_btf_id != st_ops->type_id ||
prog->expected_attach_type != member_idx) {
pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
map->name, prog->name, prog->sec_name, prog->type,
prog->attach_btf_id, prog->expected_attach_type, name);
return -EINVAL;
}
st_ops->progs[member_idx] = prog;
/* st_ops->data will be exposed to users, being returned by