bpf-for-netdev
-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQTFp0I1jqZrAX+hPRXbK58LschIgwUCZgHmTAAKCRDbK58LschI
g1gWAP9HjAWE/Sy0B2t9opIiTqRzdMJLYs2B4OFeHRI6+qQg0gD6A4jsKEh/xmtG
Hhjw+AElJRFZ3SUIT4mZlljzUHIYYAA=
=T0lM
-----END PGP SIGNATURE-----

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2024-03-25

The following pull-request contains BPF updates for your *net* tree.

We've added 17 non-merge commits during the last 12 day(s) which contain
a total of 19 files changed, 184 insertions(+), 61 deletions(-).

The main changes are:

1) Fix an arm64 BPF JIT bug in the BPF_LDX_MEMSX implementation's offset
   handling, found via the test_bpf module, from Puranjay Mohan.

2) Various fixups to the BPF arena code, in particular in the BPF
   verifier and around BPF selftests, to match the latest corresponding
   LLVM implementation, from Puranjay Mohan and Alexei Starovoitov.

3) Fix xsk to not assume that metadata is always requested in TX
   completion, from Stanislav Fomichev.

4) Fix the riscv BPF JIT's kfunc parameter incompatibility between BPF
   and the riscv ABI, which requires sign-extension on int/uint, from
   Pu Lehui.

5) Fix the s390x BPF JIT's bpf_plt pointer arithmetic, which triggered
   a crash when testing struct_ops, from Ilya Leoshkevich.

6) Fix libbpf's arena mmap handling, which had an incorrect
   u64-to-pointer cast on 32-bit architectures, from Andrii Nakryiko.

7) Fix libbpf to define MFD_CLOEXEC when not available, from Arnaldo
   Carvalho de Melo.

8) Fix the arm64 BPF JIT implementation of 32-bit unconditional bswap,
   which produced an incorrect swap as indicated by test_bpf, from
   Artem Savkov.

9) Fix the BPF man page build script to use silent mode, from
   Hangbin Liu.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  riscv, bpf: Fix kfunc parameters incompatibility between bpf and riscv abi
  bpf: verifier: reject addr_space_cast insn without arena
  selftests/bpf: verifier_arena: fix mmap address for arm64
  bpf: verifier: fix addr_space_cast from as(1) to as(0)
  libbpf: Define MFD_CLOEXEC if not available
  arm64: bpf: fix 32bit unconditional bswap
  bpf, arm64: fix bug in BPF_LDX_MEMSX
  libbpf: fix u64-to-pointer cast on 32-bit arches
  s390/bpf: Fix bpf_plt pointer arithmetic
  xsk: Don't assume metadata is always requested in TX completion
  selftests/bpf: Add arena test case for 4Gbyte corner case
  selftests/bpf: Remove hard coded PAGE_SIZE macro.
  libbpf, selftests/bpf: Adjust libbpf, bpftool, selftests to match LLVM
  bpf: Clarify bpf_arena comments.
  MAINTAINERS: Update email address for Quentin Monnet
  scripts/bpf_doc: Use silent mode when exec make cmd
  bpf: Temporarily disable atomic operations in BPF arena
====================

Link: https://lore.kernel.org/r/20240325213520.26688-1-daniel@iogearbox.net
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
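Aside (not part of the pull request): for readers unfamiliar with BPF_LDX_MEMSX, mentioned in fix (1) above, it is a sign-extending load: a narrow value read from memory fills the upper bits of the 64-bit destination register with copies of its sign bit. A minimal userspace C sketch of the intended semantics, for illustration only:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Mimics BPF_LDX_MEMSX with a 32-bit operand: load 32 bits from
     * (base + off) and sign-extend into the 64-bit destination.
     */
    static int64_t ldx_memsx32(const void *base, int16_t off)
    {
            int32_t v;

            memcpy(&v, (const char *)base + off, sizeof(v));
            return (int64_t)v; /* the sign extension the JIT must preserve */
    }

    int main(void)
    {
            int32_t val = -2;

            /* prints 0xfffffffffffffffe: upper 32 bits replicate the sign bit */
            printf("0x%llx\n", (unsigned long long)ldx_memsx32(&val, 0));
            return 0;
    }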
commit 37ccdf7f11

diff --git a/.mailmap b/.mailmap
@@ -497,7 +497,8 @@ Prasad Sodagudi <quic_psodagud@quicinc.com> <psodagud@codeaurora.org>
 Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
 Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com>
 Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
-Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
+Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
+Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>
 Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
 Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
 Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -3967,7 +3967,7 @@ F: kernel/bpf/bpf_lru*
 F: kernel/bpf/cgroup.c
 
 BPF [TOOLING] (bpftool)
-M: Quentin Monnet <quentin@isovalent.com>
+M: Quentin Monnet <qmo@kernel.org>
 L: bpf@vger.kernel.org
 S: Maintained
 F: kernel/bpf/disasm.*
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
@@ -943,7 +943,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
-			emit(A64_REV32(is64, dst, dst), ctx);
+			emit(A64_REV32(0, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
@@ -1256,7 +1256,7 @@ emit_cond_jmp:
		} else {
			emit_a64_mov_i(1, tmp, off, ctx);
			if (sign_extend)
-				emit(A64_LDRSW(dst, src_adj, off_adj), ctx);
+				emit(A64_LDRSW(dst, src, tmp), ctx);
			else
				emit(A64_LDR32(dst, src, tmp), ctx);
		}
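Aside (not part of the patch): the one-line bswap fix matters because REV32 with the 64-bit flag set reverses bytes within each 32-bit half of an X register, leaving the upper half populated; with the flag clear it operates on the W view, so the write zero-extends and the upper 32 bits come out cleared, which is what BPF's 32-bit bswap requires. A host-side sketch of the required semantics, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t bswap32_bpf(uint64_t reg)
    {
            /* BPF BSWAP with imm=32: swap low 32 bits, clear upper 32 */
            return (uint64_t)__builtin_bswap32((uint32_t)reg);
    }

    int main(void)
    {
            /* prints 0x0000000088776655 */
            printf("0x%016llx\n",
                   (unsigned long long)bswap32_bpf(0x1122334455667788ULL));
            return 0;
    }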
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
@@ -1463,6 +1463,22 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
		if (ret < 0)
			return ret;
 
+		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
+			const struct btf_func_model *fm;
+			int idx;
+
+			fm = bpf_jit_find_kfunc_model(ctx->prog, insn);
+			if (!fm)
+				return -EINVAL;
+
+			for (idx = 0; idx < fm->nr_args; idx++) {
+				u8 reg = bpf_to_rv_reg(BPF_REG_1 + idx, ctx);
+
+				if (fm->arg_size[idx] == sizeof(int))
+					emit_sextw(reg, reg, ctx);
+			}
+		}
+
		ret = emit_call(addr, fixed_addr, ctx);
		if (ret)
			return ret;
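Aside (not part of the patch): the sign-extension above is needed because the RV64 calling convention keeps 32-bit arguments sign-extended in 64-bit registers even for unsigned types, while BPF zero-extends 32-bit subregister writes. A small host-side C sketch of the mismatch, values only, for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t sub = 0x80000000u;      /* a 32-bit BPF subregister */
            uint64_t as_bpf = (uint64_t)sub; /* BPF view: zero-extended */
            uint64_t as_rv64 = (uint64_t)(int64_t)(int32_t)sub; /* RV64 ABI: sign-extended */

            printf("BPF view:  0x%016llx\n", (unsigned long long)as_bpf);
            printf("RV64 view: 0x%016llx\n", (unsigned long long)as_rv64);
            /* emit_sextw() in the hunk above bridges this gap for each
             * int-sized kfunc argument before the call. */
            return 0;
    }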
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
@@ -516,11 +516,12 @@ static void bpf_skip(struct bpf_jit *jit, int size)
 * PLT for hotpatchable calls. The calling convention is the same as for the
 * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
 */
-extern const char bpf_plt[];
-extern const char bpf_plt_ret[];
-extern const char bpf_plt_target[];
-extern const char bpf_plt_end[];
-#define BPF_PLT_SIZE 32
+struct bpf_plt {
+	char code[16];
+	void *ret;
+	void *target;
+} __packed;
+extern const struct bpf_plt bpf_plt;
 asm(
	".pushsection .rodata\n"
	"	.balign 8\n"
@@ -531,15 +532,14 @@ asm(
	"	.balign 8\n"
	"bpf_plt_ret: .quad 0\n"
	"bpf_plt_target: .quad 0\n"
-	"bpf_plt_end:\n"
	"	.popsection\n"
 );
 
-static void bpf_jit_plt(void *plt, void *ret, void *target)
+static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
 {
-	memcpy(plt, bpf_plt, BPF_PLT_SIZE);
-	*(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
-	*(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target ?: ret;
+	memcpy(plt, &bpf_plt, sizeof(*plt));
+	plt->ret = ret;
+	plt->target = target;
 }
 
 /*
@@ -662,9 +662,9 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
	jit->prg = ALIGN(jit->prg, 8);
	jit->prologue_plt = jit->prg;
	if (jit->prg_buf)
-		bpf_jit_plt(jit->prg_buf + jit->prg,
+		bpf_jit_plt((struct bpf_plt *)(jit->prg_buf + jit->prg),
			    jit->prg_buf + jit->prologue_plt_ret, NULL);
-	jit->prg += BPF_PLT_SIZE;
+	jit->prg += sizeof(struct bpf_plt);
 }
 
 static int get_probe_mem_regno(const u8 *insn)
@@ -2040,9 +2040,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
	struct bpf_jit jit;
	int pass;
 
-	if (WARN_ON_ONCE(bpf_plt_end - bpf_plt != BPF_PLT_SIZE))
-		return orig_fp;
-
	if (!fp->jit_requested)
		return orig_fp;
 
@@ -2148,14 +2145,11 @@ bool bpf_jit_supports_far_kfunc_call(void)
 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
 {
+	struct bpf_plt expected_plt, current_plt, new_plt, *plt;
	struct {
		u16 opc;
		s32 disp;
	} __packed insn;
-	char expected_plt[BPF_PLT_SIZE];
-	char current_plt[BPF_PLT_SIZE];
-	char new_plt[BPF_PLT_SIZE];
-	char *plt;
	char *ret;
	int err;
 
@@ -2174,18 +2168,18 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		 */
	} else {
		/* Verify the PLT. */
-		plt = (char *)ip + (insn.disp << 1);
-		err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
+		plt = ip + (insn.disp << 1);
+		err = copy_from_kernel_nofault(&current_plt, plt,
+					       sizeof(current_plt));
		if (err < 0)
			return err;
		ret = (char *)ip + 6;
-		bpf_jit_plt(expected_plt, ret, old_addr);
-		if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
+		bpf_jit_plt(&expected_plt, ret, old_addr);
+		if (memcmp(&current_plt, &expected_plt, sizeof(current_plt)))
			return -EINVAL;
		/* Adjust the call address. */
-		bpf_jit_plt(new_plt, ret, new_addr);
-		s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
-				  new_plt + (bpf_plt_target - bpf_plt),
+		bpf_jit_plt(&new_plt, ret, new_addr);
+		s390_kernel_write(&plt->target, &new_plt.target,
				  sizeof(void *));
	}
 
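Aside (not part of the patch): the s390 fix replaces offset arithmetic against assembler symbols with a typed struct, so accesses like plt->target can no longer drift out of sync with the layout. A standalone sketch, for illustration only, showing that the struct reproduces the old 32-byte PLT layout:

    #include <stddef.h>
    #include <stdio.h>

    /* Mirrors struct bpf_plt from the hunk above. */
    struct plt_layout {
            char code[16];
            void *ret;
            void *target;
    } __attribute__((packed));

    int main(void)
    {
            /* On 64-bit: ret at offset 16, target at 24, 32 bytes total,
             * matching the old hard-coded BPF_PLT_SIZE of 32. */
            printf("ret=%zu target=%zu size=%zu\n",
                   offsetof(struct plt_layout, ret),
                   offsetof(struct plt_layout, target),
                   sizeof(struct plt_layout));
            return 0;
    }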
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
@@ -188,6 +188,8 @@ static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
 {
	if (!compl)
		return;
+	if (!compl->tx_timestamp)
+		return;
 
	*compl->tx_timestamp = ops->tmo_fill_timestamp(priv);
 }
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
@@ -38,7 +38,7 @@
 
 /* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */
 #define GUARD_SZ (1ull << sizeof(((struct bpf_insn *)0)->off) * 8)
-#define KERN_VM_SZ ((1ull << 32) + GUARD_SZ)
+#define KERN_VM_SZ (SZ_4G + GUARD_SZ)
 
 struct bpf_arena {
	struct bpf_map map;
@@ -110,7 +110,7 @@ static struct bpf_map *arena_map_alloc(union bpf_attr *attr)
		return ERR_PTR(-EINVAL);
 
	vm_range = (u64)attr->max_entries * PAGE_SIZE;
-	if (vm_range > (1ull << 32))
+	if (vm_range > SZ_4G)
		return ERR_PTR(-E2BIG);
 
	if ((attr->map_extra >> 32) != ((attr->map_extra + vm_range - 1) >> 32))
@@ -301,7 +301,7 @@ static unsigned long arena_get_unmapped_area(struct file *filp, unsigned long ad
 
	if (pgoff)
		return -EINVAL;
-	if (len > (1ull << 32))
+	if (len > SZ_4G)
		return -E2BIG;
 
	/* if user_vm_start was specified at arena creation time */
@@ -322,7 +322,7 @@ static unsigned long arena_get_unmapped_area(struct file *filp, unsigned long ad
		if (WARN_ON_ONCE(arena->user_vm_start))
			/* checks at map creation time should prevent this */
			return -EFAULT;
-		return round_up(ret, 1ull << 32);
+		return round_up(ret, SZ_4G);
	}
 
 static int arena_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
@@ -346,7 +346,7 @@ static int arena_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
		return -EBUSY;
 
	/* Earlier checks should prevent this */
-	if (WARN_ON_ONCE(vma->vm_end - vma->vm_start > (1ull << 32) || vma->vm_pgoff))
+	if (WARN_ON_ONCE(vma->vm_end - vma->vm_start > SZ_4G || vma->vm_pgoff))
		return -EFAULT;
 
	if (remember_vma(arena, vma))
@@ -420,7 +420,7 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt
		if (uaddr & ~PAGE_MASK)
			return 0;
		pgoff = compute_pgoff(arena, uaddr);
-		if (pgoff + page_cnt > page_cnt_max)
+		if (pgoff > page_cnt_max - page_cnt)
			/* requested address will be outside of user VMA */
			return 0;
	}
@@ -447,7 +447,13 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt
		goto out;
 
	uaddr32 = (u32)(arena->user_vm_start + pgoff * PAGE_SIZE);
-	/* Earlier checks make sure that uaddr32 + page_cnt * PAGE_SIZE will not overflow 32-bit */
+	/* Earlier checks made sure that uaddr32 + page_cnt * PAGE_SIZE - 1
+	 * will not overflow 32-bit. Lower 32-bit need to represent
+	 * contiguous user address range.
+	 * Map these pages at kern_vm_start base.
+	 * kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE - 1 can overflow
+	 * lower 32-bit and it's ok.
+	 */
	ret = vm_area_map_pages(arena->kern_vm, kern_vm_start + uaddr32,
				kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE, pages);
	if (ret) {
@@ -510,6 +516,11 @@ static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
		if (!page)
			continue;
		if (page_cnt == 1 && page_mapped(page)) /* mapped by some user process */
+			/* Optimization for the common case of page_cnt==1:
+			 * If page wasn't mapped into some user vma there
+			 * is no need to call zap_pages which is slow. When
+			 * page_cnt is big it's faster to do the batched zap.
+			 */
			zap_pages(arena, full_uaddr, 1);
		vm_area_unmap_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE);
		__free_page(page);
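Aside (not part of the patch): the bounds-check rewrite in arena_alloc_pages() is the classic overflow-safe comparison. The sum pgoff + page_cnt can wrap and sneak past the limit, while pgoff > page_cnt_max - page_cnt cannot, given page_cnt <= page_cnt_max, which earlier checks enforce. A standalone sketch using unsigned arithmetic so the wraparound is well-defined, for illustration only:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long pgoff = ULONG_MAX - 10; /* hostile page offset */
            unsigned long page_cnt = 20;
            unsigned long page_cnt_max = 100;

            /* old form: pgoff + page_cnt wraps to 9, the check misses */
            printf("old form rejects: %d\n", pgoff + page_cnt > page_cnt_max);
            /* new form: no addition, no wrap, the request is rejected */
            printf("new form rejects: %d\n", pgoff > page_cnt_max - page_cnt);
            return 0;
    }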
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
@@ -5682,6 +5682,13 @@ static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
	return reg->type == PTR_TO_FLOW_KEYS;
 }
 
+static bool is_arena_reg(struct bpf_verifier_env *env, int regno)
+{
+	const struct bpf_reg_state *reg = reg_state(env, regno);
+
+	return reg->type == PTR_TO_ARENA;
+}
+
 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
 #ifdef CONFIG_NET
	[PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
@@ -7019,7 +7026,8 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
	if (is_ctx_reg(env, insn->dst_reg) ||
	    is_pkt_reg(env, insn->dst_reg) ||
	    is_flow_key_reg(env, insn->dst_reg) ||
-	    is_sk_reg(env, insn->dst_reg)) {
+	    is_sk_reg(env, insn->dst_reg) ||
+	    is_arena_reg(env, insn->dst_reg)) {
		verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
			insn->dst_reg,
			reg_type_str(env, reg_state(env, insn->dst_reg)->type));
@@ -14014,6 +14022,10 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
				verbose(env, "addr_space_cast insn can only convert between address space 1 and 0\n");
				return -EINVAL;
			}
+			if (!env->prog->aux->arena) {
+				verbose(env, "addr_space_cast insn can only be used in a program that has an associated arena\n");
+				return -EINVAL;
+			}
		} else {
			if ((insn->off != 0 && insn->off != 8 && insn->off != 16 &&
			     insn->off != 32) || insn->imm) {
@@ -14046,8 +14058,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
			if (insn->imm) {
				/* off == BPF_ADDR_SPACE_CAST */
				mark_reg_unknown(env, regs, insn->dst_reg);
-				if (insn->imm == 1) /* cast from as(1) to as(0) */
+				if (insn->imm == 1) { /* cast from as(1) to as(0) */
					dst_reg->type = PTR_TO_ARENA;
+					/* PTR_TO_ARENA is 32-bit */
+					dst_reg->subreg_def = env->insn_idx + 1;
+				}
			} else if (insn->off == 0) {
				/* case: R1 = R2
				 * copy register state to dest reg
@@ -19601,8 +19616,9 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
			    (((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) {
				/* convert to 32-bit mov that clears upper 32-bit */
				insn->code = BPF_ALU | BPF_MOV | BPF_X;
-				/* clear off, so it's a normal 'wX = wY' from JIT pov */
+				/* clear off and imm, so it's a normal 'wX = wY' from JIT pov */
				insn->off = 0;
+				insn->imm = 0;
			} /* cast from as(0) to as(1) should be handled by JIT */
			goto next_insn;
		}
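Aside (not part of the patch): taken together, the verifier changes mean an addr_space_cast-using program must carry an arena map, atomics on arena pointers are temporarily rejected, and a cast to as(0) yields a 32-bit PTR_TO_ARENA. A minimal BPF-side sketch of a program these checks accept; illustrative only, and it assumes an LLVM with __BPF_FEATURE_ADDR_SPACE_CAST plus kernel headers from this series:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    #define __arena __attribute__((address_space(1)))

    /* the associated arena map the new verifier check looks for */
    struct {
            __uint(type, BPF_MAP_TYPE_ARENA);
            __uint(map_flags, BPF_F_MMAPABLE);
            __uint(max_entries, 1); /* one page */
    } arena SEC(".maps");

    int __arena counter; /* placed in the .addr_space.1 section */

    SEC("syscall")
    int touch_arena(void *ctx)
    {
            /* accessing an as(1) object makes LLVM emit the
             * addr_space_cast insn the hunks above validate; plain
             * (non-atomic) loads and stores are allowed */
            counter += 1;
            return 0;
    }

    char _license[] SEC("license") = "GPL";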
diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py
@@ -414,8 +414,8 @@ class PrinterRST(Printer):
             version = version.stdout.decode().rstrip()
         except:
             try:
-                version = subprocess.run(['make', 'kernelversion'], cwd=linuxRoot,
-                                         capture_output=True, check=True)
+                version = subprocess.run(['make', '-s', '--no-print-directory', 'kernelversion'],
+                                         cwd=linuxRoot, capture_output=True, check=True)
                 version = version.stdout.decode().rstrip()
             except:
                 return 'Linux'
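Aside (not part of the patch): the -s and --no-print-directory flags keep make from emitting "Entering/Leaving directory" chatter on stdout, which would otherwise be captured alongside the kernel version string and corrupt it.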
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
@@ -121,7 +121,7 @@ static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
	int i, n;
 
	/* recognize hard coded LLVM section name */
-	if (strcmp(sec_name, ".arena.1") == 0) {
+	if (strcmp(sec_name, ".addr_space.1") == 0) {
		/* this is the name to use in skeleton */
		snprintf(buf, buf_sz, "arena");
		return true;
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
@@ -498,7 +498,7 @@ struct bpf_struct_ops {
 #define KSYMS_SEC ".ksyms"
 #define STRUCT_OPS_SEC ".struct_ops"
 #define STRUCT_OPS_LINK_SEC ".struct_ops.link"
-#define ARENA_SEC ".arena.1"
+#define ARENA_SEC ".addr_space.1"
 
 enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
@@ -1650,6 +1650,10 @@ static int sys_memfd_create(const char *name, unsigned flags)
	return syscall(__NR_memfd_create, name, flags);
 }
 
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC 0x0001U
+#endif
+
 static int create_placeholder_fd(void)
 {
	int fd;
@@ -5352,8 +5356,8 @@ retry:
			goto err_out;
		}
		if (map->def.type == BPF_MAP_TYPE_ARENA) {
-			map->mmaped = mmap((void *)map->map_extra, bpf_map_mmap_sz(map),
-					   PROT_READ | PROT_WRITE,
+			map->mmaped = mmap((void *)(long)map->map_extra,
+					   bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
					   map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
					   map->fd, 0);
			if (map->mmaped == MAP_FAILED) {
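Aside (not part of the patch): the mmap() change fixes a 32-bit portability bug. map_extra is a __u64, and casting it straight to void * on a 32-bit arch trips -Wint-to-pointer-cast because pointer and integer differ in size; narrowing through long first makes the truncation explicit. A small sketch, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t map_extra = 0x20000; /* caller-chosen mmap address */

            /* (void *)map_extra warns on 32-bit: "cast to pointer from
             * integer of different size". Going through long makes the
             * width change explicit on every arch: */
            void *addr = (void *)(long)map_extra;

            printf("%p\n", addr);
            return 0;
    }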
diff --git a/tools/testing/selftests/bpf/bpf_arena_common.h b/tools/testing/selftests/bpf/bpf_arena_common.h
@@ -32,7 +32,7 @@
 */
 #endif
 
-#if defined(__BPF_FEATURE_ARENA_CAST) && !defined(BPF_ARENA_FORCE_ASM)
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM)
 #define __arena __attribute__((address_space(1)))
 #define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */
 #define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_htab.c b/tools/testing/selftests/bpf/prog_tests/arena_htab.c
@@ -3,12 +3,14 @@
 #include <test_progs.h>
 #include <sys/mman.h>
 #include <network_helpers.h>
-
+#include <sys/user.h>
+#ifndef PAGE_SIZE /* on some archs it comes in sys/user.h */
+#include <unistd.h>
+#define PAGE_SIZE getpagesize()
+#endif
 #include "arena_htab_asm.skel.h"
 #include "arena_htab.skel.h"
 
-#define PAGE_SIZE 4096
-
 #include "bpf_arena_htab.h"
 
 static void test_arena_htab_common(struct htab *htab)
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_list.c b/tools/testing/selftests/bpf/prog_tests/arena_list.c
@@ -3,8 +3,11 @@
 #include <test_progs.h>
 #include <sys/mman.h>
 #include <network_helpers.h>
-
-#define PAGE_SIZE 4096
+#include <sys/user.h>
+#ifndef PAGE_SIZE /* on some archs it comes in sys/user.h */
+#include <unistd.h>
+#define PAGE_SIZE getpagesize()
+#endif
 
 #include "bpf_arena_list.h"
 #include "arena_list.skel.h"
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -5,6 +5,7 @@
 #include "cap_helpers.h"
 #include "verifier_and.skel.h"
 #include "verifier_arena.skel.h"
+#include "verifier_arena_large.skel.h"
 #include "verifier_array_access.skel.h"
 #include "verifier_basic_stack.skel.h"
 #include "verifier_bitfield_write.skel.h"
@@ -120,6 +121,7 @@ static void run_tests_aux(const char *skel_name,
 
 void test_verifier_and(void) { RUN(verifier_and); }
 void test_verifier_arena(void) { RUN(verifier_arena); }
+void test_verifier_arena_large(void) { RUN(verifier_arena_large); }
 void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); }
 void test_verifier_bitfield_write(void) { RUN(verifier_bitfield_write); }
 void test_verifier_bounds(void) { RUN(verifier_bounds); }
diff --git a/tools/testing/selftests/bpf/progs/arena_htab.c b/tools/testing/selftests/bpf/progs/arena_htab.c
@@ -22,7 +22,7 @@ int zero = 0;
 SEC("syscall")
 int arena_htab_llvm(void *ctx)
 {
-#if defined(__BPF_FEATURE_ARENA_CAST) || defined(BPF_ARENA_FORCE_ASM)
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) || defined(BPF_ARENA_FORCE_ASM)
	struct htab __arena *htab;
	__u64 i;
 
diff --git a/tools/testing/selftests/bpf/progs/arena_list.c b/tools/testing/selftests/bpf/progs/arena_list.c
@@ -30,13 +30,13 @@ int list_sum;
 int cnt;
 bool skip = false;
 
-#ifdef __BPF_FEATURE_ARENA_CAST
+#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
 long __arena arena_sum;
 int __arena test_val = 1;
 struct arena_list_head __arena global_head;
 #else
-long arena_sum SEC(".arena.1");
-int test_val SEC(".arena.1");
+long arena_sum SEC(".addr_space.1");
+int test_val SEC(".addr_space.1");
 #endif
 
 int zero;
@@ -44,7 +44,7 @@ int zero;
 SEC("syscall")
 int arena_list_add(void *ctx)
 {
-#ifdef __BPF_FEATURE_ARENA_CAST
+#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
	__u64 i;
 
	list_head = &global_head;
@@ -66,7 +66,7 @@ int arena_list_add(void *ctx)
 SEC("syscall")
 int arena_list_del(void *ctx)
 {
-#ifdef __BPF_FEATURE_ARENA_CAST
+#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
	struct elem __arena *n;
	int sum = 0;
 
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c
@@ -12,14 +12,18 @@ struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 2); /* arena of two pages close to 32-bit boundary*/
-	__ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
+#ifdef __TARGET_ARCH_arm64
+	__ulong(map_extra, (1ull << 32) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
+#else
+	__ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
+#endif
 } arena SEC(".maps");
 
 SEC("syscall")
 __success __retval(0)
 int basic_alloc1(void *ctx)
 {
-#if defined(__BPF_FEATURE_ARENA_CAST)
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	volatile int __arena *page1, *page2, *no_page, *page3;
 
	page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
@@ -58,7 +62,7 @@ SEC("syscall")
 __success __retval(0)
 int basic_alloc2(void *ctx)
 {
-#if defined(__BPF_FEATURE_ARENA_CAST)
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	volatile char __arena *page1, *page2, *page3, *page4;
 
	page1 = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);
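Aside (not part of the patch): the arm64 special case exists because common arm64 configurations give user space a smaller virtual address width (for example 39-bit VA with 4K pages), where a fixed mmap() base above 1ull << 44 cannot be mapped; a base just above 1ull << 32 still exercises the same 32-bit-boundary corner case. A quick sketch computing the two bases, for illustration only and assuming 4K pages:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long page = 4096ULL;
            unsigned long long tail = ~0u - page * 2 + 1; /* two pages below 4G */

            printf("default base: 0x%llx\n", (1ULL << 44) | tail);
            printf("arm64 base:   0x%llx\n", (1ULL << 32) | tail);
            return 0;
    }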
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
new file mode 100644
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+#include "bpf_arena_common.h"
+
+#define ARENA_SIZE (1ull << 32)
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARENA);
+	__uint(map_flags, BPF_F_MMAPABLE);
+	__uint(max_entries, ARENA_SIZE / PAGE_SIZE);
+} arena SEC(".maps");
+
+SEC("syscall")
+__success __retval(0)
+int big_alloc1(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+	volatile char __arena *page1, *page2, *no_page, *page3;
+	void __arena *base;
+
+	page1 = base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+	if (!page1)
+		return 1;
+	*page1 = 1;
+	page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE,
+				      1, NUMA_NO_NODE, 0);
+	if (!page2)
+		return 2;
+	*page2 = 2;
+	no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE,
+					1, NUMA_NO_NODE, 0);
+	if (no_page)
+		return 3;
+	if (*page1 != 1)
+		return 4;
+	if (*page2 != 2)
+		return 5;
+	bpf_arena_free_pages(&arena, (void __arena *)page1, 1);
+	if (*page2 != 2)
+		return 6;
+	if (*page1 != 0) /* use-after-free should return 0 */
+		return 7;
+	page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+	if (!page3)
+		return 8;
+	*page3 = 3;
+	if (page1 != page3)
+		return 9;
+	if (*page2 != 2)
+		return 10;
+	if (*(page1 + PAGE_SIZE) != 0)
+		return 11;
+	if (*(page1 - PAGE_SIZE) != 0)
+		return 12;
+	if (*(page2 + PAGE_SIZE) != 0)
+		return 13;
+	if (*(page2 - PAGE_SIZE) != 0)
+		return 14;
+#endif
+	return 0;
+}
+char _license[] SEC("license") = "GPL";
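Aside (not part of the patch): after building the BPF selftests, the usual test_progs substring filter can exercise just the arena verifier tests, e.g. running ./test_progs -t verifier_arena from tools/testing/selftests/bpf, which picks up both verifier_arena and the new verifier_arena_large 4GB corner-case test added above.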