In 568f196756ad ("bpf: check that BPF programs run with preemption disabled") a check was added to BPF_PROG_RUN() asserting that preemption is disabled on every invocation, since otherwise eBPF assumptions (e.g. on per-cpu maps) would be broken. This does not hold for seccomp, however: only cBPF -> eBPF programs are loaded there, and they do not use any functionality that would require this assertion. Fix the false positive by adding a __BPF_PROG_RUN() variant that omits the cant_sleep() check and using it from seccomp.
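For reference, cant_sleep() is roughly the inverse of might_sleep(): it complains when the caller is still preemptible. Below is a simplified sketch of the underlying __cant_sleep() helper from kernel/sched/core.c (condensed for illustration, the exact code may differ between kernel versions); this is the warning syzbot triggered, because seccomp filters legitimately run in preemptible task context:

	/* Sketch of the check behind cant_sleep(), simplified. */
	void __cant_sleep(const char *file, int line, int preempt_offset)
	{
		if (irqs_disabled())
			return;
		if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
			return;
		if (preempt_count() > preempt_offset)
			return;

		/* Still preemptible -> the caller's atomicity assumption is wrong. */
		printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n",
		       file, line);
		dump_stack();
		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
	}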
Fixes: 568f196756ad ("bpf: check that BPF programs run with preemption disabled")
Reported-by: syzbot+8bf19ee2aa580de7a...@syzkaller.appspotmail.com
Signed-off-by: Daniel Borkmann <dan...@iogearbox.net>
---
 include/linux/filter.h | 9 ++++++++-
 kernel/seccomp.c       | 2 +-
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index f32b3ec..2648fd7 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -533,7 +533,14 @@ struct sk_filter {
 	struct bpf_prog	*prog;
 };
 
-#define BPF_PROG_RUN(filter, ctx)  ({ cant_sleep(); (*(filter)->bpf_func)(ctx, (filter)->insnsi); })
+#define bpf_prog_run__non_preempt(prog, ctx)	\
+	({ cant_sleep(); __BPF_PROG_RUN(prog, ctx); })
+/* Native eBPF or cBPF -> eBPF transitions. Preemption must be disabled. */
+#define BPF_PROG_RUN(prog, ctx)			\
+	bpf_prog_run__non_preempt(prog, ctx)
+/* Direct use for cBPF -> eBPF only, but not for native eBPF. */
+#define __BPF_PROG_RUN(prog, ctx)		\
+	(*(prog)->bpf_func)(ctx, (prog)->insnsi)
 
 #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
 
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index e815781..826d4e4 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -268,7 +268,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
 	 * value always takes priority (ignoring the DATA).
 	 */
 	for (; f; f = f->prev) {
-		u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
+		u32 cur_ret = __BPF_PROG_RUN(f->prog, sd);
 
 		if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
 			ret = cur_ret;
-- 
2.9.5