On Mon, Dec 3, 2018 at 11:00 AM Stanislav Fomichev <s...@google.com> wrote:
>
> The input is packet data, the output is struct bpf_flow_keys. This should
> make it easy to test flow dissector programs without elaborate setup.
>
> Signed-off-by: Stanislav Fomichev <s...@google.com>
> ---
>  include/linux/bpf.h |  3 ++
>  net/bpf/test_run.c  | 76 +++++++++++++++++++++++++++++++++++++++++----
>  net/core/filter.c   |  1 +
>  3 files changed, 74 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index e82b7039fc66..7a572d15d5dd 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -373,6 +373,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
>                            union bpf_attr __user *uattr);
>  int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
>                            union bpf_attr __user *uattr);
> +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> +                                     const union bpf_attr *kattr,
> +                                     union bpf_attr __user *uattr);
>
>  /* an array of programs to be executed under rcu_lock.
>   *
> diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> index c89c22c49015..bfa05d31c6e3 100644
> --- a/net/bpf/test_run.c
> +++ b/net/bpf/test_run.c
> @@ -14,21 +14,33 @@
>  #include <net/tcp.h>
>
>  static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
> -               struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
> +               struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
> +               struct bpf_flow_keys *flow_keys)
>  {
>         u32 ret;
>
>         preempt_disable();
>         rcu_read_lock();
>         bpf_cgroup_storage_set(storage);
> -       ret = BPF_PROG_RUN(prog, ctx);
> +
> +       switch (prog->type) {
> +       case BPF_PROG_TYPE_FLOW_DISSECTOR:
> +               ret = __skb_flow_bpf_dissect(prog, ctx, &flow_keys_dissector,
> +                                            flow_keys);
> +               break;
> +       default:
> +               ret = BPF_PROG_RUN(prog, ctx);
> +               break;
> +       }
> +

Is it possible to fold the logic above into bpf_prog_test_run_flow_dissector()?
That way, the logic flow would be similar to the other bpf_prog_test_run_XXX()
functions.
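Something like the untested sketch below is what I have in mind: keep
bpf_test_run_one()/bpf_test_run() unchanged and run the dissector loop
directly in the new helper. The prog->type check, the skb/sock setup, and
the cleanup are elided since they would stay exactly as in this patch:

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
                                     const union bpf_attr *kattr,
                                     union bpf_attr __user *uattr)
{
        struct bpf_flow_keys flow_keys = {};
        u32 repeat = kattr->test.repeat;
        u64 time_start, time_spent;
        u32 retval, duration;
        struct sk_buff *skb;
        int ret;
        u32 i;

        /* ... prog->type check and skb/sock setup as in this patch ... */

        if (!repeat)
                repeat = 1;

        /* The dissector-specific dispatch lives here instead of in
         * bpf_test_run_one(), so bpf_test_run_one()/bpf_test_run()
         * keep their current signatures.
         */
        time_start = ktime_get_ns();
        for (i = 0; i < repeat; i++) {
                preempt_disable();
                rcu_read_lock();
                retval = __skb_flow_bpf_dissect(prog, skb,
                                                &flow_keys_dissector,
                                                &flow_keys);
                rcu_read_unlock();
                preempt_enable();
        }
        time_spent = ktime_get_ns() - time_start;
        do_div(time_spent, repeat);
        duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

        ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
                              retval, duration);
        /* ... kfree_skb()/kfree() cleanup as in this patch ... */
        return ret;
}

That would also keep the prog->type switch out of the generic path that
the skb and xdp test runs share.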
Thanks,
Song

>         rcu_read_unlock();
>         preempt_enable();
>
>         return ret;
>  }
>
> -static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
> +static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time,
> +                       struct bpf_flow_keys *flow_keys)
>  {
>         struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
>         enum bpf_cgroup_storage_type stype;
> @@ -49,7 +61,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
>         repeat = 1;
>         time_start = ktime_get_ns();
>         for (i = 0; i < repeat; i++) {
> -               ret = bpf_test_run_one(prog, ctx, storage);
> +               ret = bpf_test_run_one(prog, ctx, storage, flow_keys);
>                 if (need_resched()) {
>                         if (signal_pending(current))
>                                 break;
> @@ -165,7 +177,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
>         __skb_push(skb, hh_len);
>         if (is_direct_pkt_access)
>                 bpf_compute_data_pointers(skb);
> -       retval = bpf_test_run(prog, skb, repeat, &duration);
> +       retval = bpf_test_run(prog, skb, repeat, &duration, NULL);
>         if (!is_l2) {
>                 if (skb_headroom(skb) < hh_len) {
>                         int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
> @@ -212,7 +224,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
>         rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
>         xdp.rxq = &rxqueue->xdp_rxq;
>
> -       retval = bpf_test_run(prog, &xdp, repeat, &duration);
> +       retval = bpf_test_run(prog, &xdp, repeat, &duration, NULL);
>         if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
>             xdp.data_end != xdp.data + size)
>                 size = xdp.data_end - xdp.data;
> @@ -220,3 +232,55 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
>         kfree(data);
>         return ret;
>  }
> +
> +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> +                                    const union bpf_attr *kattr,
> +                                    union bpf_attr __user *uattr)
> +{
> +       struct bpf_flow_keys flow_keys = {};
> +       u32 size = kattr->test.data_size_in;
> +       u32 repeat = kattr->test.repeat;
> +       u32 retval, duration;
> +       struct sk_buff *skb;
> +       struct sock *sk;
> +       void *data;
> +       int ret;
> +
> +       if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
> +               return -EINVAL;
> +
> +       data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
> +                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
> +       if (IS_ERR(data))
> +               return PTR_ERR(data);
> +
> +       sk = kzalloc(sizeof(*sk), GFP_USER);
> +       if (!sk) {
> +               kfree(data);
> +               return -ENOMEM;
> +       }
> +       sock_net_set(sk, current->nsproxy->net_ns);
> +       sock_init_data(NULL, sk);
> +
> +       skb = build_skb(data, 0);
> +       if (!skb) {
> +               kfree(data);
> +               kfree(sk);
> +               return -ENOMEM;
> +       }
> +       skb->sk = sk;
> +
> +       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
> +       __skb_put(skb, size);
> +       skb->protocol = eth_type_trans(skb,
> +                                      current->nsproxy->net_ns->loopback_dev);
> +       skb_reset_network_header(skb);
> +
> +       retval = bpf_test_run(prog, skb, repeat, &duration, &flow_keys);
> +
> +       ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
> +                             retval, duration);
> +       kfree_skb(skb);
> +       kfree(sk);
> +       return ret;
> +}
> diff --git a/net/core/filter.c b/net/core/filter.c
> index bd0df75dc7b6..4eae6102399d 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -7657,6 +7657,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
>  };
>
>  const struct bpf_prog_ops flow_dissector_prog_ops = {
> +       .test_run = bpf_prog_test_run_flow_dissector,
>  };
>
>  int sk_detach_filter(struct sock *sk)
> --
> 2.20.0.rc1.387.gf8505762e3-goog
>
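As an aside, for anyone wanting to try this: the userspace side should
reduce to a single test-run call. A rough sketch, assuming libbpf's
bpf_prog_test_run() wrapper and an already-loaded
BPF_PROG_TYPE_FLOW_DISSECTOR program (prog_fd, pkt, and pkt_len below are
placeholders, not part of this patch):

#include <arpa/inet.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>
#include <stdio.h>

/* prog_fd: fd of a loaded BPF_PROG_TYPE_FLOW_DISSECTOR program;
 * pkt/pkt_len: raw packet bytes to dissect, starting at the L2 header.
 */
static int run_flow_dissector_once(int prog_fd, void *pkt, __u32 pkt_len)
{
        struct bpf_flow_keys flow_keys = {};
        __u32 size_out = 0, retval = 0, duration = 0;
        int err;

        /* data in: the packet; data out: the dissected flow keys */
        err = bpf_prog_test_run(prog_fd, 1, pkt, pkt_len,
                                &flow_keys, &size_out, &retval, &duration);
        if (err)
                return err;

        printf("retval=%u nhoff=%u thoff=%u n_proto=0x%04x ip_proto=%u\n",
               retval, flow_keys.nhoff, flow_keys.thoff,
               ntohs(flow_keys.n_proto), flow_keys.ip_proto);
        return 0;
}

No tap device, no netns plumbing; just a packet buffer in and struct
bpf_flow_keys out.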