Now that we have bpf_flow_dissect, which works on raw data (by
constructing a temporary on-stack skb), use it when doing
BPF_PROG_TEST_RUN for the flow dissector.

This should help us catch any bugs caused by the missing shinfo on
the per-cpu skb.

Note that the existing __skb_flow_bpf_dissect swallows the L2 header and
returns nhoff=0; we need to preserve that behavior.
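
From userspace nothing changes; as a quick sanity check, here is a
minimal sketch of a test-run invocation (assuming a loaded flow
dissector program fd prog_fd, a raw packet buffer pkt, and libbpf's
bpf_prog_test_run_xattr(); these names are illustrative) that still
expects L3-relative offsets:

  #include <linux/bpf.h>   /* struct bpf_flow_keys (uapi) */
  #include <bpf/bpf.h>     /* bpf_prog_test_run_xattr() */

  /* pkt starts with an Ethernet header; offsets come back L3-relative,
   * exactly as with the old per-cpu skb path. */
  struct bpf_flow_keys flow_keys = {};
  struct bpf_prog_test_run_attr tattr = {
          .prog_fd       = prog_fd,
          .data_in       = pkt,
          .data_size_in  = sizeof(pkt),
          .data_out      = &flow_keys,
          .data_size_out = sizeof(flow_keys),
  };
  int err = bpf_prog_test_run_xattr(&tattr);
  /* For a plain IPv4/TCP packet we still expect flow_keys.nhoff == 0
   * and flow_keys.thoff == sizeof(struct iphdr), thanks to the
   * ETH_HLEN adjustment below. */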

Signed-off-by: Stanislav Fomichev <s...@google.com>
---
 net/bpf/test_run.c | 48 ++++++++++++++--------------------------------
 1 file changed, 14 insertions(+), 34 deletions(-)

diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 512773a95ad5..90f7eaf129c6 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -252,10 +252,8 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
        u32 repeat = kattr->test.repeat;
        struct bpf_flow_keys flow_keys;
        u64 time_start, time_spent = 0;
-       struct bpf_skb_data_end *cb;
+       const struct ethhdr *eth;
        u32 retval, duration;
-       struct sk_buff *skb;
-       struct sock *sk;
        void *data;
        int ret;
        u32 i;
@@ -263,35 +261,14 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
        if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
                return -EINVAL;
 
-       data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
-                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+       if (size < ETH_HLEN)
+               return -EINVAL;
+
+       data = bpf_test_init(kattr, size, 0, 0);
        if (IS_ERR(data))
                return PTR_ERR(data);
 
-       sk = kzalloc(sizeof(*sk), GFP_USER);
-       if (!sk) {
-               kfree(data);
-               return -ENOMEM;
-       }
-       sock_net_set(sk, current->nsproxy->net_ns);
-       sock_init_data(NULL, sk);
-
-       skb = build_skb(data, 0);
-       if (!skb) {
-               kfree(data);
-               kfree(sk);
-               return -ENOMEM;
-       }
-       skb->sk = sk;
-
-       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-       __skb_put(skb, size);
-       skb->protocol = eth_type_trans(skb,
-                                      current->nsproxy->net_ns->loopback_dev);
-       skb_reset_network_header(skb);
-
-       cb = (struct bpf_skb_data_end *)skb->cb;
-       cb->qdisc_cb.flow_keys = &flow_keys;
+       eth = (struct ethhdr *)data;
 
        if (!repeat)
                repeat = 1;
@@ -300,9 +277,13 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
        preempt_disable();
        time_start = ktime_get_ns();
        for (i = 0; i < repeat; i++) {
-               retval = bpf_flow_dissect_skb(prog, skb,
-                                             &flow_keys_dissector,
-                                             &flow_keys);
+               retval = bpf_flow_dissect(prog, data, eth->h_proto, ETH_HLEN,
+                                         size, &flow_keys_dissector,
+                                         &flow_keys);
+               if (flow_keys.nhoff >= ETH_HLEN)
+                       flow_keys.nhoff -= ETH_HLEN;
+               if (flow_keys.thoff >= ETH_HLEN)
+                       flow_keys.thoff -= ETH_HLEN;
 
                if (signal_pending(current)) {
                        preempt_enable();
@@ -335,7 +316,6 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
                              retval, duration);
 
 out:
-       kfree_skb(skb);
-       kfree(sk);
+       kfree(data);
        return ret;
 }
-- 
2.21.0.225.g810b269d1ac-goog
