From: Björn Töpel <bjorn.to...@intel.com>

This commit wires up the xskmap to the XDP_SKB (generic XDP) layer.

Signed-off-by: Björn Töpel <bjorn.to...@intel.com>
---
 include/linux/filter.h |  2 +-
 include/net/xdp_sock.h |  6 ++++++
 net/core/dev.c         | 28 +++++++++++++++-------------
 net/core/filter.c      | 17 ++++++++++++++---
 4 files changed, 36 insertions(+), 17 deletions(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 109d05ccea9a..6fa5e53d5fba 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -763,7 +763,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog 
*prog, u32 off,
  * This does not appear to be a real limitation for existing software.
  */
 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
-                           struct bpf_prog *prog);
+                           struct xdp_buff *xdp, struct bpf_prog *prog);
 int xdp_do_redirect(struct net_device *dev,
                    struct xdp_buff *xdp,
                    struct bpf_prog *prog);
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 80d119a685b2..7b3b29ba9ff0 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -18,9 +18,15 @@
 struct xdp_sock;
 struct xdp_buff;
 #ifdef CONFIG_XDP_SOCKETS
+int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
 int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
 void xsk_flush(struct xdp_sock *xs);
 #else
+static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+{
+       return -ENOTSUPP;
+}
+
 static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
        return -ENOTSUPP;
diff --git a/net/core/dev.c b/net/core/dev.c
index 97a96df4b6da..3632d959af1b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3985,11 +3985,11 @@ static struct netdev_rx_queue *netif_get_rxqueue(struct 
sk_buff *skb)
 }
 
 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+                                    struct xdp_buff *xdp,
                                     struct bpf_prog *xdp_prog)
 {
        struct netdev_rx_queue *rxqueue;
        u32 metalen, act = XDP_DROP;
-       struct xdp_buff xdp;
        void *orig_data;
        int hlen, off;
        u32 mac_len;
@@ -4025,18 +4025,18 @@ static u32 netif_receive_generic_xdp(struct sk_buff 
*skb,
         */
        mac_len = skb->data - skb_mac_header(skb);
        hlen = skb_headlen(skb) + mac_len;
-       xdp.data = skb->data - mac_len;
-       xdp.data_meta = xdp.data;
-       xdp.data_end = xdp.data + hlen;
-       xdp.data_hard_start = skb->data - skb_headroom(skb);
-       orig_data = xdp.data;
+       xdp->data = skb->data - mac_len;
+       xdp->data_meta = xdp->data;
+       xdp->data_end = xdp->data + hlen;
+       xdp->data_hard_start = skb->data - skb_headroom(skb);
+       orig_data = xdp->data;
 
        rxqueue = netif_get_rxqueue(skb);
-       xdp.rxq = &rxqueue->xdp_rxq;
+       xdp->rxq = &rxqueue->xdp_rxq;
 
-       act = bpf_prog_run_xdp(xdp_prog, &xdp);
+       act = bpf_prog_run_xdp(xdp_prog, xdp);
 
-       off = xdp.data - orig_data;
+       off = xdp->data - orig_data;
        if (off > 0)
                __skb_pull(skb, off);
        else if (off < 0)
@@ -4049,7 +4049,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
                __skb_push(skb, mac_len);
                break;
        case XDP_PASS:
-               metalen = xdp.data - xdp.data_meta;
+               metalen = xdp->data - xdp->data_meta;
                if (metalen)
                        skb_metadata_set(skb, metalen);
                break;
@@ -4099,17 +4099,19 @@ static struct static_key generic_xdp_needed 
__read_mostly;
 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
 {
        if (xdp_prog) {
-               u32 act = netif_receive_generic_xdp(skb, xdp_prog);
+               struct xdp_buff xdp;
+               u32 act;
                int err;
 
+               act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
                if (act != XDP_PASS) {
                        switch (act) {
                        case XDP_REDIRECT:
                                err = xdp_do_generic_redirect(skb->dev, skb,
-                                                             xdp_prog);
+                                                             &xdp, xdp_prog);
                                if (err)
                                        goto out_redir;
-                       /* fallthru to submit skb */
+                               break;
                        case XDP_TX:
                                generic_xdp_tx(skb, xdp_prog);
                                break;
diff --git a/net/core/filter.c b/net/core/filter.c
index 4b09c0a02814..fc71b53542d2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -57,6 +57,7 @@
 #include <net/busy_poll.h>
 #include <net/tcp.h>
 #include <linux/bpf_trace.h>
+#include <net/xdp_sock.h>
 
 /**
  *     sk_filter_trim_cap - run a packet through a socket filter
@@ -2932,13 +2933,14 @@ static int __xdp_generic_ok_fwd_dev(struct sk_buff 
*skb, struct net_device *fwd)
 
 static int xdp_do_generic_redirect_map(struct net_device *dev,
                                       struct sk_buff *skb,
+                                      struct xdp_buff *xdp,
                                       struct bpf_prog *xdp_prog)
 {
        struct redirect_info *ri = this_cpu_ptr(&redirect_info);
        unsigned long map_owner = ri->map_owner;
        struct bpf_map *map = ri->map;
-       struct net_device *fwd = NULL;
        u32 index = ri->ifindex;
+       void *fwd = NULL;
        int err = 0;
 
        ri->ifindex = 0;
@@ -2960,6 +2962,14 @@ static int xdp_do_generic_redirect_map(struct net_device 
*dev,
                if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
                        goto err;
                skb->dev = fwd;
+               generic_xdp_tx(skb, xdp_prog);
+       } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
+               struct xdp_sock *xs = fwd;
+
+               err = xsk_generic_rcv(xs, xdp);
+               if (err)
+                       goto err;
+               consume_skb(skb);
        } else {
                /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
                err = -EBADRQC;
@@ -2974,7 +2984,7 @@ static int xdp_do_generic_redirect_map(struct net_device 
*dev,
 }
 
 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
-                           struct bpf_prog *xdp_prog)
+                           struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
 {
        struct redirect_info *ri = this_cpu_ptr(&redirect_info);
        u32 index = ri->ifindex;
@@ -2982,7 +2992,7 @@ int xdp_do_generic_redirect(struct net_device *dev, 
struct sk_buff *skb,
        int err = 0;
 
        if (ri->map)
-               return xdp_do_generic_redirect_map(dev, skb, xdp_prog);
+               return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog);
 
        ri->ifindex = 0;
        fwd = dev_get_by_index_rcu(dev_net(dev), index);
@@ -2996,6 +3006,7 @@ int xdp_do_generic_redirect(struct net_device *dev, 
struct sk_buff *skb,
 
        skb->dev = fwd;
        _trace_xdp_redirect(dev, xdp_prog, index);
+       generic_xdp_tx(skb, xdp_prog);
        return 0;
 err:
        _trace_xdp_redirect_err(dev, xdp_prog, index, err);
-- 
2.14.1

Reply via email to