Currently xsk_umem_consume_tx() expects only physical NICs, so the API
returns a DMA address. This patch introduces a new function,
xsk_umem_consume_tx_virtual(), which returns the descriptor's virtual
address for use when XSK is attached to a virtual device.
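
As a sketch of the intended use, a virtual device's zero-copy TX path
could look like the following (my_vdev and my_vdev_xmit() are
hypothetical placeholders, not part of this patch):

	static void my_vdev_xsk_tx(struct my_vdev *vdev, struct xdp_umem *umem)
	{
		void *vaddr;
		u32 len;

		/* A virtual device has no DMA engine, so consume each TX
		 * descriptor as a kernel virtual address instead of a
		 * dma_addr_t.
		 */
		while (xsk_umem_consume_tx_virtual(umem, &vaddr, &len)) {
			if (my_vdev_xmit(vdev, vaddr, len))
				break;
		}

		/* Flush the lazily produced completion ring entries. */
		xsk_umem_consume_tx_done(umem);
	}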

Signed-off-by: William Tu <u9012...@gmail.com>
---
 include/net/xdp_sock.h |  7 +++++++
 net/xdp/xdp_umem.c     |  1 +
 net/xdp/xsk.c          | 38 +++++++++++++++++++++++++++++++-------
 3 files changed, 39 insertions(+), 7 deletions(-)

diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 13acb9803a6d..7fefe74f7fb5 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -81,6 +81,7 @@ u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
 void xsk_umem_discard_addr(struct xdp_umem *umem);
 void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
 bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
+bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem, void **addr, u32 *len);
 void xsk_umem_consume_tx_done(struct xdp_umem *umem);
 struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
 struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
@@ -165,6 +166,12 @@ static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
        return false;
 }
 
+static inline bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem,
+                                              void **vaddr, u32 *len)
+{
+       return false;
+}
+
 static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
 {
 }
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index a264cf2accd0..424ae2538f9f 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -60,6 +60,7 @@ struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
 
        return NULL;
 }
+EXPORT_SYMBOL(xdp_get_umem_from_qid);
 
 static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
 {
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 07156f43d295..0e252047f55f 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -170,22 +170,19 @@ void xsk_umem_consume_tx_done(struct xdp_umem *umem)
 }
 EXPORT_SYMBOL(xsk_umem_consume_tx_done);
 
-bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
+static __always_inline bool __xsk_umem_consume_tx(struct xdp_umem *umem,
+                                                 struct xdp_desc *desc)
 {
-       struct xdp_desc desc;
        struct xdp_sock *xs;
 
        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
-               if (!xskq_peek_desc(xs->tx, &desc))
+               if (!xskq_peek_desc(xs->tx, desc))
                        continue;
 
-               if (xskq_produce_addr_lazy(umem->cq, desc.addr))
+               if (xskq_produce_addr_lazy(umem->cq, desc->addr))
                        goto out;
 
-               *dma = xdp_umem_get_dma(umem, desc.addr);
-               *len = desc.len;
-
                xskq_discard_desc(xs->tx);
                rcu_read_unlock();
                return true;
@@ -195,8 +192,35 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
        rcu_read_unlock();
        return false;
 }
+
+bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
+{
+       struct xdp_desc desc;
+
+       if (!__xsk_umem_consume_tx(umem, &desc))
+               return false;
+
+       *dma = xdp_umem_get_dma(umem, desc.addr);
+       *len = desc.len;
+
+       return true;
+}
 EXPORT_SYMBOL(xsk_umem_consume_tx);
 
+bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem, void **vaddr, u32 *len)
+{
+       struct xdp_desc desc;
+
+       if (!__xsk_umem_consume_tx(umem, &desc))
+               return false;
+
+       *vaddr = xdp_umem_get_data(umem, desc.addr);
+       *len = desc.len;
+
+       return true;
+}
+EXPORT_SYMBOL(xsk_umem_consume_tx_virtual);
+
 static int xsk_zc_xmit(struct sock *sk)
 {
        struct xdp_sock *xs = xdp_sk(sk);
-- 
2.7.4
