From: Björn Töpel <bjorn.to...@intel.com>

Previously the fill queue descriptor was not copied to kernel space
prior to validating it, making it possible for userland to change the
descriptor after the kernel had validated it (a time-of-check-to-
time-of-use race on shared memory). Fix this by reading the descriptor
into a kernel-local variable with READ_ONCE() before validation, so
that all subsequent uses operate on the validated copy.
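
To illustrate the race (a sketch with hypothetical is_valid()/use()
helpers, not code from this patch), the old pattern read the shared
ring slot twice:

	/* Racy: the slot lives in memory mapped into userland, which
	 * can rewrite it between the validation read and the use read.
	 */
	if (is_valid(ring->desc[idx]))
		use(ring->desc[idx]);	/* may see an unvalidated value */

	/* Fixed: snapshot the slot once into a kernel-local variable,
	 * then validate and use only the private copy. READ_ONCE()
	 * also keeps the compiler from re-reading the shared location.
	 */
	u32 id = READ_ONCE(ring->desc[idx]);
	if (is_valid(id))
		use(id);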

Signed-off-by: Björn Töpel <bjorn.to...@intel.com>
---
 net/xdp/xsk.c       | 11 +++++------
 net/xdp/xsk_queue.h | 32 +++++++++-----------------------
 2 files changed, 14 insertions(+), 29 deletions(-)
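
Note: with this change, xskq_peek_id()/xskq_peek_desc() always return
through the validation helpers, which fill in a caller-provided copy,
and xskq_discard_id()/xskq_discard_desc() simply advance cons_tail
instead of re-validating the next entry. The consumer side in
__xsk_rcv() then looks roughly like this (simplified sketch):

	u32 id;

	if (!xskq_peek_id(xs->umem->fq, &id))	/* copy + validate */
		return -ENOSPC;
	/* ... work only with the kernel-local 'id' ... */
	xskq_discard_id(xs->umem->fq);		/* advance cons_tail */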

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index cce0e4f8a536..43554eb56fe6 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -41,20 +41,19 @@ bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 
 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-       u32 *id, len = xdp->data_end - xdp->data;
+       u32 id, len = xdp->data_end - xdp->data;
        void *buffer;
-       int err = 0;
+       int err;
 
        if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
                return -EINVAL;
 
-       id = xskq_peek_id(xs->umem->fq);
-       if (!id)
+       if (!xskq_peek_id(xs->umem->fq, &id))
                return -ENOSPC;
 
-       buffer = xdp_umem_get_data_with_headroom(xs->umem, *id);
+       buffer = xdp_umem_get_data_with_headroom(xs->umem, id);
        memcpy(buffer, xdp->data, len);
-       err = xskq_produce_batch_desc(xs->rx, *id, len,
+       err = xskq_produce_batch_desc(xs->rx, id, len,
                                      xs->umem->frame_headroom);
        if (!err)
                xskq_discard_id(xs->umem->fq);
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index cb8e5be35110..b5924e7aeb2b 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -85,14 +85,15 @@ static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
        return true;
 }
 
-static inline u32 *xskq_validate_id(struct xsk_queue *q)
+static inline u32 *xskq_validate_id(struct xsk_queue *q, u32 *id)
 {
        while (q->cons_tail != q->cons_head) {
                struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
                unsigned int idx = q->cons_tail & q->ring_mask;
 
-               if (xskq_is_valid_id(q, ring->desc[idx]))
-                       return &ring->desc[idx];
+               *id = READ_ONCE(ring->desc[idx]);
+               if (xskq_is_valid_id(q, *id))
+                       return id;
 
                q->cons_tail++;
        }
@@ -100,28 +101,22 @@ static inline u32 *xskq_validate_id(struct xsk_queue *q)
        return NULL;
 }
 
-static inline u32 *xskq_peek_id(struct xsk_queue *q)
+static inline u32 *xskq_peek_id(struct xsk_queue *q, u32 *id)
 {
-       struct xdp_umem_ring *ring;
-
        if (q->cons_tail == q->cons_head) {
                WRITE_ONCE(q->ring->consumer, q->cons_tail);
                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
 
                /* Order consumer and data */
                smp_rmb();
-
-               return xskq_validate_id(q);
        }
 
-       ring = (struct xdp_umem_ring *)q->ring;
-       return &ring->desc[q->cons_tail & q->ring_mask];
+       return xskq_validate_id(q, id);
 }
 
 static inline void xskq_discard_id(struct xsk_queue *q)
 {
        q->cons_tail++;
-       (void)xskq_validate_id(q);
 }
 
 static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
@@ -174,11 +169,9 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
                unsigned int idx = q->cons_tail & q->ring_mask;
 
-               if (xskq_is_valid_desc(q, &ring->desc[idx])) {
-                       if (desc)
-                               *desc = ring->desc[idx];
+               *desc = READ_ONCE(ring->desc[idx]);
+               if (xskq_is_valid_desc(q, desc))
                        return desc;
-               }
 
                q->cons_tail++;
        }
@@ -189,27 +182,20 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
 static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
                                              struct xdp_desc *desc)
 {
-       struct xdp_rxtx_ring *ring;
-
        if (q->cons_tail == q->cons_head) {
                WRITE_ONCE(q->ring->consumer, q->cons_tail);
                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
 
                /* Order consumer and data */
                smp_rmb();
-
-               return xskq_validate_desc(q, desc);
        }
 
-       ring = (struct xdp_rxtx_ring *)q->ring;
-       *desc = ring->desc[q->cons_tail & q->ring_mask];
-       return desc;
+       return xskq_validate_desc(q, desc);
 }
 
 static inline void xskq_discard_desc(struct xsk_queue *q)
 {
        q->cons_tail++;
-       (void)xskq_validate_desc(q, NULL);
 }
 
 static inline int xskq_produce_batch_desc(struct xsk_queue *q,
-- 
2.14.1
