On 05/02/2018 01:01 PM, Björn Töpel wrote:
> From: Magnus Karlsson <magnus.karls...@intel.com>
>
> Here, we add another setsockopt for registered user memory (umem)
> called XDP_UMEM_FILL_QUEUE. Using this socket option, the process can
> ask the kernel to allocate a queue (ring buffer) and also mmap it
> (XDP_UMEM_PGOFF_FILL_QUEUE) into the process.
>
> The queue is used to explicitly pass ownership of umem frames from the
> user process to the kernel. These frames will in a later patch be
> filled in with Rx packet data by the kernel.
>
> v2: Fixed potential crash in xsk_mmap.
>
> Signed-off-by: Magnus Karlsson <magnus.karls...@intel.com>
> ---
>  include/uapi/linux/if_xdp.h | 15 +++++++++++
>  net/xdp/Makefile            |  2 +-
>  net/xdp/xdp_umem.c          |  5 ++++
>  net/xdp/xdp_umem.h          |  2 ++
>  net/xdp/xsk.c               | 65 ++++++++++++++++++++++++++++++++++++++++++++-
>  net/xdp/xsk_queue.c         | 58 ++++++++++++++++++++++++++++++++++++++++
>  net/xdp/xsk_queue.h         | 38 ++++++++++++++++++++++++++
>  7 files changed, 183 insertions(+), 2 deletions(-)
>  create mode 100644 net/xdp/xsk_queue.c
>  create mode 100644 net/xdp/xsk_queue.h
>
> diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
> index 41252135a0fe..975661e1baca 100644
> --- a/include/uapi/linux/if_xdp.h
> +++ b/include/uapi/linux/if_xdp.h
> @@ -23,6 +23,7 @@
>
>  /* XDP socket options */
>  #define XDP_UMEM_REG                    3
> +#define XDP_UMEM_FILL_RING              4
>
>  struct xdp_umem_reg {
>          __u64 addr; /* Start of packet data area */
> @@ -31,4 +32,18 @@ struct xdp_umem_reg {
>          __u32 frame_headroom; /* Frame head room */
>  };
>
> +/* Pgoff for mmaping the rings */
> +#define XDP_UMEM_PGOFF_FILL_RING        0x100000000
> +
> +struct xdp_ring {
> +        __u32 producer __attribute__((aligned(64)));
> +        __u32 consumer __attribute__((aligned(64)));
> +};
> +
> +/* Used for the fill and completion queues for buffers */
> +struct xdp_umem_ring {
> +        struct xdp_ring ptrs;
> +        __u32 desc[0] __attribute__((aligned(64)));
> +};
> +
>  #endif /* _LINUX_IF_XDP_H */
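The aligned(64) annotations presumably exist to keep the producer index, the
consumer index and the descriptor array each on their own cache line. Just to
spell out the layout this gives the process that maps the ring (my own sketch,
not part of the patch):

#include <stddef.h>
#include <linux/if_xdp.h>

_Static_assert(offsetof(struct xdp_ring, producer) == 0,
	       "producer index starts the first cache line");
_Static_assert(offsetof(struct xdp_ring, consumer) == 64,
	       "consumer index starts the second cache line");
_Static_assert(offsetof(struct xdp_umem_ring, desc) == 128,
	       "descriptors follow the two index cache lines");

So the area mapped at XDP_UMEM_PGOFF_FILL_RING is 128 bytes of ring indices
followed by one __u32 descriptor per entry.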
> diff --git a/net/xdp/Makefile b/net/xdp/Makefile
> index a5d736640a0f..074fb2b2d51c 100644
> --- a/net/xdp/Makefile
> +++ b/net/xdp/Makefile
> @@ -1,2 +1,2 @@
> -obj-$(CONFIG_XDP_SOCKETS) += xsk.o xdp_umem.o
> +obj-$(CONFIG_XDP_SOCKETS) += xsk.o xdp_umem.o xsk_queue.o
>
> diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
> index ec8b3552be44..e1f627d0cc1c 100644
> --- a/net/xdp/xdp_umem.c
> +++ b/net/xdp/xdp_umem.c
> @@ -65,6 +65,11 @@ static void xdp_umem_release(struct xdp_umem *umem)
>          struct task_struct *task;
>          struct mm_struct *mm;
>
> +        if (umem->fq) {
> +                xskq_destroy(umem->fq);
> +                umem->fq = NULL;
> +        }
> +
>          if (umem->pgs) {
>                  xdp_umem_unpin_pages(umem);
>
> diff --git a/net/xdp/xdp_umem.h b/net/xdp/xdp_umem.h
> index 4597ae81a221..25634b8a5c6f 100644
> --- a/net/xdp/xdp_umem.h
> +++ b/net/xdp/xdp_umem.h
> @@ -19,9 +19,11 @@
>  #include <linux/if_xdp.h>
>  #include <linux/workqueue.h>
>
> +#include "xsk_queue.h"
>  #include "xdp_umem_props.h"
>
>  struct xdp_umem {
> +        struct xsk_queue *fq;
>          struct page **pgs;
>          struct xdp_umem_props props;
>          u32 npgs;
> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> index 84e0e867febb..da67a3c5c1c9 100644
> --- a/net/xdp/xsk.c
> +++ b/net/xdp/xsk.c
> @@ -32,6 +32,7 @@
>  #include <linux/netdevice.h>
>  #include <net/xdp_sock.h>
>
> +#include "xsk_queue.h"
>  #include "xdp_umem.h"
>
>  static struct xdp_sock *xdp_sk(struct sock *sk)
> @@ -39,6 +40,21 @@ static struct xdp_sock *xdp_sk(struct sock *sk)
>          return (struct xdp_sock *)sk;
>  }
>
> +static int xsk_init_queue(u32 entries, struct xsk_queue **queue)
> +{
> +        struct xsk_queue *q;
> +
> +        if (entries == 0 || *queue || !is_power_of_2(entries))
> +                return -EINVAL;
> +
> +        q = xskq_create(entries);
> +        if (!q)
> +                return -ENOMEM;
> +
> +        *queue = q;
> +        return 0;
> +}
> +
>  static int xsk_release(struct socket *sock)
>  {
>          struct sock *sk = sock->sk;
> @@ -101,6 +117,23 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
>                  mutex_unlock(&xs->mutex);
>                  return 0;
>          }
> +        case XDP_UMEM_FILL_RING:
> +        {
> +                struct xsk_queue **q;
> +                int entries;
> +
> +                if (!xs->umem)
> +                        return -EINVAL;
(Same here as previously mentioned.)

> +                if (copy_from_user(&entries, optval, sizeof(entries)))
> +                        return -EFAULT;
> +
> +                mutex_lock(&xs->mutex);
> +                q = &xs->umem->fq;
> +                err = xsk_init_queue(entries, q);
> +                mutex_unlock(&xs->mutex);
> +                return err;
> +        }
>          default:
>                  break;
>          }
> @@ -108,6 +141,36 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
>          return -ENOPROTOOPT;
>  }
>
> +static int xsk_mmap(struct file *file, struct socket *sock,
> +                    struct vm_area_struct *vma)
> +{
> +        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
> +        unsigned long size = vma->vm_end - vma->vm_start;
> +        struct xdp_sock *xs = xdp_sk(sock->sk);
> +        struct xsk_queue *q = NULL;
> +        unsigned long pfn;
> +        struct page *qpg;
> +
> +        if (!xs->umem)
> +                return -EINVAL;
> +
> +        if (offset == XDP_UMEM_PGOFF_FILL_RING)
> +                q = xs->umem->fq;
> +        else
> +                return -EINVAL;
> +
> +        if (!q)
> +                return -EINVAL;

Nit: since q is NULL above, could be simplified as:

        if (offset == XDP_UMEM_PGOFF_FILL_RING)
                q = xs->umem->fq;

        if (!q)
                return -EINVAL;

> +
> +        qpg = virt_to_head_page(q->ring);
> +        if (size > (PAGE_SIZE << compound_order(qpg)))
> +                return -EINVAL;
> +
> +        pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
> +        return remap_pfn_range(vma, vma->vm_start, pfn,
> +                               size, vma->vm_page_prot);
> +}
> +
>  static struct proto xsk_proto = {
>          .name =         "XDP",
>          .owner =        THIS_MODULE,
> @@ -131,7 +194,7 @@ static const struct proto_ops xsk_proto_ops = {
>          .getsockopt =   sock_no_getsockopt,
>          .sendmsg =      sock_no_sendmsg,
>          .recvmsg =      sock_no_recvmsg,
> -        .mmap =         sock_no_mmap,
> +        .mmap =         xsk_mmap,
>          .sendpage =     sock_no_sendpage,
>  };
>
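For my own understanding, the intended user-space flow with this patch would
be roughly the following. Only a sketch under my assumptions: sfd is an AF_XDP
socket whose umem has already been registered via XDP_UMEM_REG, the SOL_XDP
fallback define is for libcs that do not carry it yet, and
fq_create_and_prime()/frame_idx are made-up names; error paths trimmed:

#include <stddef.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

/* Create the fill ring on socket 'sfd' and hand one umem frame
 * (frame_idx) over to the kernel for Rx.
 */
int fq_create_and_prime(int sfd, int entries, __u32 frame_idx)
{
        struct xdp_umem_ring *fq;
        size_t size;
        __u32 prod;

        /* entries must be a non-zero power of two, see xsk_init_queue(). */
        if (setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING,
                       &entries, sizeof(entries)))
                return -1;

        /* Ring header plus one __u32 descriptor per entry; the pgoff is
         * 0x100000000, so this needs a 64-bit off_t.
         */
        size = sizeof(*fq) + entries * sizeof(__u32);
        fq = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                  sfd, XDP_UMEM_PGOFF_FILL_RING);
        if (fq == MAP_FAILED)
                return -1;

        /* Write the descriptor first, then publish the new producer index
         * with a release store so the kernel never observes the index
         * before the descriptor it covers.
         */
        prod = fq->ptrs.producer;
        fq->desc[prod & (entries - 1)] = frame_idx;
        __atomic_store_n(&fq->ptrs.producer, prod + 1, __ATOMIC_RELEASE);

        return 0;
}

That would also explain why xsk_init_queue() insists on a power-of-two size:
the producer/consumer indices can then run freely and simply be masked on
access.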