This moves vhost_net_open below the stop/flush functions so that in the
next patches we can pass vhost_dev_init a new helper which will use them.
There are no functional changes in this patch.

Signed-off-by: Mike Christie <[email protected]>
---
 drivers/vhost/net.c | 134 ++++++++++++++++++++++----------------------
 1 file changed, 67 insertions(+), 67 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 07181cd8d52e..8557072ff05e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1285,73 +1285,6 @@ static void handle_rx_net(struct vhost_work *work)
        handle_rx(net);
 }
 
-static int vhost_net_open(struct inode *inode, struct file *f)
-{
-       struct vhost_net *n;
-       struct vhost_dev *dev;
-       struct vhost_virtqueue **vqs;
-       void **queue;
-       struct xdp_buff *xdp;
-       int i;
-
-       n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
-       if (!n)
-               return -ENOMEM;
-       vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
-       if (!vqs) {
-               kvfree(n);
-               return -ENOMEM;
-       }
-
-       queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
-                             GFP_KERNEL);
-       if (!queue) {
-               kfree(vqs);
-               kvfree(n);
-               return -ENOMEM;
-       }
-       n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
-
-       xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
-       if (!xdp) {
-               kfree(vqs);
-               kvfree(n);
-               kfree(queue);
-               return -ENOMEM;
-       }
-       n->vqs[VHOST_NET_VQ_TX].xdp = xdp;
-
-       dev = &n->dev;
-       vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
-       vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
-       n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
-       n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
-       for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
-               n->vqs[i].ubufs = NULL;
-               n->vqs[i].ubuf_info = NULL;
-               n->vqs[i].upend_idx = 0;
-               n->vqs[i].done_idx = 0;
-               n->vqs[i].batched_xdp = 0;
-               n->vqs[i].vhost_hlen = 0;
-               n->vqs[i].sock_hlen = 0;
-               n->vqs[i].rx_ring = NULL;
-               vhost_net_buf_init(&n->vqs[i].rxq);
-       }
-       vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
-                      UIO_MAXIOV + VHOST_NET_BATCH,
-                      VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
-                      NULL);
-
-       vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
-       vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
-
-       f->private_data = n;
-       n->page_frag.page = NULL;
-       n->refcnt_bias = 0;
-
-       return 0;
-}
-
 static struct socket *vhost_net_stop_vq(struct vhost_net *n,
                                        struct vhost_virtqueue *vq)
 {
@@ -1421,6 +1354,73 @@ static int vhost_net_release(struct inode *inode, struct file *f)
        return 0;
 }
 
+static int vhost_net_open(struct inode *inode, struct file *f)
+{
+       struct vhost_net *n;
+       struct vhost_dev *dev;
+       struct vhost_virtqueue **vqs;
+       void **queue;
+       struct xdp_buff *xdp;
+       int i;
+
+       n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+       if (!n)
+               return -ENOMEM;
+       vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
+       if (!vqs) {
+               kvfree(n);
+               return -ENOMEM;
+       }
+
+       queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
+                             GFP_KERNEL);
+       if (!queue) {
+               kfree(vqs);
+               kvfree(n);
+               return -ENOMEM;
+       }
+       n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
+
+       xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
+       if (!xdp) {
+               kfree(vqs);
+               kvfree(n);
+               kfree(queue);
+               return -ENOMEM;
+       }
+       n->vqs[VHOST_NET_VQ_TX].xdp = xdp;
+
+       dev = &n->dev;
+       vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
+       vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
+       n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
+       n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
+       for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
+               n->vqs[i].ubufs = NULL;
+               n->vqs[i].ubuf_info = NULL;
+               n->vqs[i].upend_idx = 0;
+               n->vqs[i].done_idx = 0;
+               n->vqs[i].batched_xdp = 0;
+               n->vqs[i].vhost_hlen = 0;
+               n->vqs[i].sock_hlen = 0;
+               n->vqs[i].rx_ring = NULL;
+               vhost_net_buf_init(&n->vqs[i].rxq);
+       }
+       vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
+                      UIO_MAXIOV + VHOST_NET_BATCH,
+                      VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
+                      NULL);
+
+       vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
+       vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
+
+       f->private_data = n;
+       n->page_frag.page = NULL;
+       n->refcnt_bias = 0;
+
+       return 0;
+}
+
 static struct socket *get_raw_socket(int fd)
 {
        int r;
-- 
2.25.1

_______________________________________________
Virtualization mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Reply via email to