On 2022/1/22 4:27 AM, Eugenio Pérez wrote:
Since it's a device property, it can be done in net/. This helps SVQ to
allocate the rings in vdpa device initialization, rather than delay that.

Signed-off-by: Eugenio Pérez <[email protected]>
---
 hw/virtio/vhost-vdpa.c | 15 ---------------
 net/vhost-vdpa.c       | 32 ++++++++++++++++++++++++--------
I don't understand this part — don't we plan to support devices other than net?
 2 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 75090d65e8..2491c05d29 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -350,19 +350,6 @@ static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
     return 0;
 }
 
-static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
-{
-    int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
-                              &v->iova_range);
-    if (ret != 0) {
-        v->iova_range.first = 0;
-        v->iova_range.last = UINT64_MAX;
-    }
-
-    trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
-                                    v->iova_range.last);
-}
Let's just export this instead? Thanks
-
 static bool vhost_vdpa_one_time_request(struct vhost_dev *dev)
 {
     struct vhost_vdpa *v = dev->opaque;
@@ -1295,8 +1282,6 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
         goto err;
     }
 
-    vhost_vdpa_get_iova_range(v);
-
     if (vhost_vdpa_one_time_request(dev)) {
         return 0;
     }
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 4befba5cc7..cc9cecf8d1 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -22,6 +22,7 @@
 #include <sys/ioctl.h>
 #include <err.h>
 #include "standard-headers/linux/virtio_net.h"
+#include "standard-headers/linux/vhost_types.h"
 #include "monitor/monitor.h"
 #include "hw/virtio/vhost.h"
@@ -187,13 +188,25 @@ static NetClientInfo net_vhost_vdpa_info = {
     .check_peer_type = vhost_vdpa_check_peer_type,
 };
 
+static void vhost_vdpa_get_iova_range(int fd,
+                                      struct vhost_vdpa_iova_range *iova_range)
+{
+    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
+
+    if (ret < 0) {
+        iova_range->first = 0;
+        iova_range->last = UINT64_MAX;
+    }
+}
+
 static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
-                                           const char *device,
-                                           const char *name,
-                                           int vdpa_device_fd,
-                                           int queue_pair_index,
-                                           int nvqs,
-                                           bool is_datapath)
+                                       const char *device,
+                                       const char *name,
+                                       int vdpa_device_fd,
+                                       int queue_pair_index,
+                                       int nvqs,
+                                       bool is_datapath,
+                                       struct vhost_vdpa_iova_range iova_range)
 {
     NetClientState *nc = NULL;
     VhostVDPAState *s;
@@ -211,6 +224,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     s->vhost_vdpa.device_fd = vdpa_device_fd;
     s->vhost_vdpa.index = queue_pair_index;
+    s->vhost_vdpa.iova_range = iova_range;
     ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
     if (ret) {
         qemu_del_net_client(nc);
@@ -267,6 +281,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     g_autofree NetClientState **ncs = NULL;
     NetClientState *nc;
     int queue_pairs, i, has_cvq = 0;
+    struct vhost_vdpa_iova_range iova_range;
 
     assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
     opts =
&netdev->u.vhost_vdpa;
@@ -286,19 +301,20 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
         qemu_close(vdpa_device_fd);
         return queue_pairs;
     }
+    vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
 
     ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
     for (i = 0; i < queue_pairs; i++) {
         ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
-                                     vdpa_device_fd, i, 2, true);
+                                     vdpa_device_fd, i, 2, true, iova_range);
         if (!ncs[i])
             goto err;
     }
 
     if (has_cvq) {
         nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
-                                 vdpa_device_fd, i, 1, false);
+                                 vdpa_device_fd, i, 1, false, iova_range);
         if (!nc)
             goto err;
     }
