Hello Maxime,
On Wed, Sep 29, 2021 at 10:18 PM Maxime Coquelin <maxime.coque...@redhat.com> wrote:
>
> This patch removes the simplification in Virtio descriptors
> handling, where their buffer addresses are IOVAs for Virtio
> PCI devices, and VA-only for Virtio-user devices, which
> added a requirement on Virtio-user that it only supported
> IOVA as VA.
>
> This change introduced a regression for applications using
> Virtio-user and other physical PMDs that require IOVA as PA
> because they don't use an IOMMU.
>
> This patch reverts to the old behaviour, but needed to be
> reworked because of the refactoring that happened in v21.02.
>
> Fixes: 17043a2909bb ("net/virtio: force IOVA as VA mode for virtio-user")
> Cc: sta...@dpdk.org
>
> Reported-by: Olivier Matz <olivier.m...@6wind.com>
> Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>

This patch does not apply on next-virtio, but you are best placed to
figure this out :-).
Quick look: only nits, otherwise the patch lgtm.

> ---
>  drivers/net/virtio/virtio.h                  |  1 +
>  drivers/net/virtio/virtio_ethdev.c           | 25 +++++++++++++----
>  drivers/net/virtio/virtio_rxtx.c             | 28 ++++++++++++--------
>  drivers/net/virtio/virtio_rxtx_packed.h      |  2 +-
>  drivers/net/virtio/virtio_rxtx_packed_avx.h  |  8 +++---
>  drivers/net/virtio/virtio_rxtx_packed_neon.h |  8 +++---
>  drivers/net/virtio/virtio_rxtx_simple.h      |  3 ++-
>  drivers/net/virtio/virtio_user_ethdev.c      |  7 ++++-
>  drivers/net/virtio/virtqueue.h               | 22 ++++++++++++++-
>  9 files changed, 76 insertions(+), 28 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio.h b/drivers/net/virtio/virtio.h
> index b4f21dc0c7..7118e5d24c 100644
> --- a/drivers/net/virtio/virtio.h
> +++ b/drivers/net/virtio/virtio.h
> @@ -221,6 +221,7 @@ struct virtio_hw {
>  	uint8_t *rss_key;
>  	uint64_t req_guest_features;
>  	struct virtnet_ctl *cvq;
> +	bool use_va;
>  };
>
>  struct virtio_ops {
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index b4bd1f07c1..8055be88a2 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -567,12 +567,16 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
>
>  	memset(mz->addr, 0, mz->len);
>
> -	vq->vq_ring_mem = mz->iova;
> +	if (hw->use_va)
> +		vq->vq_ring_mem = (uintptr_t)mz->addr;
> +	else
> +		vq->vq_ring_mem = mz->iova;
> +
>  	vq->vq_ring_virt_mem = mz->addr;
>  	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
> -			(uint64_t)mz->iova);
> +			(uint64_t)vq->vq_ring_mem);

vq_ring_mem is a rte_iova_t, which is a uint64_t. The cast is unneeded.

>  	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
> -			(uint64_t)(uintptr_t)mz->addr);
> +			(uint64_t)(uintptr_t)vq->vq_ring_virt_mem);

Why not display with %p and drop the casts?
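Something like this, for example (an untested sketch, just to illustrate the suggestion):

	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64, vq->vq_ring_mem);
	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: %p", vq->vq_ring_virt_mem);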
>
>  	virtio_init_vring(vq);
>
> @@ -622,17 +626,28 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
>  		txvq->port_id = dev->data->port_id;
>  		txvq->mz = mz;
>  		txvq->virtio_net_hdr_mz = hdr_mz;
> -		txvq->virtio_net_hdr_mem = hdr_mz->iova;
> +		if (hw->use_va)
> +			txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
> +		else
> +			txvq->virtio_net_hdr_mem = hdr_mz->iova;
>  	} else if (queue_type == VTNET_CQ) {
>  		cvq = &vq->cq;
>  		cvq->mz = mz;
>  		cvq->virtio_net_hdr_mz = hdr_mz;
> -		cvq->virtio_net_hdr_mem = hdr_mz->iova;
> +		if (hw->use_va)
> +			cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
> +		else
> +			cvq->virtio_net_hdr_mem = hdr_mz->iova;
>  		memset(cvq->virtio_net_hdr_mz->addr, 0, rte_mem_page_size());
>
>  		hw->cvq = cvq;
>  	}
>
> +	if (hw->use_va)
> +		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
> +	else
> +		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
> +
>  	if (queue_type == VTNET_TQ) {
>  		struct virtio_tx_region *txr;
>  		unsigned int i;
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index b9d7c8d18f..0f3c286438 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -271,10 +271,13 @@ virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
>  		dxp->cookie = (void *)cookies[i];
>  		dxp->ndescs = 1;
>
> -		start_dp[idx].addr = cookies[i]->buf_iova +
> -			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> -		start_dp[idx].len = cookies[i]->buf_len -
> -			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
> +		start_dp[idx].addr =
> +				VIRTIO_MBUF_ADDR(cookies[i], vq) +
> +				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;

A single <tab> is enough indent.

> +		start_dp[idx].len =
> +				cookies[i]->buf_len -
> +				RTE_PKTMBUF_HEADROOM +
> +				hw->vtnet_hdr_size;

This part needs no update.
In the end for this hunk, we only need:

-		start_dp[idx].addr = cookies[i]->buf_iova +
+		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookies[i], vq) +

>  		start_dp[idx].flags = VRING_DESC_F_WRITE;
>
>  		vq_update_avail_ring(vq, idx);
> @@ -310,10 +313,12 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
>  		dxp->cookie = (void *)cookie[i];
>  		dxp->ndescs = 1;
>
> -		start_dp[idx].addr = cookie[i]->buf_iova +
> +		start_dp[idx].addr =
> +				VIRTIO_MBUF_ADDR(cookie[i], vq) +
>  			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> -		start_dp[idx].len = cookie[i]->buf_len -
> -			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
> +		start_dp[idx].len =
> +				cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
> +				hw->vtnet_hdr_size;
>  		start_dp[idx].flags = VRING_DESC_F_WRITE;
>  		vq->vq_desc_head_idx = start_dp[idx].next;
>  		vq_update_avail_ring(vq, idx);

Same comment as above, we only need:

-		start_dp[idx].addr = cookie[i]->buf_iova +
+		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +

> @@ -336,7 +341,7 @@ virtqueue_refill_single_packed(struct virtqueue *vq,
>  	uint16_t flags = vq->vq_packed.cached_flags;
>  	struct virtio_hw *hw = vq->hw;
>
> -	dp->addr = cookie->buf_iova +
> +	dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) +
>  			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
>  	dp->len = cookie->buf_len -
>  			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
> @@ -482,7 +487,8 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
>  		else
>  			virtqueue_xmit_offload(hdr, cookies[i]);
>
> -		start_dp[idx].addr = rte_mbuf_data_iova(cookies[i]) - head_size;
> +		start_dp[idx].addr =
> +			VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;

We could go a little over 80 columns.
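i.e. keep it on a single line, something like (untested, assuming checkpatch tolerates the length):

		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;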
>  		start_dp[idx].len = cookies[i]->data_len + head_size;
>  		start_dp[idx].flags = 0;
>
> @@ -529,7 +535,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
>  	else
>  		virtqueue_xmit_offload(hdr, cookie);
>
> -	dp->addr = rte_mbuf_data_iova(cookie) - head_size;
> +	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
>  	dp->len = cookie->data_len + head_size;
>  	dp->id = id;
>
> @@ -617,7 +623,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
>  	virtqueue_xmit_offload(hdr, cookie);
>
>  	do {
> -		start_dp[idx].addr = rte_mbuf_data_iova(cookie);
> +		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
>  		start_dp[idx].len = cookie->data_len;
>  		if (prepend_header) {
>  			start_dp[idx].addr -= head_size;

[snip]

> diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
> index f258771fcf..497c9a0e32 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple.h
> +++ b/drivers/net/virtio/virtio_rxtx_simple.h
> @@ -43,7 +43,8 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
>  		p = (uintptr_t)&sw_ring[i]->rearm_data;
>  		*(uint64_t *)p = rxvq->mbuf_initializer;
>
> -		start_dp[i].addr = sw_ring[i]->buf_iova +
> +		start_dp[i].addr =
> +			VIRTIO_MBUF_ADDR(sw_ring[i], vq) +

This fits in 80 columns.

>  			RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
>  		start_dp[i].len = sw_ring[i]->buf_len -
>  			RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;

[snip]

-- 
David Marchand