* [PATCH] net/virtio-user: support IOVA as PA mode for vDPA backend
@ 2024-02-26 11:40 Srujana Challa
0 siblings, 0 replies; 4+ messages in thread
From: Srujana Challa @ 2024-02-26 11:40 UTC (permalink / raw)
To: dev, maxime.coquelin, chenbox; +Cc: jerinj, ndabilpuram, vattunuru, schalla
Disable the use_va flag for the vDPA backend type and fix the issues
with shadow control command processing when it is disabled.
This will help to make the virtio-user driver work in IOVA
as PA mode for the vDPA backend.
Signed-off-by: Srujana Challa <schalla@marvell.com>
---
drivers/net/virtio/virtio_ring.h | 12 ++-
.../net/virtio/virtio_user/virtio_user_dev.c | 86 ++++++++++---------
drivers/net/virtio/virtio_user_ethdev.c | 10 ++-
drivers/net/virtio/virtqueue.c | 4 +-
4 files changed, 65 insertions(+), 47 deletions(-)
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index e848c0b73b..998605dbb5 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -83,6 +83,7 @@ struct vring_packed_desc_event {
struct vring_packed {
unsigned int num;
+ rte_iova_t desc_iova;
struct vring_packed_desc *desc;
struct vring_packed_desc_event *driver;
struct vring_packed_desc_event *device;
@@ -90,6 +91,7 @@ struct vring_packed {
struct vring {
unsigned int num;
+ rte_iova_t desc_iova;
struct vring_desc *desc;
struct vring_avail *avail;
struct vring_used *used;
@@ -149,11 +151,12 @@ vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align)
return size;
}
static inline void
-vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
- unsigned int num)
+vring_init_split(struct vring *vr, uint8_t *p, rte_iova_t iova,
+ unsigned long align, unsigned int num)
{
vr->num = num;
vr->desc = (struct vring_desc *) p;
+ vr->desc_iova = iova;
vr->avail = (struct vring_avail *) (p +
num * sizeof(struct vring_desc));
vr->used = (void *)
@@ -161,11 +164,12 @@ vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
}
static inline void
-vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align,
- unsigned int num)
+vring_init_packed(struct vring_packed *vr, uint8_t *p, rte_iova_t iova,
+ unsigned long align, unsigned int num)
{
vr->num = num;
vr->desc = (struct vring_packed_desc *)p;
+ vr->desc_iova = iova;
vr->driver = (struct vring_packed_desc_event *)(p +
vr->num * sizeof(struct vring_packed_desc));
vr->device = (struct vring_packed_desc_event *)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index d395fc1676..c8d28cdd35 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -62,6 +62,7 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
struct vhost_vring_state state;
struct vring *vring = &dev->vrings.split[queue_sel];
struct vring_packed *pq_vring = &dev->vrings.packed[queue_sel];
+ uint64_t desc_addr, avail_addr, used_addr;
struct vhost_vring_addr addr = {
.index = queue_sel,
.log_guest_addr = 0,
@@ -81,16 +82,23 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
}
if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
- addr.desc_user_addr =
- (uint64_t)(uintptr_t)pq_vring->desc;
- addr.avail_user_addr =
- (uint64_t)(uintptr_t)pq_vring->driver;
- addr.used_user_addr =
- (uint64_t)(uintptr_t)pq_vring->device;
+ desc_addr = pq_vring->desc_iova;
+ avail_addr = desc_addr + pq_vring->num * sizeof(struct vring_packed_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr + sizeof(struct vring_packed_desc_event),
+ VIRTIO_VRING_ALIGN);
+
+ addr.desc_user_addr = desc_addr;
+ addr.avail_user_addr = avail_addr;
+ addr.used_user_addr = used_addr;
} else {
- addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
- addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
- addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
+ desc_addr = vring->desc_iova;
+ avail_addr = desc_addr + vring->num * sizeof(struct vring_desc);
+ used_addr = RTE_ALIGN_CEIL((uintptr_t)(&vring->avail->ring[vring->num]),
+ VIRTIO_VRING_ALIGN);
+
+ addr.desc_user_addr = desc_addr;
+ addr.avail_user_addr = avail_addr;
+ addr.used_user_addr = used_addr;
}
state.index = queue_sel;
@@ -885,11 +893,11 @@ static uint32_t
virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vring,
uint16_t idx_hdr)
{
- struct virtio_net_ctrl_hdr *hdr;
virtio_net_ctrl_ack status = ~0;
- uint16_t i, idx_data, idx_status;
+ uint16_t i, idx_data;
uint32_t n_descs = 0;
int dlen[CVQ_MAX_DATA_DESCS], nb_dlen = 0;
+ struct virtio_pmd_ctrl *ctrl;
/* locate desc for header, data, and status */
idx_data = vring->desc[idx_hdr].next;
@@ -902,34 +910,33 @@ virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vri
n_descs++;
}
- /* locate desc for status */
- idx_status = i;
n_descs++;
- hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
- if (hdr->class == VIRTIO_NET_CTRL_MQ &&
- hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ /* Access control command via VA from CVQ */
+ ctrl = (struct virtio_pmd_ctrl *)dev->hw.cvq->hdr_mz->addr;
+ if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+ ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
uint16_t queues;
- queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
+ queues = *(uint16_t *)(uintptr_t)ctrl->data;
status = virtio_user_handle_mq(dev, queues);
- } else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
+ } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+ ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
struct virtio_net_ctrl_rss *rss;
- rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
+ rss = (struct virtio_net_ctrl_rss *)ctrl->data;
status = virtio_user_handle_mq(dev, rss->max_tx_vq);
- } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
- hdr->class == VIRTIO_NET_CTRL_MAC ||
- hdr->class == VIRTIO_NET_CTRL_VLAN) {
+ } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_RX ||
+ ctrl->hdr.class == VIRTIO_NET_CTRL_MAC ||
+ ctrl->hdr.class == VIRTIO_NET_CTRL_VLAN) {
status = 0;
}
if (!status && dev->scvq)
- status = virtio_send_command(&dev->scvq->cq,
- (struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
+ status = virtio_send_command(&dev->scvq->cq, ctrl, dlen, nb_dlen);
/* Update status */
- *(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;
+ ctrl->status = status;
return n_descs;
}
@@ -948,7 +955,7 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
struct vring_packed *vring,
uint16_t idx_hdr)
{
- struct virtio_net_ctrl_hdr *hdr;
+ struct virtio_pmd_ctrl *ctrl;
virtio_net_ctrl_ack status = ~0;
uint16_t idx_data, idx_status;
/* initialize to one, header is first */
@@ -971,32 +978,31 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
n_descs++;
}
- hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
- if (hdr->class == VIRTIO_NET_CTRL_MQ &&
- hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ /* Access control command via VA from CVQ */
+ ctrl = (struct virtio_pmd_ctrl *)dev->hw.cvq->hdr_mz->addr;
+ if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+ ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
uint16_t queues;
- queues = *(uint16_t *)(uintptr_t)
- vring->desc[idx_data].addr;
+ queues = *(uint16_t *)(uintptr_t)ctrl->data;
status = virtio_user_handle_mq(dev, queues);
- } else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
+ } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+ ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
struct virtio_net_ctrl_rss *rss;
- rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
+ rss = (struct virtio_net_ctrl_rss *)ctrl->data;
status = virtio_user_handle_mq(dev, rss->max_tx_vq);
- } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
- hdr->class == VIRTIO_NET_CTRL_MAC ||
- hdr->class == VIRTIO_NET_CTRL_VLAN) {
+ } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_RX ||
+ ctrl->hdr.class == VIRTIO_NET_CTRL_MAC ||
+ ctrl->hdr.class == VIRTIO_NET_CTRL_VLAN) {
status = 0;
}
if (!status && dev->scvq)
- status = virtio_send_command(&dev->scvq->cq,
- (struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
+ status = virtio_send_command(&dev->scvq->cq, ctrl, dlen, nb_dlen);
/* Update status */
- *(virtio_net_ctrl_ack *)(uintptr_t)
- vring->desc[idx_status].addr = status;
+ ctrl->status = status;
/* Update used descriptor */
vring->desc[idx_hdr].id = vring->desc[idx_status].id;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index bf9de36d8f..ae6593ba0b 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -198,6 +198,7 @@ virtio_user_setup_queue_packed(struct virtqueue *vq,
sizeof(struct vring_packed_desc_event),
VIRTIO_VRING_ALIGN);
vring->num = vq->vq_nentries;
+ vring->desc_iova = vq->vq_ring_mem;
vring->desc = (void *)(uintptr_t)desc_addr;
vring->driver = (void *)(uintptr_t)avail_addr;
vring->device = (void *)(uintptr_t)used_addr;
@@ -221,6 +222,7 @@ virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
VIRTIO_VRING_ALIGN);
dev->vrings.split[queue_idx].num = vq->vq_nentries;
+ dev->vrings.split[queue_idx].desc_iova = vq->vq_ring_mem;
dev->vrings.split[queue_idx].desc = (void *)(uintptr_t)desc_addr;
dev->vrings.split[queue_idx].avail = (void *)(uintptr_t)avail_addr;
dev->vrings.split[queue_idx].used = (void *)(uintptr_t)used_addr;
@@ -689,7 +691,13 @@ virtio_user_pmd_probe(struct rte_vdev_device *vdev)
* Virtio-user requires using virtual addresses for the descriptors
* buffers, whatever other devices require
*/
- hw->use_va = true;
+ if (backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
+ /* VDPA backend requires using iova for the buffers to make it
+ * work in IOVA as PA mode also.
+ */
+ hw->use_va = false;
+ else
+ hw->use_va = true;
/* previously called by pci probing for physical dev */
if (eth_virtio_dev_init(eth_dev) < 0) {
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 6f419665f1..cf46abfd06 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -282,13 +282,13 @@ virtio_init_vring(struct virtqueue *vq)
vq->vq_free_cnt = vq->vq_nentries;
memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
if (virtio_with_packed_queue(vq->hw)) {
- vring_init_packed(&vq->vq_packed.ring, ring_mem,
+ vring_init_packed(&vq->vq_packed.ring, ring_mem, vq->vq_ring_mem,
VIRTIO_VRING_ALIGN, size);
vring_desc_init_packed(vq, size);
} else {
struct vring *vr = &vq->vq_split.ring;
- vring_init_split(vr, ring_mem, VIRTIO_VRING_ALIGN, size);
+ vring_init_split(vr, ring_mem, vq->vq_ring_mem, VIRTIO_VRING_ALIGN, size);
vring_desc_init_split(vr->desc, size);
}
/*
--
2.25.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] net/virtio-user: support IOVA as PA mode for vDPA backend
2024-02-27 5:56 Srujana Challa
@ 2024-02-27 8:56 ` Maxime Coquelin
0 siblings, 0 replies; 4+ messages in thread
From: Maxime Coquelin @ 2024-02-27 8:56 UTC (permalink / raw)
To: Srujana Challa, dev, chenbox; +Cc: jerinj, ndabilpuram, vattunuru
Hi Srujana,
On 2/27/24 06:56, Srujana Challa wrote:
> Disable the use_va flag for the vDPA backend type and fix the issues
> with shadow control command processing when it is disabled.
> This will help to make the virtio-user driver work in IOVA
> as PA mode for the vDPA backend.
It is too late for v24.03, but a couple of comments:
1. This is the 3rd time this patch has been sent; for new versions, please
indicate the version in the commit title and also provide a changelog.
2. I just had a quick look, but it looks like it would deserve several
patches.
Thanks,
Maxime
> Signed-off-by: Srujana Challa <schalla@marvell.com>
> ---
> drivers/net/virtio/virtio_ring.h | 12 ++-
> .../net/virtio/virtio_user/virtio_user_dev.c | 94 ++++++++++---------
> drivers/net/virtio/virtio_user_ethdev.c | 10 +-
> drivers/net/virtio/virtqueue.c | 4 +-
> 4 files changed, 69 insertions(+), 51 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
> index e848c0b73b..998605dbb5 100644
> --- a/drivers/net/virtio/virtio_ring.h
> +++ b/drivers/net/virtio/virtio_ring.h
> @@ -83,6 +83,7 @@ struct vring_packed_desc_event {
>
> struct vring_packed {
> unsigned int num;
> + rte_iova_t desc_iova;
> struct vring_packed_desc *desc;
> struct vring_packed_desc_event *driver;
> struct vring_packed_desc_event *device;
> @@ -90,6 +91,7 @@ struct vring_packed {
>
> struct vring {
> unsigned int num;
> + rte_iova_t desc_iova;
> struct vring_desc *desc;
> struct vring_avail *avail;
> struct vring_used *used;
> @@ -149,11 +151,12 @@ vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align)
> return size;
> }
> static inline void
> -vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
> - unsigned int num)
> +vring_init_split(struct vring *vr, uint8_t *p, rte_iova_t iova,
> + unsigned long align, unsigned int num)
> {
> vr->num = num;
> vr->desc = (struct vring_desc *) p;
> + vr->desc_iova = iova;
> vr->avail = (struct vring_avail *) (p +
> num * sizeof(struct vring_desc));
> vr->used = (void *)
> @@ -161,11 +164,12 @@ vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
> }
>
> static inline void
> -vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align,
> - unsigned int num)
> +vring_init_packed(struct vring_packed *vr, uint8_t *p, rte_iova_t iova,
> + unsigned long align, unsigned int num)
> {
> vr->num = num;
> vr->desc = (struct vring_packed_desc *)p;
> + vr->desc_iova = iova;
> vr->driver = (struct vring_packed_desc_event *)(p +
> vr->num * sizeof(struct vring_packed_desc));
> vr->device = (struct vring_packed_desc_event *)
> diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
> index d395fc1676..8ad10e6354 100644
> --- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
> +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
> @@ -62,6 +62,7 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
> struct vhost_vring_state state;
> struct vring *vring = &dev->vrings.split[queue_sel];
> struct vring_packed *pq_vring = &dev->vrings.packed[queue_sel];
> + uint64_t desc_addr, avail_addr, used_addr;
> struct vhost_vring_addr addr = {
> .index = queue_sel,
> .log_guest_addr = 0,
> @@ -81,16 +82,23 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
> }
>
> if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
> - addr.desc_user_addr =
> - (uint64_t)(uintptr_t)pq_vring->desc;
> - addr.avail_user_addr =
> - (uint64_t)(uintptr_t)pq_vring->driver;
> - addr.used_user_addr =
> - (uint64_t)(uintptr_t)pq_vring->device;
> + desc_addr = pq_vring->desc_iova;
> + avail_addr = desc_addr + pq_vring->num * sizeof(struct vring_packed_desc);
> + used_addr = RTE_ALIGN_CEIL(avail_addr + sizeof(struct vring_packed_desc_event),
> + VIRTIO_VRING_ALIGN);
> +
> + addr.desc_user_addr = desc_addr;
> + addr.avail_user_addr = avail_addr;
> + addr.used_user_addr = used_addr;
> } else {
> - addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
> - addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
> - addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
> + desc_addr = vring->desc_iova;
> + avail_addr = desc_addr + vring->num * sizeof(struct vring_desc);
> + used_addr = RTE_ALIGN_CEIL((uintptr_t)(&vring->avail->ring[vring->num]),
> + VIRTIO_VRING_ALIGN);
> +
> + addr.desc_user_addr = desc_addr;
> + addr.avail_user_addr = avail_addr;
> + addr.used_user_addr = used_addr;
> }
>
> state.index = queue_sel;
> @@ -885,11 +893,11 @@ static uint32_t
> virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vring,
> uint16_t idx_hdr)
> {
> - struct virtio_net_ctrl_hdr *hdr;
> virtio_net_ctrl_ack status = ~0;
> - uint16_t i, idx_data, idx_status;
> + uint16_t i, idx_data;
> uint32_t n_descs = 0;
> int dlen[CVQ_MAX_DATA_DESCS], nb_dlen = 0;
> + struct virtio_pmd_ctrl *ctrl;
>
> /* locate desc for header, data, and status */
> idx_data = vring->desc[idx_hdr].next;
> @@ -902,34 +910,33 @@ virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vri
> n_descs++;
> }
>
> - /* locate desc for status */
> - idx_status = i;
> n_descs++;
>
> - hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
> - if (hdr->class == VIRTIO_NET_CTRL_MQ &&
> - hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
> - uint16_t queues;
> + /* Access control command via VA from CVQ */
> + ctrl = (struct virtio_pmd_ctrl *)dev->hw.cvq->hdr_mz->addr;
> + if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
> + ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
> + uint16_t *queues;
>
> - queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
> - status = virtio_user_handle_mq(dev, queues);
> - } else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
> + queues = (uint16_t *)ctrl->data;
> + status = virtio_user_handle_mq(dev, *queues);
> + } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
> + ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
> struct virtio_net_ctrl_rss *rss;
>
> - rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
> + rss = (struct virtio_net_ctrl_rss *)ctrl->data;
> status = virtio_user_handle_mq(dev, rss->max_tx_vq);
> - } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
> - hdr->class == VIRTIO_NET_CTRL_MAC ||
> - hdr->class == VIRTIO_NET_CTRL_VLAN) {
> + } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_RX ||
> + ctrl->hdr.class == VIRTIO_NET_CTRL_MAC ||
> + ctrl->hdr.class == VIRTIO_NET_CTRL_VLAN) {
> status = 0;
> }
>
> if (!status && dev->scvq)
> - status = virtio_send_command(&dev->scvq->cq,
> - (struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
> + status = virtio_send_command(&dev->scvq->cq, ctrl, dlen, nb_dlen);
>
> /* Update status */
> - *(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;
> + ctrl->status = status;
>
> return n_descs;
> }
> @@ -948,7 +955,7 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
> struct vring_packed *vring,
> uint16_t idx_hdr)
> {
> - struct virtio_net_ctrl_hdr *hdr;
> + struct virtio_pmd_ctrl *ctrl;
> virtio_net_ctrl_ack status = ~0;
> uint16_t idx_data, idx_status;
> /* initialize to one, header is first */
> @@ -971,32 +978,31 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
> n_descs++;
> }
>
> - hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
> - if (hdr->class == VIRTIO_NET_CTRL_MQ &&
> - hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
> - uint16_t queues;
> + /* Access control command via VA from CVQ */
> + ctrl = (struct virtio_pmd_ctrl *)dev->hw.cvq->hdr_mz->addr;
> + if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
> + ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
> + uint16_t *queues;
>
> - queues = *(uint16_t *)(uintptr_t)
> - vring->desc[idx_data].addr;
> - status = virtio_user_handle_mq(dev, queues);
> - } else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
> + queues = (uint16_t *)ctrl->data;
> + status = virtio_user_handle_mq(dev, *queues);
> + } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
> + ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
> struct virtio_net_ctrl_rss *rss;
>
> - rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
> + rss = (struct virtio_net_ctrl_rss *)ctrl->data;
> status = virtio_user_handle_mq(dev, rss->max_tx_vq);
> - } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
> - hdr->class == VIRTIO_NET_CTRL_MAC ||
> - hdr->class == VIRTIO_NET_CTRL_VLAN) {
> + } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_RX ||
> + ctrl->hdr.class == VIRTIO_NET_CTRL_MAC ||
> + ctrl->hdr.class == VIRTIO_NET_CTRL_VLAN) {
> status = 0;
> }
>
> if (!status && dev->scvq)
> - status = virtio_send_command(&dev->scvq->cq,
> - (struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
> + status = virtio_send_command(&dev->scvq->cq, ctrl, dlen, nb_dlen);
>
> /* Update status */
> - *(virtio_net_ctrl_ack *)(uintptr_t)
> - vring->desc[idx_status].addr = status;
> + ctrl->status = status;
>
> /* Update used descriptor */
> vring->desc[idx_hdr].id = vring->desc[idx_status].id;
> diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
> index bf9de36d8f..ae6593ba0b 100644
> --- a/drivers/net/virtio/virtio_user_ethdev.c
> +++ b/drivers/net/virtio/virtio_user_ethdev.c
> @@ -198,6 +198,7 @@ virtio_user_setup_queue_packed(struct virtqueue *vq,
> sizeof(struct vring_packed_desc_event),
> VIRTIO_VRING_ALIGN);
> vring->num = vq->vq_nentries;
> + vring->desc_iova = vq->vq_ring_mem;
> vring->desc = (void *)(uintptr_t)desc_addr;
> vring->driver = (void *)(uintptr_t)avail_addr;
> vring->device = (void *)(uintptr_t)used_addr;
> @@ -221,6 +222,7 @@ virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
> VIRTIO_VRING_ALIGN);
>
> dev->vrings.split[queue_idx].num = vq->vq_nentries;
> + dev->vrings.split[queue_idx].desc_iova = vq->vq_ring_mem;
> dev->vrings.split[queue_idx].desc = (void *)(uintptr_t)desc_addr;
> dev->vrings.split[queue_idx].avail = (void *)(uintptr_t)avail_addr;
> dev->vrings.split[queue_idx].used = (void *)(uintptr_t)used_addr;
> @@ -689,7 +691,13 @@ virtio_user_pmd_probe(struct rte_vdev_device *vdev)
> * Virtio-user requires using virtual addresses for the descriptors
> * buffers, whatever other devices require
> */
> - hw->use_va = true;
> + if (backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
> + /* VDPA backend requires using iova for the buffers to make it
> + * work in IOVA as PA mode also.
> + */
> + hw->use_va = false;
> + else
> + hw->use_va = true;
>
> /* previously called by pci probing for physical dev */
> if (eth_virtio_dev_init(eth_dev) < 0) {
> diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
> index 6f419665f1..cf46abfd06 100644
> --- a/drivers/net/virtio/virtqueue.c
> +++ b/drivers/net/virtio/virtqueue.c
> @@ -282,13 +282,13 @@ virtio_init_vring(struct virtqueue *vq)
> vq->vq_free_cnt = vq->vq_nentries;
> memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
> if (virtio_with_packed_queue(vq->hw)) {
> - vring_init_packed(&vq->vq_packed.ring, ring_mem,
> + vring_init_packed(&vq->vq_packed.ring, ring_mem, vq->vq_ring_mem,
> VIRTIO_VRING_ALIGN, size);
> vring_desc_init_packed(vq, size);
> } else {
> struct vring *vr = &vq->vq_split.ring;
>
> - vring_init_split(vr, ring_mem, VIRTIO_VRING_ALIGN, size);
> + vring_init_split(vr, ring_mem, vq->vq_ring_mem, VIRTIO_VRING_ALIGN, size);
> vring_desc_init_split(vr->desc, size);
> }
> /*
^ permalink raw reply [flat|nested] 4+ messages in thread
* [PATCH] net/virtio-user: support IOVA as PA mode for vDPA backend
@ 2024-02-27 5:56 Srujana Challa
2024-02-27 8:56 ` Maxime Coquelin
0 siblings, 1 reply; 4+ messages in thread
From: Srujana Challa @ 2024-02-27 5:56 UTC (permalink / raw)
To: dev, maxime.coquelin, chenbox; +Cc: jerinj, ndabilpuram, vattunuru, schalla
Disable the use_va flag for the vDPA backend type and fix the issues
with shadow control command processing when it is disabled.
This will help to make the virtio-user driver work in IOVA
as PA mode for the vDPA backend.
Signed-off-by: Srujana Challa <schalla@marvell.com>
---
drivers/net/virtio/virtio_ring.h | 12 ++-
.../net/virtio/virtio_user/virtio_user_dev.c | 94 ++++++++++---------
drivers/net/virtio/virtio_user_ethdev.c | 10 +-
drivers/net/virtio/virtqueue.c | 4 +-
4 files changed, 69 insertions(+), 51 deletions(-)
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index e848c0b73b..998605dbb5 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -83,6 +83,7 @@ struct vring_packed_desc_event {
struct vring_packed {
unsigned int num;
+ rte_iova_t desc_iova;
struct vring_packed_desc *desc;
struct vring_packed_desc_event *driver;
struct vring_packed_desc_event *device;
@@ -90,6 +91,7 @@ struct vring_packed {
struct vring {
unsigned int num;
+ rte_iova_t desc_iova;
struct vring_desc *desc;
struct vring_avail *avail;
struct vring_used *used;
@@ -149,11 +151,12 @@ vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align)
return size;
}
static inline void
-vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
- unsigned int num)
+vring_init_split(struct vring *vr, uint8_t *p, rte_iova_t iova,
+ unsigned long align, unsigned int num)
{
vr->num = num;
vr->desc = (struct vring_desc *) p;
+ vr->desc_iova = iova;
vr->avail = (struct vring_avail *) (p +
num * sizeof(struct vring_desc));
vr->used = (void *)
@@ -161,11 +164,12 @@ vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
}
static inline void
-vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align,
- unsigned int num)
+vring_init_packed(struct vring_packed *vr, uint8_t *p, rte_iova_t iova,
+ unsigned long align, unsigned int num)
{
vr->num = num;
vr->desc = (struct vring_packed_desc *)p;
+ vr->desc_iova = iova;
vr->driver = (struct vring_packed_desc_event *)(p +
vr->num * sizeof(struct vring_packed_desc));
vr->device = (struct vring_packed_desc_event *)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index d395fc1676..8ad10e6354 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -62,6 +62,7 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
struct vhost_vring_state state;
struct vring *vring = &dev->vrings.split[queue_sel];
struct vring_packed *pq_vring = &dev->vrings.packed[queue_sel];
+ uint64_t desc_addr, avail_addr, used_addr;
struct vhost_vring_addr addr = {
.index = queue_sel,
.log_guest_addr = 0,
@@ -81,16 +82,23 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
}
if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
- addr.desc_user_addr =
- (uint64_t)(uintptr_t)pq_vring->desc;
- addr.avail_user_addr =
- (uint64_t)(uintptr_t)pq_vring->driver;
- addr.used_user_addr =
- (uint64_t)(uintptr_t)pq_vring->device;
+ desc_addr = pq_vring->desc_iova;
+ avail_addr = desc_addr + pq_vring->num * sizeof(struct vring_packed_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr + sizeof(struct vring_packed_desc_event),
+ VIRTIO_VRING_ALIGN);
+
+ addr.desc_user_addr = desc_addr;
+ addr.avail_user_addr = avail_addr;
+ addr.used_user_addr = used_addr;
} else {
- addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
- addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
- addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
+ desc_addr = vring->desc_iova;
+ avail_addr = desc_addr + vring->num * sizeof(struct vring_desc);
+ used_addr = RTE_ALIGN_CEIL((uintptr_t)(&vring->avail->ring[vring->num]),
+ VIRTIO_VRING_ALIGN);
+
+ addr.desc_user_addr = desc_addr;
+ addr.avail_user_addr = avail_addr;
+ addr.used_user_addr = used_addr;
}
state.index = queue_sel;
@@ -885,11 +893,11 @@ static uint32_t
virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vring,
uint16_t idx_hdr)
{
- struct virtio_net_ctrl_hdr *hdr;
virtio_net_ctrl_ack status = ~0;
- uint16_t i, idx_data, idx_status;
+ uint16_t i, idx_data;
uint32_t n_descs = 0;
int dlen[CVQ_MAX_DATA_DESCS], nb_dlen = 0;
+ struct virtio_pmd_ctrl *ctrl;
/* locate desc for header, data, and status */
idx_data = vring->desc[idx_hdr].next;
@@ -902,34 +910,33 @@ virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vri
n_descs++;
}
- /* locate desc for status */
- idx_status = i;
n_descs++;
- hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
- if (hdr->class == VIRTIO_NET_CTRL_MQ &&
- hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
- uint16_t queues;
+ /* Access control command via VA from CVQ */
+ ctrl = (struct virtio_pmd_ctrl *)dev->hw.cvq->hdr_mz->addr;
+ if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+ ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ uint16_t *queues;
- queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
- status = virtio_user_handle_mq(dev, queues);
- } else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
+ queues = (uint16_t *)ctrl->data;
+ status = virtio_user_handle_mq(dev, *queues);
+ } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+ ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
struct virtio_net_ctrl_rss *rss;
- rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
+ rss = (struct virtio_net_ctrl_rss *)ctrl->data;
status = virtio_user_handle_mq(dev, rss->max_tx_vq);
- } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
- hdr->class == VIRTIO_NET_CTRL_MAC ||
- hdr->class == VIRTIO_NET_CTRL_VLAN) {
+ } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_RX ||
+ ctrl->hdr.class == VIRTIO_NET_CTRL_MAC ||
+ ctrl->hdr.class == VIRTIO_NET_CTRL_VLAN) {
status = 0;
}
if (!status && dev->scvq)
- status = virtio_send_command(&dev->scvq->cq,
- (struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
+ status = virtio_send_command(&dev->scvq->cq, ctrl, dlen, nb_dlen);
/* Update status */
- *(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;
+ ctrl->status = status;
return n_descs;
}
@@ -948,7 +955,7 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
struct vring_packed *vring,
uint16_t idx_hdr)
{
- struct virtio_net_ctrl_hdr *hdr;
+ struct virtio_pmd_ctrl *ctrl;
virtio_net_ctrl_ack status = ~0;
uint16_t idx_data, idx_status;
/* initialize to one, header is first */
@@ -971,32 +978,31 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
n_descs++;
}
- hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
- if (hdr->class == VIRTIO_NET_CTRL_MQ &&
- hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
- uint16_t queues;
+ /* Access control command via VA from CVQ */
+ ctrl = (struct virtio_pmd_ctrl *)dev->hw.cvq->hdr_mz->addr;
+ if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+ ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ uint16_t *queues;
- queues = *(uint16_t *)(uintptr_t)
- vring->desc[idx_data].addr;
- status = virtio_user_handle_mq(dev, queues);
- } else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
+ queues = (uint16_t *)ctrl->data;
+ status = virtio_user_handle_mq(dev, *queues);
+ } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+ ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
struct virtio_net_ctrl_rss *rss;
- rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
+ rss = (struct virtio_net_ctrl_rss *)ctrl->data;
status = virtio_user_handle_mq(dev, rss->max_tx_vq);
- } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
- hdr->class == VIRTIO_NET_CTRL_MAC ||
- hdr->class == VIRTIO_NET_CTRL_VLAN) {
+ } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_RX ||
+ ctrl->hdr.class == VIRTIO_NET_CTRL_MAC ||
+ ctrl->hdr.class == VIRTIO_NET_CTRL_VLAN) {
status = 0;
}
if (!status && dev->scvq)
- status = virtio_send_command(&dev->scvq->cq,
- (struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
+ status = virtio_send_command(&dev->scvq->cq, ctrl, dlen, nb_dlen);
/* Update status */
- *(virtio_net_ctrl_ack *)(uintptr_t)
- vring->desc[idx_status].addr = status;
+ ctrl->status = status;
/* Update used descriptor */
vring->desc[idx_hdr].id = vring->desc[idx_status].id;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index bf9de36d8f..ae6593ba0b 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -198,6 +198,7 @@ virtio_user_setup_queue_packed(struct virtqueue *vq,
sizeof(struct vring_packed_desc_event),
VIRTIO_VRING_ALIGN);
vring->num = vq->vq_nentries;
+ vring->desc_iova = vq->vq_ring_mem;
vring->desc = (void *)(uintptr_t)desc_addr;
vring->driver = (void *)(uintptr_t)avail_addr;
vring->device = (void *)(uintptr_t)used_addr;
@@ -221,6 +222,7 @@ virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
VIRTIO_VRING_ALIGN);
dev->vrings.split[queue_idx].num = vq->vq_nentries;
+ dev->vrings.split[queue_idx].desc_iova = vq->vq_ring_mem;
dev->vrings.split[queue_idx].desc = (void *)(uintptr_t)desc_addr;
dev->vrings.split[queue_idx].avail = (void *)(uintptr_t)avail_addr;
dev->vrings.split[queue_idx].used = (void *)(uintptr_t)used_addr;
@@ -689,7 +691,13 @@ virtio_user_pmd_probe(struct rte_vdev_device *vdev)
* Virtio-user requires using virtual addresses for the descriptors
* buffers, whatever other devices require
*/
- hw->use_va = true;
+ if (backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
+ /* VDPA backend requires using iova for the buffers to make it
+ * work in IOVA as PA mode also.
+ */
+ hw->use_va = false;
+ else
+ hw->use_va = true;
/* previously called by pci probing for physical dev */
if (eth_virtio_dev_init(eth_dev) < 0) {
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 6f419665f1..cf46abfd06 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -282,13 +282,13 @@ virtio_init_vring(struct virtqueue *vq)
vq->vq_free_cnt = vq->vq_nentries;
memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
if (virtio_with_packed_queue(vq->hw)) {
- vring_init_packed(&vq->vq_packed.ring, ring_mem,
+ vring_init_packed(&vq->vq_packed.ring, ring_mem, vq->vq_ring_mem,
VIRTIO_VRING_ALIGN, size);
vring_desc_init_packed(vq, size);
} else {
struct vring *vr = &vq->vq_split.ring;
- vring_init_split(vr, ring_mem, VIRTIO_VRING_ALIGN, size);
+ vring_init_split(vr, ring_mem, vq->vq_ring_mem, VIRTIO_VRING_ALIGN, size);
vring_desc_init_split(vr->desc, size);
}
/*
--
2.25.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* [PATCH] net/virtio-user: support IOVA as PA mode for vDPA backend
@ 2024-02-26 10:04 Srujana Challa
0 siblings, 0 replies; 4+ messages in thread
From: Srujana Challa @ 2024-02-26 10:04 UTC (permalink / raw)
To: dev, maxime.coquelin, chenbox; +Cc: jerinj, ndabilpuram, vattunuru, schalla
Disable the use_va flag for the vDPA backend type and fix the issues
with shadow control command processing when it is disabled.
This helps make the virtio-user driver work in IOVA
as PA mode for the vDPA backend.
Signed-off-by: Srujana Challa <schalla@marvell.com>
---
drivers/net/virtio/virtio_ring.h | 12 ++-
.../net/virtio/virtio_user/virtio_user_dev.c | 86 ++++++++++---------
drivers/net/virtio/virtio_user_ethdev.c | 10 ++-
drivers/net/virtio/virtqueue.c | 4 +-
4 files changed, 65 insertions(+), 47 deletions(-)
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index e848c0b73b..998605dbb5 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -83,6 +83,7 @@ struct vring_packed_desc_event {
struct vring_packed {
unsigned int num;
+ rte_iova_t desc_iova;
struct vring_packed_desc *desc;
struct vring_packed_desc_event *driver;
struct vring_packed_desc_event *device;
@@ -90,6 +91,7 @@ struct vring_packed {
struct vring {
unsigned int num;
+ rte_iova_t desc_iova;
struct vring_desc *desc;
struct vring_avail *avail;
struct vring_used *used;
@@ -149,11 +151,12 @@ vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align)
return size;
}
static inline void
-vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
- unsigned int num)
+vring_init_split(struct vring *vr, uint8_t *p, rte_iova_t iova,
+ unsigned long align, unsigned int num)
{
vr->num = num;
vr->desc = (struct vring_desc *) p;
+ vr->desc_iova = iova;
vr->avail = (struct vring_avail *) (p +
num * sizeof(struct vring_desc));
vr->used = (void *)
@@ -161,11 +164,12 @@ vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
}
static inline void
-vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align,
- unsigned int num)
+vring_init_packed(struct vring_packed *vr, uint8_t *p, rte_iova_t iova,
+ unsigned long align, unsigned int num)
{
vr->num = num;
vr->desc = (struct vring_packed_desc *)p;
+ vr->desc_iova = iova;
vr->driver = (struct vring_packed_desc_event *)(p +
vr->num * sizeof(struct vring_packed_desc));
vr->device = (struct vring_packed_desc_event *)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index d395fc1676..55e71e4842 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -62,6 +62,7 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
struct vhost_vring_state state;
struct vring *vring = &dev->vrings.split[queue_sel];
struct vring_packed *pq_vring = &dev->vrings.packed[queue_sel];
+ uint64_t desc_addr, avail_addr, used_addr;
struct vhost_vring_addr addr = {
.index = queue_sel,
.log_guest_addr = 0,
@@ -81,16 +82,23 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
}
if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
- addr.desc_user_addr =
- (uint64_t)(uintptr_t)pq_vring->desc;
- addr.avail_user_addr =
- (uint64_t)(uintptr_t)pq_vring->driver;
- addr.used_user_addr =
- (uint64_t)(uintptr_t)pq_vring->device;
+ desc_addr = pq_vring->desc_iova;
+ avail_addr = desc_addr + pq_vring->num * sizeof(struct vring_packed_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr + sizeof(struct vring_packed_desc_event),
+ VIRTIO_VRING_ALIGN);
+
+ addr.desc_user_addr = desc_addr;
+ addr.avail_user_addr = avail_addr;
+ addr.used_user_addr = used_addr;
} else {
- addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
- addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
- addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
+ desc_addr = vring->desc_iova;
+ avail_addr = desc_addr + vring->num * sizeof(struct vring_desc);
+ used_addr = RTE_ALIGN_CEIL((uintptr_t)(&vring->avail->ring[vring->num]),
+ VIRTIO_VRING_ALIGN);
+
+ addr.desc_user_addr = desc_addr;
+ addr.avail_user_addr = avail_addr;
+ addr.used_user_addr = used_addr;
}
state.index = queue_sel;
@@ -885,11 +893,11 @@ static uint32_t
virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vring,
uint16_t idx_hdr)
{
- struct virtio_net_ctrl_hdr *hdr;
virtio_net_ctrl_ack status = ~0;
- uint16_t i, idx_data, idx_status;
+ uint16_t i, idx_data;
uint32_t n_descs = 0;
int dlen[CVQ_MAX_DATA_DESCS], nb_dlen = 0;
+ struct virtio_pmd_ctrl *ctrl;
/* locate desc for header, data, and status */
idx_data = vring->desc[idx_hdr].next;
@@ -902,34 +910,33 @@ virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vri
n_descs++;
}
- /* locate desc for status */
- idx_status = i;
n_descs++;
- hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
- if (hdr->class == VIRTIO_NET_CTRL_MQ &&
- hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ /* Access control command via VA from CVQ */
+ ctrl = (struct virtio_pmd_ctrl *)dev->hw.cvq->hdr_mz->addr;
+ if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+ ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
uint16_t queues;
- queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
+ queues = *(uint16_t *)ctrl->data;
status = virtio_user_handle_mq(dev, queues);
- } else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
+ } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+ ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
struct virtio_net_ctrl_rss *rss;
- rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
+ rss = (struct virtio_net_ctrl_rss *)ctrl->data;
status = virtio_user_handle_mq(dev, rss->max_tx_vq);
- } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
- hdr->class == VIRTIO_NET_CTRL_MAC ||
- hdr->class == VIRTIO_NET_CTRL_VLAN) {
+ } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_RX ||
+ ctrl->hdr.class == VIRTIO_NET_CTRL_MAC ||
+ ctrl->hdr.class == VIRTIO_NET_CTRL_VLAN) {
status = 0;
}
if (!status && dev->scvq)
- status = virtio_send_command(&dev->scvq->cq,
- (struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
+ status = virtio_send_command(&dev->scvq->cq, ctrl, dlen, nb_dlen);
/* Update status */
- *(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;
+ ctrl->status = status;
return n_descs;
}
@@ -948,7 +955,7 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
struct vring_packed *vring,
uint16_t idx_hdr)
{
- struct virtio_net_ctrl_hdr *hdr;
+ struct virtio_pmd_ctrl *ctrl;
virtio_net_ctrl_ack status = ~0;
uint16_t idx_data, idx_status;
/* initialize to one, header is first */
@@ -971,32 +978,31 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
n_descs++;
}
- hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
- if (hdr->class == VIRTIO_NET_CTRL_MQ &&
- hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ /* Access control command via VA from CVQ */
+ ctrl = (struct virtio_pmd_ctrl *)dev->hw.cvq->hdr_mz->addr;
+ if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+ ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
uint16_t queues;
- queues = *(uint16_t *)(uintptr_t)
- vring->desc[idx_data].addr;
+ queues = *(uint16_t *)ctrl->data;
status = virtio_user_handle_mq(dev, queues);
- } else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
+ } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+ ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
struct virtio_net_ctrl_rss *rss;
- rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
+ rss = (struct virtio_net_ctrl_rss *)ctrl->data;
status = virtio_user_handle_mq(dev, rss->max_tx_vq);
- } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
- hdr->class == VIRTIO_NET_CTRL_MAC ||
- hdr->class == VIRTIO_NET_CTRL_VLAN) {
+ } else if (ctrl->hdr.class == VIRTIO_NET_CTRL_RX ||
+ ctrl->hdr.class == VIRTIO_NET_CTRL_MAC ||
+ ctrl->hdr.class == VIRTIO_NET_CTRL_VLAN) {
status = 0;
}
if (!status && dev->scvq)
- status = virtio_send_command(&dev->scvq->cq,
- (struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
+ status = virtio_send_command(&dev->scvq->cq, ctrl, dlen, nb_dlen);
/* Update status */
- *(virtio_net_ctrl_ack *)(uintptr_t)
- vring->desc[idx_status].addr = status;
+ ctrl->status = status;
/* Update used descriptor */
vring->desc[idx_hdr].id = vring->desc[idx_status].id;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index bf9de36d8f..ae6593ba0b 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -198,6 +198,7 @@ virtio_user_setup_queue_packed(struct virtqueue *vq,
sizeof(struct vring_packed_desc_event),
VIRTIO_VRING_ALIGN);
vring->num = vq->vq_nentries;
+ vring->desc_iova = vq->vq_ring_mem;
vring->desc = (void *)(uintptr_t)desc_addr;
vring->driver = (void *)(uintptr_t)avail_addr;
vring->device = (void *)(uintptr_t)used_addr;
@@ -221,6 +222,7 @@ virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
VIRTIO_VRING_ALIGN);
dev->vrings.split[queue_idx].num = vq->vq_nentries;
+ dev->vrings.split[queue_idx].desc_iova = vq->vq_ring_mem;
dev->vrings.split[queue_idx].desc = (void *)(uintptr_t)desc_addr;
dev->vrings.split[queue_idx].avail = (void *)(uintptr_t)avail_addr;
dev->vrings.split[queue_idx].used = (void *)(uintptr_t)used_addr;
@@ -689,7 +691,13 @@ virtio_user_pmd_probe(struct rte_vdev_device *vdev)
* Virtio-user requires using virtual addresses for the descriptors
* buffers, whatever other devices require
*/
- hw->use_va = true;
+ if (backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
+ /* VDPA backend requires using iova for the buffers to make it
+ * work in IOVA as PA mode also.
+ */
+ hw->use_va = false;
+ else
+ hw->use_va = true;
/* previously called by pci probing for physical dev */
if (eth_virtio_dev_init(eth_dev) < 0) {
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 6f419665f1..cf46abfd06 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -282,13 +282,13 @@ virtio_init_vring(struct virtqueue *vq)
vq->vq_free_cnt = vq->vq_nentries;
memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
if (virtio_with_packed_queue(vq->hw)) {
- vring_init_packed(&vq->vq_packed.ring, ring_mem,
+ vring_init_packed(&vq->vq_packed.ring, ring_mem, vq->vq_ring_mem,
VIRTIO_VRING_ALIGN, size);
vring_desc_init_packed(vq, size);
} else {
struct vring *vr = &vq->vq_split.ring;
- vring_init_split(vr, ring_mem, VIRTIO_VRING_ALIGN, size);
+ vring_init_split(vr, ring_mem, vq->vq_ring_mem, VIRTIO_VRING_ALIGN, size);
vring_desc_init_split(vr->desc, size);
}
/*
--
2.25.1
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2024-02-27 8:56 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-02-26 11:40 [PATCH] net/virtio-user: support IOVA as PA mode for vDPA backend Srujana Challa
-- strict thread matches above, loose matches on Subject: below --
2024-02-27 5:56 Srujana Challa
2024-02-27 8:56 ` Maxime Coquelin
2024-02-26 10:04 Srujana Challa
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).