DPDK patches and discussions
From: Srujana Challa <schalla@marvell.com>
To: <dev@dpdk.org>, <maxime.coquelin@redhat.com>, <chenbox@nvidia.com>
Cc: <jerinj@marvell.com>, <ndabilpuram@marvell.com>,
	<vattunuru@marvell.com>,  <schalla@marvell.com>
Subject: [PATCH] net/virtio-user: support IOVA as PA mode for vDPA backend
Date: Mon, 26 Feb 2024 15:34:39 +0530	[thread overview]
Message-ID: <20240226100439.2127008-1-schalla@marvell.com> (raw)

Disable the use_va flag for the vhost-vDPA backend type and fix the
shadow control queue command processing when that flag is disabled.
This makes the virtio-user driver work in IOVA as PA mode with a
vDPA backend.

Signed-off-by: Srujana Challa <schalla@marvell.com>
---
 drivers/net/virtio/virtio_ring.h              | 12 ++-
 .../net/virtio/virtio_user/virtio_user_dev.c  | 86 ++++++++++---------
 drivers/net/virtio/virtio_user_ethdev.c       | 10 ++-
 drivers/net/virtio/virtqueue.c                |  4 +-
 4 files changed, 65 insertions(+), 47 deletions(-)
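
For context, a minimal sketch (not part of the patch) of why a plain VA
stops being a valid backend address once the EAL runs in IOVA-as-PA
mode. ring_backend_addr() is a hypothetical helper used only for
illustration; rte_eal_iova_mode() and rte_mem_virt2iova() are existing
DPDK EAL APIs.

#include <rte_eal.h>
#include <rte_memory.h>

/* Hypothetical helper: resolve the address a vhost-vDPA backend should
 * be given for a ring allocated at ring_va. In IOVA-as-VA mode the VA
 * can be handed over directly; in IOVA-as-PA mode the IOVA must be
 * used, which is what the new desc_iova field records at setup time.
 */
static rte_iova_t
ring_backend_addr(const void *ring_va)
{
	if (rte_eal_iova_mode() == RTE_IOVA_VA)
		return (rte_iova_t)(uintptr_t)ring_va;

	return rte_mem_virt2iova(ring_va);
}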

diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index e848c0b73b..998605dbb5 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -83,6 +83,7 @@ struct vring_packed_desc_event {
 
 struct vring_packed {
 	unsigned int num;
+	rte_iova_t desc_iova;
 	struct vring_packed_desc *desc;
 	struct vring_packed_desc_event *driver;
 	struct vring_packed_desc_event *device;
@@ -90,6 +91,7 @@ struct vring_packed {
 
 struct vring {
 	unsigned int num;
+	rte_iova_t desc_iova;
 	struct vring_desc  *desc;
 	struct vring_avail *avail;
 	struct vring_used  *used;
@@ -149,11 +151,12 @@ vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align)
 	return size;
 }
 static inline void
-vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
-	 unsigned int num)
+vring_init_split(struct vring *vr, uint8_t *p, rte_iova_t iova,
+		 unsigned long align, unsigned int num)
 {
 	vr->num = num;
 	vr->desc = (struct vring_desc *) p;
+	vr->desc_iova = iova;
 	vr->avail = (struct vring_avail *) (p +
 		num * sizeof(struct vring_desc));
 	vr->used = (void *)
@@ -161,11 +164,12 @@ vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
 }
 
 static inline void
-vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align,
-		 unsigned int num)
+vring_init_packed(struct vring_packed *vr, uint8_t *p, rte_iova_t iova,
+		  unsigned long align, unsigned int num)
 {
 	vr->num = num;
 	vr->desc = (struct vring_packed_desc *)p;
+	vr->desc_iova = iova;
 	vr->driver = (struct vring_packed_desc_event *)(p +
 			vr->num * sizeof(struct vring_packed_desc));
 	vr->device = (struct vring_packed_desc_event *)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index d395fc1676..55e71e4842 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -62,6 +62,7 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
 	struct vhost_vring_state state;
 	struct vring *vring = &dev->vrings.split[queue_sel];
 	struct vring_packed *pq_vring = &dev->vrings.packed[queue_sel];
+	uint64_t desc_addr, avail_addr, used_addr;
 	struct vhost_vring_addr addr = {
 		.index = queue_sel,
 		.log_guest_addr = 0,
@@ -81,16 +82,23 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
 	}
 
 	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
-		addr.desc_user_addr =
-			(uint64_t)(uintptr_t)pq_vring->desc;
-		addr.avail_user_addr =
-			(uint64_t)(uintptr_t)pq_vring->driver;
-		addr.used_user_addr =
-			(uint64_t)(uintptr_t)pq_vring->device;
+		desc_addr = pq_vring->desc_iova;
+		avail_addr = desc_addr + pq_vring->num * sizeof(struct vring_packed_desc);
+		used_addr =  RTE_ALIGN_CEIL(avail_addr + sizeof(struct vring_packed_desc_event),
+					    VIRTIO_VRING_ALIGN);
+
+		addr.desc_user_addr = desc_addr;
+		addr.avail_user_addr = avail_addr;
+		addr.used_user_addr = used_addr;
 	} else {
-		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
-		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
-		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
+		desc_addr = vring->desc_iova;
+		avail_addr = desc_addr + vring->num * sizeof(struct vring_desc);
+		used_addr = RTE_ALIGN_CEIL((uintptr_t)(&vring->avail->ring[vring->num]),
+					   VIRTIO_VRING_ALIGN);
+
+		addr.desc_user_addr = desc_addr;
+		addr.avail_user_addr = avail_addr;
+		addr.used_user_addr = used_addr;
 	}
 
 	state.index = queue_sel;
@@ -885,11 +893,11 @@ static uint32_t
 virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vring,
 			    uint16_t idx_hdr)
 {
-	struct virtio_net_ctrl_hdr *hdr;
 	virtio_net_ctrl_ack status = ~0;
-	uint16_t i, idx_data, idx_status;
+	uint16_t i, idx_data;
 	uint32_t n_descs = 0;
 	int dlen[CVQ_MAX_DATA_DESCS], nb_dlen = 0;
+	struct virtio_pmd_ctrl *ctrl;
 
 	/* locate desc for header, data, and status */
 	idx_data = vring->desc[idx_hdr].next;
@@ -902,34 +910,33 @@ virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vri
 		n_descs++;
 	}
 
-	/* locate desc for status */
-	idx_status = i;
 	n_descs++;
 
-	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
-	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
-	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+	/* Access control command via VA from CVQ */
+	ctrl = (struct virtio_pmd_ctrl *)dev->hw.cvq->hdr_mz->addr;
+	if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+	    ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
 		uint16_t queues;
 
-		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
+		queues = *(uint16_t *)ctrl->data;
 		status = virtio_user_handle_mq(dev, queues);
-	} else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
+	} else if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+		   ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
 		struct virtio_net_ctrl_rss *rss;
 
-		rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
+		rss = (struct virtio_net_ctrl_rss *)ctrl->data;
 		status = virtio_user_handle_mq(dev, rss->max_tx_vq);
-	} else if (hdr->class == VIRTIO_NET_CTRL_RX  ||
-		   hdr->class == VIRTIO_NET_CTRL_MAC ||
-		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
+	} else if (ctrl->hdr.class == VIRTIO_NET_CTRL_RX  ||
+		   ctrl->hdr.class == VIRTIO_NET_CTRL_MAC ||
+		   ctrl->hdr.class == VIRTIO_NET_CTRL_VLAN) {
 		status = 0;
 	}
 
 	if (!status && dev->scvq)
-		status = virtio_send_command(&dev->scvq->cq,
-				(struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
+		status = virtio_send_command(&dev->scvq->cq, ctrl, dlen, nb_dlen);
 
 	/* Update status */
-	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;
+	ctrl->status = status;
 
 	return n_descs;
 }
@@ -948,7 +955,7 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
 				   struct vring_packed *vring,
 				   uint16_t idx_hdr)
 {
-	struct virtio_net_ctrl_hdr *hdr;
+	struct virtio_pmd_ctrl *ctrl;
 	virtio_net_ctrl_ack status = ~0;
 	uint16_t idx_data, idx_status;
 	/* initialize to one, header is first */
@@ -971,32 +978,31 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
 		n_descs++;
 	}
 
-	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
-	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
-	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+	/* Access control command via VA from CVQ */
+	ctrl = (struct virtio_pmd_ctrl *)dev->hw.cvq->hdr_mz->addr;
+	if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+	    ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
 		uint16_t queues;
 
-		queues = *(uint16_t *)(uintptr_t)
-				vring->desc[idx_data].addr;
+		queues = *(uint16_t *)ctrl->data;
 		status = virtio_user_handle_mq(dev, queues);
-	} else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
+	} else if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
+		   ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
 		struct virtio_net_ctrl_rss *rss;
 
-		rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
+		rss = (struct virtio_net_ctrl_rss *)ctrl->data;
 		status = virtio_user_handle_mq(dev, rss->max_tx_vq);
-	} else if (hdr->class == VIRTIO_NET_CTRL_RX  ||
-		   hdr->class == VIRTIO_NET_CTRL_MAC ||
-		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
+	} else if (ctrl->hdr.class == VIRTIO_NET_CTRL_RX  ||
+		   ctrl->hdr.class == VIRTIO_NET_CTRL_MAC ||
+		   ctrl->hdr.class == VIRTIO_NET_CTRL_VLAN) {
 		status = 0;
 	}
 
 	if (!status && dev->scvq)
-		status = virtio_send_command(&dev->scvq->cq,
-				(struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
+		status = virtio_send_command(&dev->scvq->cq, ctrl, dlen, nb_dlen);
 
 	/* Update status */
-	*(virtio_net_ctrl_ack *)(uintptr_t)
-		vring->desc[idx_status].addr = status;
+	ctrl->status = status;
 
 	/* Update used descriptor */
 	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index bf9de36d8f..ae6593ba0b 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -198,6 +198,7 @@ virtio_user_setup_queue_packed(struct virtqueue *vq,
 			   sizeof(struct vring_packed_desc_event),
 			   VIRTIO_VRING_ALIGN);
 	vring->num = vq->vq_nentries;
+	vring->desc_iova = vq->vq_ring_mem;
 	vring->desc = (void *)(uintptr_t)desc_addr;
 	vring->driver = (void *)(uintptr_t)avail_addr;
 	vring->device = (void *)(uintptr_t)used_addr;
@@ -221,6 +222,7 @@ virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
 				   VIRTIO_VRING_ALIGN);
 
 	dev->vrings.split[queue_idx].num = vq->vq_nentries;
+	dev->vrings.split[queue_idx].desc_iova = vq->vq_ring_mem;
 	dev->vrings.split[queue_idx].desc = (void *)(uintptr_t)desc_addr;
 	dev->vrings.split[queue_idx].avail = (void *)(uintptr_t)avail_addr;
 	dev->vrings.split[queue_idx].used = (void *)(uintptr_t)used_addr;
@@ -689,7 +691,13 @@ virtio_user_pmd_probe(struct rte_vdev_device *vdev)
 	 * Virtio-user requires using virtual addresses for the descriptors
 	 * buffers, whatever other devices require
 	 */
-	hw->use_va = true;
+	if (backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
+		/* VDPA backend requires using iova for the buffers to make it
+		 * work in IOVA as PA mode also.
+		 */
+		hw->use_va = false;
+	else
+		hw->use_va = true;
 
 	/* previously called by pci probing for physical dev */
 	if (eth_virtio_dev_init(eth_dev) < 0) {
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 6f419665f1..cf46abfd06 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -282,13 +282,13 @@ virtio_init_vring(struct virtqueue *vq)
 	vq->vq_free_cnt = vq->vq_nentries;
 	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
 	if (virtio_with_packed_queue(vq->hw)) {
-		vring_init_packed(&vq->vq_packed.ring, ring_mem,
+		vring_init_packed(&vq->vq_packed.ring, ring_mem, vq->vq_ring_mem,
 				  VIRTIO_VRING_ALIGN, size);
 		vring_desc_init_packed(vq, size);
 	} else {
 		struct vring *vr = &vq->vq_split.ring;
 
-		vring_init_split(vr, ring_mem, VIRTIO_VRING_ALIGN, size);
+		vring_init_split(vr, ring_mem, vq->vq_ring_mem, VIRTIO_VRING_ALIGN, size);
 		vring_desc_init_split(vr->desc, size);
 	}
 	/*
-- 
2.25.1
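
As a usage sketch (not taken from the patch itself), the new mode could
be exercised by forcing the EAL into IOVA-as-PA mode while attaching
virtio-user to a vhost-vDPA character device; the core list and device
path below are examples only:

dpdk-testpmd -l 0-1 --iova-mode=pa \
    --vdev "net_virtio_user0,path=/dev/vhost-vdpa-0,queues=1" \
    -- -i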


Thread overview: 4+ messages

2024-02-26 10:04 Srujana Challa [this message]
2024-02-26 11:40 Srujana Challa
2024-02-27  5:56 Srujana Challa
2024-02-27  8:56 ` Maxime Coquelin
