DPDK patches and discussions
From: Srujana Challa <schalla@marvell.com>
To: <dev@dpdk.org>, <maxime.coquelin@redhat.com>, <chenbox@nvidia.com>
Cc: <jerinj@marvell.com>, <ndabilpuram@marvell.com>,
	<vattunuru@marvell.com>,  <schalla@marvell.com>
Subject: [PATCH v3 1/3] net/virtio_user: convert cq descriptor IOVA address to Virtual address
Date: Wed, 3 Jul 2024 15:33:51 +0530	[thread overview]
Message-ID: <20240703100353.2243038-2-schalla@marvell.com> (raw)
In-Reply-To: <20240703100353.2243038-1-schalla@marvell.com>

This patch converts descriptor buffer IOVA addresses to virtual
addresses while processing the shadow control queue when IOVA mode
is PA. This change enables virtio-user to operate with IOVAs as the
descriptor buffer addresses.

Signed-off-by: Srujana Challa <schalla@marvell.com>
---
 .../net/virtio/virtio_user/virtio_user_dev.c  | 33 ++++++++++++-------
 1 file changed, 21 insertions(+), 12 deletions(-)

diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 1365c8a5c8..7f35f4b06b 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -896,6 +896,15 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
 
 #define CVQ_MAX_DATA_DESCS 32
 
+static inline void *
+virtio_user_iova2virt(rte_iova_t iova)
+{
+	if (rte_eal_iova_mode() == RTE_IOVA_PA)
+		return rte_mem_iova2virt(iova);
+	else
+		return (void *)(uintptr_t)iova;
+}
+
 static uint32_t
 virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vring,
 			    uint16_t idx_hdr)
@@ -921,17 +930,18 @@ virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vri
 	idx_status = i;
 	n_descs++;
 
-	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
+	hdr = virtio_user_iova2virt(vring->desc[idx_hdr].addr);
 	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
 	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
-		uint16_t queues;
+		uint16_t queues, *addr;
 
-		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
+		addr = virtio_user_iova2virt(vring->desc[idx_data].addr);
+		queues = *addr;
 		status = virtio_user_handle_mq(dev, queues);
 	} else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
 		struct virtio_net_ctrl_rss *rss;
 
-		rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
+		rss = virtio_user_iova2virt(vring->desc[idx_data].addr);
 		status = virtio_user_handle_mq(dev, rss->max_tx_vq);
 	} else if (hdr->class == VIRTIO_NET_CTRL_RX  ||
 		   hdr->class == VIRTIO_NET_CTRL_MAC ||
@@ -944,7 +954,7 @@ virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vri
 				(struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
 
 	/* Update status */
-	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;
+	*(virtio_net_ctrl_ack *)virtio_user_iova2virt(vring->desc[idx_status].addr) = status;
 
 	return n_descs;
 }
@@ -986,18 +996,18 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
 		n_descs++;
 	}
 
-	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
+	hdr = virtio_user_iova2virt(vring->desc[idx_hdr].addr);
 	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
 	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
-		uint16_t queues;
+		uint16_t queues, *addr;
 
-		queues = *(uint16_t *)(uintptr_t)
-				vring->desc[idx_data].addr;
+		addr = virtio_user_iova2virt(vring->desc[idx_data].addr);
+		queues = *addr;
 		status = virtio_user_handle_mq(dev, queues);
 	} else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
 		struct virtio_net_ctrl_rss *rss;
 
-		rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
+		rss = virtio_user_iova2virt(vring->desc[idx_data].addr);
 		status = virtio_user_handle_mq(dev, rss->max_tx_vq);
 	} else if (hdr->class == VIRTIO_NET_CTRL_RX  ||
 		   hdr->class == VIRTIO_NET_CTRL_MAC ||
@@ -1010,8 +1020,7 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
 				(struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
 
 	/* Update status */
-	*(virtio_net_ctrl_ack *)(uintptr_t)
-		vring->desc[idx_status].addr = status;
+	*(virtio_net_ctrl_ack *)virtio_user_iova2virt(vring->desc[idx_status].addr) = status;
 
 	/* Update used descriptor */
 	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
-- 
2.25.1
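
For context, the address translation the patch introduces boils down to the
minimal standalone sketch below (the helper name iova_to_virt_example and the
includes are illustrative; the patch itself adds virtio_user_iova2virt inside
virtio_user_dev.c):

	#include <stdint.h>
	#include <rte_eal.h>     /* rte_eal_iova_mode() */
	#include <rte_memory.h>  /* rte_iova_t, rte_mem_iova2virt() */

	/* Translate a descriptor buffer address into a host virtual address.
	 * In IOVA-as-PA mode the descriptor carries a physical address that
	 * has to be resolved through the EAL memory maps; in IOVA-as-VA mode
	 * the address is already a usable virtual address. */
	static inline void *
	iova_to_virt_example(rte_iova_t iova)
	{
		if (rte_eal_iova_mode() == RTE_IOVA_PA)
			return rte_mem_iova2virt(iova);

		return (void *)(uintptr_t)iova;
	}

A control-queue handler would then dereference the translated pointer instead
of casting the raw descriptor address, for example:

	uint16_t queues = *(uint16_t *)iova_to_virt_example(vring->desc[idx_data].addr);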



Thread overview: 19+ messages
2024-02-29 13:29 [PATCH v2 0/3] net/virtio: support IOVA as PA mode for vDPA backend Srujana Challa
2024-02-29 13:29 ` [PATCH v2 1/3] net/virtio_user: avoid cq descriptor buffer address accessing Srujana Challa
2024-06-28  8:07   ` Maxime Coquelin
2024-07-02 11:09     ` [EXTERNAL] " Srujana Challa
2024-07-02 12:41       ` Maxime Coquelin
2024-07-03 10:03   ` [PATCH v3 0/3] net/virtio: support IOVA as PA mode for vDPA backend Srujana Challa
2024-07-03 10:03     ` Srujana Challa [this message]
2024-07-03 10:19       ` [PATCH v3 1/3] net/virtio_user: convert cq descriptor IOVA address to Virtual address Jerin Jacob
2024-07-03 13:28         ` Maxime Coquelin
2024-07-03 13:40       ` Maxime Coquelin
2024-07-03 10:03     ` [PATCH v3 2/3] net/virtio: store desc IOVA address in vring data structure Srujana Challa
2024-07-03 13:41       ` Maxime Coquelin
2024-07-03 10:03     ` [PATCH v3 3/3] net/virtio_user: support sharing vq descriptor IOVA to the backend Srujana Challa
2024-07-03 13:41       ` Maxime Coquelin
2024-07-03 14:34     ` [PATCH v3 0/3] net/virtio: support IOVA as PA mode for vDPA backend Maxime Coquelin
2024-02-29 13:29 ` [PATCH v2 2/3] net/virtio: store desc IOVA address in vring data structure Srujana Challa
2024-02-29 13:29 ` [PATCH v2 3/3] net/virtio_user: support sharing vq descriptor IOVA to the backend Srujana Challa
2024-06-19  9:39 ` [PATCH v2 0/3] net/virtio: support IOVA as PA mode for vDPA backend Srujana Challa
2024-06-28 13:33 ` Maxime Coquelin
