From: xuan.ding@intel.com
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, cheng1.jiang@intel.com,
	sunil.pai.g@intel.com, Xuan Ding <xuan.ding@intel.com>
Subject: [PATCH v1 2/2] examples/vhost: use API to check inflight packets
Date: Fri,  8 Apr 2022 10:22:14 +0000	[thread overview]
Message-ID: <20220408102214.11994-3-xuan.ding@intel.com> (raw)
In-Reply-To: <20220408102214.11994-1-xuan.ding@intel.com>

From: Xuan Ding <xuan.ding@intel.com>

In the async data path, call the rte_vhost_async_get_inflight_thread_unsafe()
API to get the number of inflight packets directly from the vhost library,
instead of maintaining a per-device counter in the example application.
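
For reference, below is a minimal sketch (not part of this patch) of the drain
pattern the change adopts: the inflight count is re-queried from the vhost
library on each iteration rather than tracked by the application. The helper
name drain_inflight() is invented for illustration; it assumes an already
registered async channel and a valid dma_id, and omits error handling (e.g. a
negative return from the get API).

#include <rte_mbuf.h>
#include <rte_vhost.h>
#include <rte_vhost_async.h>

static void
drain_inflight(int vid, uint16_t queue_id, int16_t dma_id)
{
	int pkts_inflight;

	/* Ask the vhost library how many async packets are still inflight. */
	pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, queue_id);

	while (pkts_inflight > 0) {
		struct rte_mbuf *m_cpl[pkts_inflight];
		uint16_t n_pkt;

		/* Reap completed DMA copies and free the associated mbufs. */
		n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
					m_cpl, pkts_inflight, dma_id, 0);
		rte_pktmbuf_free_bulk(m_cpl, n_pkt);

		/* Re-query the library instead of decrementing a local counter. */
		pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid,
									queue_id);
	}
}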

Signed-off-by: Xuan Ding <xuan.ding@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 examples/vhost/main.c | 28 +++++++++++++++-------------
 examples/vhost/main.h |  1 -
 2 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index d94fabb060..c4d46de1c5 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -994,10 +994,8 @@ complete_async_pkts(struct vhost_dev *vdev)
 
 	complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
 					VIRTIO_RXQ, p_cpl, MAX_PKT_BURST, dma_id, 0);
-	if (complete_count) {
+	if (complete_count)
 		free_pkts(p_cpl, complete_count);
-		__atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
-	}
 
 }
 
@@ -1039,7 +1037,6 @@ drain_vhost(struct vhost_dev *vdev)
 
 		complete_async_pkts(vdev);
 		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit, dma_id, 0);
-		__atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);
 
 		enqueue_fail = nr_xmit - ret;
 		if (enqueue_fail)
@@ -1368,7 +1365,6 @@ drain_eth_rx(struct vhost_dev *vdev)
 		complete_async_pkts(vdev);
 		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
 					VIRTIO_RXQ, pkts, rx_count, dma_id, 0);
-		__atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);
 
 		enqueue_fail = rx_count - enqueue_count;
 		if (enqueue_fail)
@@ -1540,14 +1536,17 @@ destroy_device(int vid)
 
 	if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
 		uint16_t n_pkt = 0;
+		int pkts_inflight;
 		int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
-		struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+		pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, VIRTIO_RXQ);
+		struct rte_mbuf *m_cpl[pkts_inflight];
 
-		while (vdev->pkts_inflight) {
+		while (pkts_inflight) {
 			n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
-						m_cpl, vdev->pkts_inflight, dma_id, 0);
+						m_cpl, pkts_inflight, dma_id, 0);
 			free_pkts(m_cpl, n_pkt);
-			__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
+			pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid,
+										VIRTIO_RXQ);
 		}
 
 		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
@@ -1651,14 +1650,17 @@ vring_state_changed(int vid, uint16_t queue_id, int enable)
 	if (dma_bind[vid].dmas[queue_id].async_enabled) {
 		if (!enable) {
 			uint16_t n_pkt = 0;
+			int pkts_inflight;
+			pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, queue_id);
 			int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
-			struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+			struct rte_mbuf *m_cpl[pkts_inflight];
 
-			while (vdev->pkts_inflight) {
+			while (pkts_inflight) {
 				n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
-							m_cpl, vdev->pkts_inflight, dma_id, 0);
+							m_cpl, pkts_inflight, dma_id, 0);
 				free_pkts(m_cpl, n_pkt);
-				__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
+				pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid,
+											queue_id);
 			}
 		}
 	}
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index b4a453e77e..e7f395c3c9 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -52,7 +52,6 @@ struct vhost_dev {
 	uint64_t features;
 	size_t hdr_len;
 	uint16_t nr_vrings;
-	uint16_t pkts_inflight;
 	struct rte_vhost_memory *mem;
 	struct device_statistics stats;
 	TAILQ_ENTRY(vhost_dev) global_vdev_entry;
-- 
2.17.1

