From: xuan.ding@intel.com
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, sunil.pai.g@intel.com,
	Xuan Ding <xuan.ding@intel.com>
Subject: [RFC 2/2] examples/vhost: use API to check inflight packets
Date: Wed, 16 Feb 2022 07:04:17 +0000
Message-ID: <20220216070417.9597-3-xuan.ding@intel.com>
In-Reply-To: <20220216070417.9597-1-xuan.ding@intel.com>

From: Xuan Ding <xuan.ding@intel.com>

In the async data path, call the rte_vhost_async_get_inflight_thread_unsafe()
API to query the number of inflight packets directly, instead of maintaining
the pkts_inflight counter in the application.

Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
 examples/vhost/main.c | 28 +++++++++++++++-------------
 examples/vhost/main.h |  1 -
 2 files changed, 15 insertions(+), 14 deletions(-)
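
For reference, the pattern both hunks below converge on can be sketched as the
following helper. This is a minimal illustration only: drain_inflight_pkts() is
a hypothetical name (the patch open-codes the loop in destroy_device() and
vring_state_changed()), free_pkts() is simplified from the example's helper in
main.c, and the declarations are assumed to live in <rte_vhost_async.h>
together with the new API introduced in patch 1/2 of this series.

#include <rte_mbuf.h>
#include <rte_vhost_async.h>

/* Simplified stand-in for the free_pkts() helper in examples/vhost/main.c. */
static void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
	while (n--)
		rte_pktmbuf_free(pkts[n]);
}

/* Drain all inflight async packets of one virtqueue, sizing the completion
 * array from the library-maintained counter instead of an application-side
 * pkts_inflight field.
 */
static void
drain_inflight_pkts(int vid, uint16_t queue_id, int16_t dma_id)
{
	int pkts_inflight;

	pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, queue_id);

	while (pkts_inflight > 0) {
		struct rte_mbuf *m_cpl[pkts_inflight];
		uint16_t n_pkt;

		/* Clear completed descriptors and free the returned mbufs. */
		n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
					m_cpl, pkts_inflight, dma_id, 0);
		free_pkts(m_cpl, n_pkt);

		/* Re-query the library; there is no local counter to decrement. */
		pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid,
									queue_id);
	}
}

Because the loop is sized and terminated by the value the library returns, the
pkts_inflight field and its __atomic_add_fetch()/__atomic_sub_fetch() updates
can be dropped from struct vhost_dev, as the diff below does.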

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 3e784f5c6f..ba7ab23f4e 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -972,10 +972,8 @@ complete_async_pkts(struct vhost_dev *vdev)
 
 	complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
 					VIRTIO_RXQ, p_cpl, MAX_PKT_BURST, dma_id, 0);
-	if (complete_count) {
+	if (complete_count)
 		free_pkts(p_cpl, complete_count);
-		__atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
-	}
 
 }
 
@@ -1017,7 +1015,6 @@ drain_vhost(struct vhost_dev *vdev)
 
 		complete_async_pkts(vdev);
 		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit, dma_id, 0);
-		__atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);
 
 		enqueue_fail = nr_xmit - ret;
 		if (enqueue_fail)
@@ -1346,7 +1343,6 @@ drain_eth_rx(struct vhost_dev *vdev)
 		complete_async_pkts(vdev);
 		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
 					VIRTIO_RXQ, pkts, rx_count, dma_id, 0);
-		__atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);
 
 		enqueue_fail = rx_count - enqueue_count;
 		if (enqueue_fail)
@@ -1518,14 +1514,17 @@ destroy_device(int vid)
 
 	if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
 		uint16_t n_pkt = 0;
+		int pkts_inflight;
 		int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
-		struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+		pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, VIRTIO_RXQ);
+		struct rte_mbuf *m_cpl[pkts_inflight];
 
-		while (vdev->pkts_inflight) {
+		while (pkts_inflight) {
 			n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
-						m_cpl, vdev->pkts_inflight, dma_id, 0);
+						m_cpl, pkts_inflight, dma_id, 0);
 			free_pkts(m_cpl, n_pkt);
-			__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
+			pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid,
+										VIRTIO_RXQ);
 		}
 
 		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
@@ -1629,14 +1628,17 @@ vring_state_changed(int vid, uint16_t queue_id, int enable)
 	if (dma_bind[vid].dmas[queue_id].async_enabled) {
 		if (!enable) {
 			uint16_t n_pkt = 0;
+			int pkts_inflight;
+			pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, queue_id);
 			int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
-			struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+			struct rte_mbuf *m_cpl[pkts_inflight];
 
-			while (vdev->pkts_inflight) {
+			while (pkts_inflight) {
 				n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
-							m_cpl, vdev->pkts_inflight, dma_id, 0);
+							m_cpl, pkts_inflight, dma_id, 0);
 				free_pkts(m_cpl, n_pkt);
-				__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
+				pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid,
+											queue_id);
 			}
 		}
 	}
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index b4a453e77e..e7f395c3c9 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -52,7 +52,6 @@ struct vhost_dev {
 	uint64_t features;
 	size_t hdr_len;
 	uint16_t nr_vrings;
-	uint16_t pkts_inflight;
 	struct rte_vhost_memory *mem;
 	struct device_statistics stats;
 	TAILQ_ENTRY(vhost_dev) global_vdev_entry;
-- 
2.17.1


Thread overview: 6+ messages
2022-02-16  7:04 [RFC 0/2] add unsafe API to get inflight packets xuan.ding
2022-02-16  7:04 ` [RFC 1/2] vhost: add unsafe API to check inflight packets xuan.ding
2022-03-28 15:05   ` Maxime Coquelin
2022-03-29  2:21     ` Ding, Xuan
2022-02-16  7:04 ` xuan.ding [this message]
2022-03-28 15:19   ` [RFC 2/2] examples/vhost: use API to check inflight packets Maxime Coquelin
