DPDK patches and discussions
From: Gaoxiang Liu <gaoxiangliu0@163.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, liugaoxiang@huawei.com,
	Gaoxiang Liu <gaoxiangliu0@163.com>
Subject: [dpdk-dev] [PATCH v3] net/vhost: merge vhost stats loop in vhost Tx/Rx
Date: Mon, 18 Oct 2021 07:19:52 +0800	[thread overview]
Message-ID: <20211017231952.162-1-gaoxiangliu0@163.com> (raw)
In-Reply-To: <20210928014348.1747-1-gaoxiangliu0@163.com>

To improve performance in the vhost Tx/Rx paths, merge the vhost
stats loops. eth_vhost_tx iterates over the sent packets twice:
once to accumulate the byte count and once to update per-packet
xstats. The two loops can be merged into one.
eth_vhost_rx has the same issue as Tx.
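
As an illustrative sketch (not part of the applied diff, argument
lists abbreviated), the Tx path changes roughly as follows:

    /* Before: the transmitted burst is walked twice. */
    for (i = 0; likely(i < nb_tx); i++)
            nb_bytes += bufs[i]->pkt_len;          /* pass 1: byte count */
    vhost_update_packet_xstats(r, bufs, nb_tx, ...); /* pass 2: xstats */

    /* After: one pass updates bytes and per-packet xstats together. */
    for (i = 0; likely(i < nb_tx); i++) {
            nb_bytes += bufs[i]->pkt_len;
            vhost_update_single_packet_xstats(r, bufs[i]);
    }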

Signed-off-by: Gaoxiang Liu <gaoxiangliu0@163.com>
---

v2:
 * Fix coding style issues.

v3:
 * Add __rte_always_inline to vhost_update_single_packet_xstats.
---
 drivers/net/vhost/rte_eth_vhost.c | 64 ++++++++++++++-----------------
 1 file changed, 29 insertions(+), 35 deletions(-)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index a202931e9a..021195ae57 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -335,39 +335,30 @@ vhost_count_xcast_packets(struct vhost_queue *vq,
 	}
 }
 
-static void
-vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs,
-			   uint16_t count, uint64_t nb_bytes,
-			   uint64_t nb_missed)
+static __rte_always_inline void
+vhost_update_single_packet_xstats(struct vhost_queue *vq, struct rte_mbuf *buf)
 {
 	uint32_t pkt_len = 0;
-	uint64_t i = 0;
 	uint64_t index;
 	struct vhost_stats *pstats = &vq->stats;
 
-	pstats->xstats[VHOST_BYTE] += nb_bytes;
-	pstats->xstats[VHOST_MISSED_PKT] += nb_missed;
-	pstats->xstats[VHOST_UNICAST_PKT] += nb_missed;
-
-	for (i = 0; i < count ; i++) {
-		pstats->xstats[VHOST_PKT]++;
-		pkt_len = bufs[i]->pkt_len;
-		if (pkt_len == 64) {
-			pstats->xstats[VHOST_64_PKT]++;
-		} else if (pkt_len > 64 && pkt_len < 1024) {
-			index = (sizeof(pkt_len) * 8)
-				- __builtin_clz(pkt_len) - 5;
-			pstats->xstats[index]++;
-		} else {
-			if (pkt_len < 64)
-				pstats->xstats[VHOST_UNDERSIZE_PKT]++;
-			else if (pkt_len <= 1522)
-				pstats->xstats[VHOST_1024_TO_1522_PKT]++;
-			else if (pkt_len > 1522)
-				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
-		}
-		vhost_count_xcast_packets(vq, bufs[i]);
+	pstats->xstats[VHOST_PKT]++;
+	pkt_len = buf->pkt_len;
+	if (pkt_len == 64) {
+		pstats->xstats[VHOST_64_PKT]++;
+	} else if (pkt_len > 64 && pkt_len < 1024) {
+		index = (sizeof(pkt_len) * 8)
+			- __builtin_clz(pkt_len) - 5;
+		pstats->xstats[index]++;
+	} else {
+		if (pkt_len < 64)
+			pstats->xstats[VHOST_UNDERSIZE_PKT]++;
+		else if (pkt_len <= 1522)
+			pstats->xstats[VHOST_1024_TO_1522_PKT]++;
+		else if (pkt_len > 1522)
+			pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
 	}
+	vhost_count_xcast_packets(vq, buf);
 }
 
 static uint16_t
@@ -376,7 +367,6 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	struct vhost_queue *r = q;
 	uint16_t i, nb_rx = 0;
 	uint16_t nb_receive = nb_bufs;
-	uint64_t nb_bytes = 0;
 
 	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
 		return 0;
@@ -411,11 +401,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 		if (r->internal->vlan_strip)
 			rte_vlan_strip(bufs[i]);
 
-		nb_bytes += bufs[i]->pkt_len;
-	}
+		r->stats.bytes += bufs[i]->pkt_len;
+		r->stats.xstats[VHOST_BYTE] += bufs[i]->pkt_len;
 
-	r->stats.bytes += nb_bytes;
-	vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0);
+		vhost_update_single_packet_xstats(r, bufs[i]);
+	}
 
 out:
 	rte_atomic32_set(&r->while_queuing, 0);
@@ -471,16 +461,20 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 			break;
 	}
 
-	for (i = 0; likely(i < nb_tx); i++)
+	for (i = 0; likely(i < nb_tx); i++) {
 		nb_bytes += bufs[i]->pkt_len;
+		vhost_update_single_packet_xstats(r, bufs[i]);
+	}
 
 	nb_missed = nb_bufs - nb_tx;
 
 	r->stats.pkts += nb_tx;
 	r->stats.bytes += nb_bytes;
-	r->stats.missed_pkts += nb_bufs - nb_tx;
+	r->stats.missed_pkts += nb_missed;
 
-	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);
+	r->stats.xstats[VHOST_BYTE] += nb_bytes;
+	r->stats.xstats[VHOST_MISSED_PKT] += nb_missed;
+	r->stats.xstats[VHOST_UNICAST_PKT] += nb_missed;
 
 	/* According to RFC2863, ifHCOutUcastPkts, ifHCOutMulticastPkts and
 	 * ifHCOutBroadcastPkts counters are increased when packets are not
-- 
2.32.0




Thread overview: 11+ messages
2021-09-26 12:56 [dpdk-dev] [PATCH] " Gaoxiang Liu
2021-09-27  1:30 ` [dpdk-dev] [PATCH v2] " Gaoxiang Liu
2021-09-28  1:43   ` Gaoxiang Liu
2021-10-06 10:42     ` Gaoxiang Liu
2021-10-15 12:16     ` Maxime Coquelin
2021-10-16  8:59       ` Gaoxiang Liu
2021-10-17 23:19     ` Gaoxiang Liu [this message]
2021-10-21 10:04       ` [dpdk-dev] [PATCH v3] " Maxime Coquelin
2021-10-21 12:34       ` Maxime Coquelin
2021-10-21 18:56       ` Ferruh Yigit
2021-10-22  1:35         ` Gaoxiang Liu
