From: Li RongQing <lirongqing@baidu.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 1/2] net/bonding: fix possible unbalanced packet receiving
Date: Tue, 22 Sep 2020 18:29:31 +0800
Message-ID: <1600770572-22716-1-git-send-email-lirongqing@baidu.com>

The current Rx round-robin policy for the slaves has two issues:

1. active_slave in bond_dev_private is shared by all Rx queues of the
bonding device, so several polling threads can starve a slave of Rx.
For example, with two polling threads and two slave ports, both threads
read active_slave as 0 and receive from slave 0; after finishing, each
increments active_slave, so it advances by two in total and the next
bursts start from slave 0 again. Slave 1 may then drop packets because
it is never polled.

2. active_slave is shared and written by every polling thread on each
Rx burst, which causes false sharing of its cache line and lowers
performance.

So move active_slave from bond_dev_private to bond_rx_queue and make
it a per-queue variable.
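
Below is a minimal, standalone C sketch (illustration only, not part of
the patch; names such as demo_rx_queue, demo_rx_burst and fake_slave_rx
are hypothetical) of the per-queue round-robin index this change
introduces: each Rx queue keeps its own starting slave, so queues polled
in parallel cannot disturb each other's rotation.

#include <stdint.h>
#include <stdio.h>

#define NB_SLAVES 2

/* stands in for bond_rx_queue after this patch: the rotation index
 * lives in the queue, not in the shared device private data */
struct demo_rx_queue {
	uint16_t active_slave;
};

/* stand-in for polling one slave port; always returns 0 packets so
 * that every slave gets visited in the demo */
static uint16_t fake_slave_rx(uint16_t slave_id)
{
	printf("polling slave %u\n", (unsigned)slave_id);
	return 0;
}

/* simplified shape of bond_ethdev_rx_burst() using a per-queue index */
static void demo_rx_burst(struct demo_rx_queue *q, uint16_t slave_count)
{
	uint16_t i;
	uint16_t active = q->active_slave;

	for (i = 0; i < slave_count; i++) {
		fake_slave_rx(active);
		if (++active >= slave_count)
			active = 0;
	}

	/* advance the starting slave for the next burst of this queue
	 * only; no other queue reads or writes this field */
	if (++q->active_slave >= slave_count)
		q->active_slave = 0;
}

int main(void)
{
	struct demo_rx_queue q0 = { .active_slave = 0 };
	struct demo_rx_queue q1 = { .active_slave = 0 };

	/* even if two lcores ran these bursts concurrently on their own
	 * queues, neither rotation could skip a slave, unlike the old
	 * shared internals->active_slave counter */
	demo_rx_burst(&q0, NB_SLAVES);
	demo_rx_burst(&q1, NB_SLAVES);
	demo_rx_burst(&q0, NB_SLAVES);
	return 0;
}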

Signed-off-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: Dongsheng Rong <rongdongsheng@baidu.com>
---
 drivers/net/bonding/eth_bond_private.h |  3 ++-
 drivers/net/bonding/rte_eth_bond_api.c |  6 ------
 drivers/net/bonding/rte_eth_bond_pmd.c | 14 +++++++-------
 3 files changed, 9 insertions(+), 14 deletions(-)

diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index c9b2d0fe4..af92a4c52 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -50,6 +50,8 @@ extern const struct rte_flow_ops bond_flow_ops;
 /** Port Queue Mapping Structure */
 struct bond_rx_queue {
 	uint16_t queue_id;
 	/**< Queue Id */
+	uint16_t active_slave;
+	/**< Next active slave to poll */
 	struct bond_dev_private *dev_private;
 	/**< Reference to eth_dev private structure */
@@ -132,7 +134,6 @@ struct bond_dev_private {
 	uint16_t nb_rx_queues;			/**< Total number of rx queues */
 	uint16_t nb_tx_queues;			/**< Total number of tx queues*/
 
-	uint16_t active_slave;		/**< Next active_slave to poll */
 	uint16_t active_slave_count;		/**< Number of active slaves */
 	uint16_t active_slaves[RTE_MAX_ETHPORTS];    /**< Active slave list */
 
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 97c667e00..a4007fe07 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -129,12 +129,6 @@ deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
 	RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
 	internals->active_slave_count = active_count;
 
-	/* Resetting active_slave when reaches to max
-	 * no of slaves in active list
-	 */
-	if (internals->active_slave >= active_count)
-		internals->active_slave = 0;
-
 	if (eth_dev->data->dev_started) {
 		if (internals->mode == BONDING_MODE_8023AD) {
 			bond_mode_8023ad_start(eth_dev);
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index cdbd8151e..146d4dc4a 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -69,7 +69,7 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
 	internals = bd_rx_q->dev_private;
 	slave_count = internals->active_slave_count;
-	active_slave = internals->active_slave;
+	active_slave = bd_rx_q->active_slave;
 
 	for (i = 0; i < slave_count && nb_pkts; i++) {
 		uint16_t num_rx_slave;
@@ -86,8 +86,8 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			active_slave = 0;
 	}
 
-	if (++internals->active_slave >= slave_count)
-		internals->active_slave = 0;
+	if (++bd_rx_q->active_slave >= slave_count)
+		bd_rx_q->active_slave = 0;
 	return num_rx_total;
 }
 
@@ -303,9 +303,9 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * slave_count);
 
-	idx = internals->active_slave;
+	idx = bd_rx_q->active_slave;
 	if (idx >= slave_count) {
-		internals->active_slave = 0;
+		bd_rx_q->active_slave = 0;
 		idx = 0;
 	}
 	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
@@ -367,8 +367,8 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
 			idx = 0;
 	}
 
-	if (++internals->active_slave >= slave_count)
-		internals->active_slave = 0;
+	if (++bd_rx_q->active_slave >= slave_count)
+		bd_rx_q->active_slave = 0;
 
 	return num_rx_total;
 }
-- 
2.16.2


Thread overview: 8+ messages
2020-09-22 10:29 Li RongQing [this message]
2020-09-22 10:29 ` [dpdk-dev] [PATCH 2/2] net/bonding: fix RX queue conversion Li RongQing
2020-09-30  5:40 ` [dpdk-dev] [PATCH] net/bonding: fix a possible unbalance packet receiving Li RongQing
2020-09-30  8:40   ` Wei Hu (Xavier)
2020-09-30  9:12     ` Li,Rongqing
2020-10-09 11:56     ` Li,Rongqing
2020-10-09 12:04       ` Ferruh Yigit
2020-10-09 13:44 ` [dpdk-dev] [PATCH 1/2] " Ferruh Yigit
