DPDK patches and discussions
* [dpdk-dev] [PATCH 1/2] net/bonding: fix possible unbalanced packet receiving
@ 2020-09-22 10:29 Li RongQing
  2020-09-22 10:29 ` [dpdk-dev] [PATCH 2/2] net/bonding: fix RX queue conversion Li RongQing
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: Li RongQing @ 2020-09-22 10:29 UTC (permalink / raw)
  To: dev

The current Rx round-robin policy for the slaves has two issues:

1. active_slave in bond_dev_private is shared by multiple PMD
threads, which can leave some slaves starved of Rx. For example,
with two PMD threads and two slave ports, both threads may read
active_slave as 0 and receive from slave 0; after completing, each
increments active_slave by one, so in total it advances by two and
wraps back to 0. Next time both threads again start receiving from
slave 0, so slave 1 may drop packets because it is never polled.

2. active_slave is shared and written by multiple PMD threads in
the Rx path on every receive, which is a form of cache false
sharing and hurts performance.

So move active_slave from bond_dev_private to bond_rx_queue and
make it a per-queue variable.
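
To make the race concrete, here is a self-contained toy model (plain
C, not driver code; the two-queue/two-slave interleaving is the one
described in point 1 above):

#include <stdint.h>
#include <stdio.h>

#define NB_SLAVES 2
#define NB_QUEUES 2

int main(void)
{
	uint16_t shared = 0;                /* old: one index shared by all queues */
	uint16_t per_q[NB_QUEUES] = {0, 0}; /* new: one index per Rx queue */
	uint16_t seen[NB_QUEUES];
	int round, q;

	for (round = 0; round < 3; round++) {
		/* racy schedule: both pollers read before either writes */
		for (q = 0; q < NB_QUEUES; q++)
			seen[q] = shared;
		for (q = 0; q < NB_QUEUES; q++)
			if (++shared >= NB_SLAVES)
				shared = 0;

		for (q = 0; q < NB_QUEUES; q++) {
			printf("round %d queue %d: shared -> slave %u, per-queue -> slave %u\n",
			       round, q, (unsigned)seen[q], (unsigned)per_q[q]);
			if (++per_q[q] >= NB_SLAVES)
				per_q[q] = 0;
		}
	}
	return 0;
}

Under this schedule the shared index selects slave 0 for both queues
in every round, so slave 1 is never polled; the per-queue indexes
alternate between the two slaves as intended.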

Signed-off-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: Dongsheng Rong <rongdongsheng@baidu.com>
---
 drivers/net/bonding/eth_bond_private.h |  3 ++-
 drivers/net/bonding/rte_eth_bond_api.c |  6 ------
 drivers/net/bonding/rte_eth_bond_pmd.c | 14 +++++++-------
 3 files changed, 9 insertions(+), 14 deletions(-)

diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index c9b2d0fe4..af92a4c52 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -50,6 +50,8 @@ extern const struct rte_flow_ops bond_flow_ops;
 /** Port Queue Mapping Structure */
 struct bond_rx_queue {
 	uint16_t queue_id;
 	/**< Queue Id */
+	uint16_t active_slave;
+	/**< Next active_slave to poll */
 	struct bond_dev_private *dev_private;
 	/**< Reference to eth_dev private structure */
@@ -132,7 +134,6 @@ struct bond_dev_private {
 	uint16_t nb_rx_queues;			/**< Total number of rx queues */
 	uint16_t nb_tx_queues;			/**< Total number of tx queues*/
 
-	uint16_t active_slave;		/**< Next active_slave to poll */
 	uint16_t active_slave_count;		/**< Number of active slaves */
 	uint16_t active_slaves[RTE_MAX_ETHPORTS];    /**< Active slave list */
 
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 97c667e00..a4007fe07 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -129,12 +129,6 @@ deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
 	RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
 	internals->active_slave_count = active_count;
 
-	/* Resetting active_slave when reaches to max
-	 * no of slaves in active list
-	 */
-	if (internals->active_slave >= active_count)
-		internals->active_slave = 0;
-
 	if (eth_dev->data->dev_started) {
 		if (internals->mode == BONDING_MODE_8023AD) {
 			bond_mode_8023ad_start(eth_dev);
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index cdbd8151e..146d4dc4a 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -69,7 +69,7 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
 	internals = bd_rx_q->dev_private;
 	slave_count = internals->active_slave_count;
-	active_slave = internals->active_slave;
+	active_slave = bd_rx_q->active_slave;
 
 	for (i = 0; i < slave_count && nb_pkts; i++) {
 		uint16_t num_rx_slave;
@@ -86,8 +86,8 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			active_slave = 0;
 	}
 
-	if (++internals->active_slave >= slave_count)
-		internals->active_slave = 0;
+	if (++bd_rx_q->active_slave >= slave_count)
+		bd_rx_q->active_slave = 0;
 	return num_rx_total;
 }
 
@@ -303,9 +303,9 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * slave_count);
 
-	idx = internals->active_slave;
+	idx = bd_rx_q->active_slave;
 	if (idx >= slave_count) {
-		internals->active_slave = 0;
+		bd_rx_q->active_slave = 0;
 		idx = 0;
 	}
 	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
@@ -367,8 +367,8 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
 			idx = 0;
 	}
 
-	if (++internals->active_slave >= slave_count)
-		internals->active_slave = 0;
+	if (++bd_rx_q->active_slave >= slave_count)
+		bd_rx_q->active_slave = 0;
 
 	return num_rx_total;
 }
-- 
2.16.2
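
As a usage-level sketch (not part of the patch; EAL init, error
handling and mempool creation are omitted, and slave port ids 0 and 1
are assumed), the fix matters exactly when the bonded device is
configured with several Rx queues, each polled from its own lcore:

#include <rte_ethdev.h>
#include <rte_eth_bond.h>

static int setup_bond(struct rte_mempool *mp, uint8_t socket)
{
	struct rte_eth_conf conf = {0};
	int bond;
	uint16_t q;

	bond = rte_eth_bond_create("net_bonding0",
				   BONDING_MODE_ROUND_ROBIN, socket);
	if (bond < 0)
		return bond;
	rte_eth_bond_slave_add(bond, 0);	/* assumed slave port ids */
	rte_eth_bond_slave_add(bond, 1);

	/* two Rx queues: with this patch each bond_rx_queue keeps its
	 * own active_slave cursor instead of sharing internals' one
	 */
	rte_eth_dev_configure(bond, 2, 2, &conf);
	for (q = 0; q < 2; q++) {
		rte_eth_rx_queue_setup(bond, q, 128, socket, NULL, mp);
		rte_eth_tx_queue_setup(bond, q, 128, socket, NULL);
	}
	return rte_eth_dev_start(bond);
}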



* [dpdk-dev] [PATCH 2/2] net/bonding: fix RX queue conversion
  2020-09-22 10:29 [dpdk-dev] [PATCH 1/2] net/bonding: fix possible unbalanced packet receiving Li RongQing
@ 2020-09-22 10:29 ` Li RongQing
  2020-09-30  5:40 ` [dpdk-dev] [PATCH] net/bonding: fix possible unbalanced packet receiving Li RongQing
  2020-10-09 13:44 ` [dpdk-dev] [PATCH 1/2] " Ferruh Yigit
  2 siblings, 0 replies; 8+ messages in thread
From: Li RongQing @ 2020-09-22 10:29 UTC (permalink / raw)
  To: dev

From: Dongsheng Rong <rongdongsheng@baidu.com>

bond_ethdev_rx_burst_alb() is on the Rx path, so its queue argument
is a bond_rx_queue, not a bond_tx_queue; cast it accordingly.
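
The stray cast went unnoticed because, in the headers of that time,
bond_tx_queue and bond_rx_queue both happen to start with queue_id
followed (after alignment padding) by dev_private, so reading
bd_tx_q->dev_private off an Rx queue hit the right pointer by layout
accident. A minimal illustration with simplified stand-in structs
(not the real definitions) follows; nothing guarantees the offsets
keep matching as the structs evolve, e.g. after the per-queue
active_slave added in patch 1/2:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct dev_private;			/* opaque stand-in */

struct toy_rx_queue {			/* stand-in for bond_rx_queue */
	uint16_t queue_id;
	uint16_t active_slave;		/* per-queue cursor from patch 1/2 */
	struct dev_private *dev_private;
};

struct toy_tx_queue {			/* stand-in for bond_tx_queue */
	uint16_t queue_id;
	struct dev_private *dev_private;
};

int main(void)
{
	/* equal offsets here are an ABI accident, not a guarantee */
	printf("rx dev_private at %zu, tx dev_private at %zu\n",
	       offsetof(struct toy_rx_queue, dev_private),
	       offsetof(struct toy_tx_queue, dev_private));
	return 0;
}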

Fixes: 06fe78b98ccd ("bond: add mode 6")

Signed-off-by: Dongsheng Rong <rongdongsheng@baidu.com>
Signed-off-by: Li RongQing <lirongqing@baidu.com>
---
 drivers/net/bonding/rte_eth_bond_pmd.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 146d4dc4a..01ae8e902 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -534,8 +534,8 @@ mode6_debug(const char __rte_unused *info,
 static uint16_t
 bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
-	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
-	struct bond_dev_private *internals = bd_tx_q->dev_private;
+	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+	struct bond_dev_private *internals = bd_rx_q->dev_private;
 	struct rte_ether_hdr *eth_h;
 	uint16_t ether_type, offset;
 	uint16_t nb_recv_pkts;
-- 
2.16.2



* Re: [dpdk-dev] [PATCH] net/bonding: fix possible unbalanced packet receiving
  2020-09-22 10:29 [dpdk-dev] [PATCH 1/2] net/bonding: fix possible unbalanced packet receiving Li RongQing
  2020-09-22 10:29 ` [dpdk-dev] [PATCH 2/2] net/bonding: fix RX queue conversion Li RongQing
@ 2020-09-30  5:40 ` Li RongQing
  2020-09-30  8:40   ` Wei Hu (Xavier)
  2020-10-09 13:44 ` [dpdk-dev] [PATCH 1/2] " Ferruh Yigit
  2 siblings, 1 reply; 8+ messages in thread
From: Li RongQing @ 2020-09-30  5:40 UTC (permalink / raw)
  To: dev

ping

thanks

-Li


* Re: [dpdk-dev] [PATCH] net/bonding: fix possible unbalanced packet receiving
  2020-09-30  5:40 ` [dpdk-dev] [PATCH] net/bonding: fix possible unbalanced packet receiving Li RongQing
@ 2020-09-30  8:40   ` Wei Hu (Xavier)
  2020-09-30  9:12     ` Li,Rongqing
  2020-10-09 11:56     ` Li,Rongqing
  0 siblings, 2 replies; 8+ messages in thread
From: Wei Hu (Xavier) @ 2020-09-30  8:40 UTC (permalink / raw)
  To: Li RongQing; +Cc: dev, Wei Hu (Xavier)

Hi, Li RongQing

Please add Fixes tags in the commit log.

And for patch [2/2] of this series: Reviewed-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>


On 2020/9/30 13:40, Li RongQing wrote:
> ping
>
> thanks
>
> -Li


* Re: [dpdk-dev] [PATCH] net/bonding: fix possible unbalanced packet receiving
  2020-09-30  8:40   ` Wei Hu (Xavier)
@ 2020-09-30  9:12     ` Li,Rongqing
  2020-10-09 11:56     ` Li,Rongqing
  1 sibling, 0 replies; 8+ messages in thread
From: Li,Rongqing @ 2020-09-30  9:12 UTC (permalink / raw)
  To: Wei Hu (Xavier); +Cc: dev, Wei Hu (Xavier)

Fixes: ae2a04864a9a ("net/bonding: reduce slave starvation on Rx poll")

Thanks

-Li

From: Wei Hu (Xavier) [mailto:huwei013@chinasoftinc.com]
Sent: Wednesday, September 30, 2020 4:41 PM
To: Li,Rongqing <lirongqing@baidu.com>
Cc: dev@dpdk.org; Wei Hu (Xavier) <xavier.huwei@huawei.com>
Subject: Re: [dpdk-dev] [PATCH] net/bonding: fix possible unbalanced packet receiving


Hi, Li RongQing

Please add Fixes tags in the commit log.

And for patch [2/2] of this series: Reviewed-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>




* Re: [dpdk-dev] [PATCH] net/bonding: fix possible unbalanced packet receiving
  2020-09-30  8:40   ` Wei Hu (Xavier)
  2020-09-30  9:12     ` Li,Rongqing
@ 2020-10-09 11:56     ` Li,Rongqing
  2020-10-09 12:04       ` Ferruh Yigit
  1 sibling, 1 reply; 8+ messages in thread
From: Li,Rongqing @ 2020-10-09 11:56 UTC (permalink / raw)
  To: Wei Hu (Xavier), ferruh.yigit; +Cc: dev, Wei Hu (Xavier)

Ping, should I resend this?

Thanks

-Li


* Re: [dpdk-dev] [PATCH] net/bonding: fix possible unbalanced packet receiving
  2020-10-09 11:56     ` Li,Rongqing
@ 2020-10-09 12:04       ` Ferruh Yigit
  0 siblings, 0 replies; 8+ messages in thread
From: Ferruh Yigit @ 2020-10-09 12:04 UTC (permalink / raw)
  To: Li,Rongqing, Wei Hu (Xavier); +Cc: dev, Wei Hu (Xavier)

On 10/9/2020 12:56 PM, Li,Rongqing wrote:
> Ping, should I resend this?
> 

No, I can add Fixes line while merging, thanks.



* Re: [dpdk-dev] [PATCH 1/2] net/bonding: fix possible unbalanced packet receiving
  2020-09-22 10:29 [dpdk-dev] [PATCH 1/2] net/bonding: fix possible unbalanced packet receiving Li RongQing
  2020-09-22 10:29 ` [dpdk-dev] [PATCH 2/2] net/bonding: fix RX queue conversion Li RongQing
  2020-09-30  5:40 ` [dpdk-dev] [PATCH] net/bonding: fix possible unbalanced packet receiving Li RongQing
@ 2020-10-09 13:44 ` Ferruh Yigit
  2 siblings, 0 replies; 8+ messages in thread
From: Ferruh Yigit @ 2020-10-09 13:44 UTC (permalink / raw)
  To: Li RongQing, dev

On 9/22/2020 11:29 AM, Li RongQing wrote:
> The current Rx round-robin policy for the slaves has two issues:
> 
> 1. active_slave in bond_dev_private is shared by multiple PMD
> threads, which can leave some slaves starved of Rx. For example,
> with two PMD threads and two slave ports, both threads may read
> active_slave as 0 and receive from slave 0; after completing, each
> increments active_slave by one, so in total it advances by two and
> wraps back to 0. Next time both threads again start receiving from
> slave 0, so slave 1 may drop packets because it is never polled.
> 
> 2. active_slave is shared and written by multiple PMD threads in
> the Rx path on every receive, which is a form of cache false
> sharing and hurts performance.
> 
> So move active_slave from bond_dev_private to bond_rx_queue and
> make it a per-queue variable.
> 
> Signed-off-by: Li RongQing <lirongqing@baidu.com>
> Signed-off-by: Dongsheng Rong <rongdongsheng@baidu.com>
>

Fixes: ae2a04864a9a ("net/bonding: reduce slave starvation on Rx poll")
Cc: stable@dpdk.org

For series,
Reviewed-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>

Series applied to dpdk-next-net/main, thanks.



