From: Li RongQing <lirongqing@baidu.com>
To: dev@dpdk.org
Date: Tue, 22 Sep 2020 18:29:31 +0800
Message-Id: <1600770572-22716-1-git-send-email-lirongqing@baidu.com>
Subject: [dpdk-dev] [PATCH 1/2] net/bonding: fix possible unbalanced packet receiving

The current Rx round-robin policy for the slaves has two issues:

1. active_slave in bond_dev_private is shared by multiple Rx polling
   threads (PMDs), which can leave some slaves starved on Rx. For
   example, with two polling threads and two slave ports: both threads
   start to receive, both see active_slave as 0 and receive from
   slave 0; after completing, each increments active_slave, so it is
   advanced by two in total. Next time both again start receiving from
   slave 0, so slave 1 may drop packets because it is never polled.

2. active_slave is shared and written by multiple polling threads in
   the Rx path on every receive; this is a kind of cache false sharing
   and hurts performance.
So move active_slave from bond_dev_private to bond_rx_queue and make it
a per-queue variable.

Signed-off-by: Li RongQing
Signed-off-by: Dongsheng Rong
---
 drivers/net/bonding/eth_bond_private.h |  3 ++-
 drivers/net/bonding/rte_eth_bond_api.c |  6 ------
 drivers/net/bonding/rte_eth_bond_pmd.c | 14 +++++++-------
 3 files changed, 9 insertions(+), 14 deletions(-)

diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index c9b2d0fe4..af92a4c52 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -50,6 +50,8 @@ extern const struct rte_flow_ops bond_flow_ops;
 /** Port Queue Mapping Structure */
 struct bond_rx_queue {
 	uint16_t queue_id;
+	/**< Next active_slave to poll */
+	uint16_t active_slave;
 	/**< Queue Id */
 	struct bond_dev_private *dev_private;
 	/**< Reference to eth_dev private structure */
@@ -132,7 +134,6 @@ struct bond_dev_private {
 	uint16_t nb_rx_queues;			/**< Total number of rx queues */
 	uint16_t nb_tx_queues;			/**< Total number of tx queues*/
 
-	uint16_t active_slave;		/**< Next active_slave to poll */
 	uint16_t active_slave_count;		/**< Number of active slaves */
 	uint16_t active_slaves[RTE_MAX_ETHPORTS];    /**< Active slave list */
 
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 97c667e00..a4007fe07 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -129,12 +129,6 @@ deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
 	RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
 	internals->active_slave_count = active_count;
 
-	/* Resetting active_slave when reaches to max
-	 * no of slaves in active list
-	 */
-	if (internals->active_slave >= active_count)
-		internals->active_slave = 0;
-
 	if (eth_dev->data->dev_started) {
 		if (internals->mode == BONDING_MODE_8023AD) {
 			bond_mode_8023ad_start(eth_dev);
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index cdbd8151e..146d4dc4a 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -69,7 +69,7 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
 	internals = bd_rx_q->dev_private;
 	slave_count = internals->active_slave_count;
-	active_slave = internals->active_slave;
+	active_slave = bd_rx_q->active_slave;
 
 	for (i = 0; i < slave_count && nb_pkts; i++) {
 		uint16_t num_rx_slave;
@@ -86,8 +86,8 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		active_slave = 0;
 	}
 
-	if (++internals->active_slave >= slave_count)
-		internals->active_slave = 0;
+	if (++bd_rx_q->active_slave >= slave_count)
+		bd_rx_q->active_slave = 0;
 
 	return num_rx_total;
 }
@@ -303,9 +303,9 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * slave_count);
 
-	idx = internals->active_slave;
+	idx = bd_rx_q->active_slave;
 	if (idx >= slave_count) {
-		internals->active_slave = 0;
+		bd_rx_q->active_slave = 0;
 		idx = 0;
 	}
 	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
@@ -367,8 +367,8 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
 		idx = 0;
 	}
 
-	if (++internals->active_slave >= slave_count)
-		internals->active_slave = 0;
+	if (++bd_rx_q->active_slave >= slave_count)
+		bd_rx_q->active_slave = 0;
 
 	return num_rx_total;
 }
-- 
2.16.2
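
[Editor's illustration, not part of the patch] A minimal standalone sketch of the
per-queue round-robin idea described above: each Rx queue keeps its own cursor, so
no state is shared or written across polling threads, and the cursor advances by one
slave per burst so every slave gets polled. Names such as demo_rx_queue,
demo_rx_burst and rx_from_slave are hypothetical and stand in for the bonding PMD's
real structures and rte_eth_rx_burst().

#include <stdint.h>

#define DEMO_MAX_SLAVES 8

struct demo_rx_queue {
	uint16_t active_slave;               /* per-queue cursor, never shared */
	uint16_t slave_count;
	uint16_t slaves[DEMO_MAX_SLAVES];
};

/* Stand-in for rte_eth_rx_burst() on one slave port. */
extern uint16_t rx_from_slave(uint16_t slave_id, void **bufs, uint16_t n);

static uint16_t
demo_rx_burst(struct demo_rx_queue *q, void **bufs, uint16_t nb_pkts)
{
	uint16_t num_rx_total = 0;
	uint16_t idx = q->active_slave;
	uint16_t i;

	/* Poll each slave at most once, starting at this queue's cursor. */
	for (i = 0; i < q->slave_count && nb_pkts > 0; i++) {
		uint16_t n = rx_from_slave(q->slaves[idx],
					   bufs + num_rx_total, nb_pkts);
		num_rx_total += n;
		nb_pkts -= n;
		if (++idx >= q->slave_count)
			idx = 0;
	}

	/* Advance the per-queue cursor by one so consecutive bursts start
	 * on different slaves and none of them is starved. */
	if (++q->active_slave >= q->slave_count)
		q->active_slave = 0;

	return num_rx_total;
}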