From: Bernard Iremonger <bernard.iremonger@intel.com>
To: dev@dpdk.org
Cc: declan.doherty@intel.com, konstantin.ananyev@intel.com,
	Bernard Iremonger <bernard.iremonger@intel.com>
Subject: [dpdk-dev] [PATCH v2 2/6] bonding: grab queue spinlocks in slave add and remove
Date: Thu, 26 May 2016 17:38:43 +0100
Message-ID: <1464280727-25752-3-git-send-email-bernard.iremonger@intel.com>
In-Reply-To: <1464280727-25752-1-git-send-email-bernard.iremonger@intel.com>

When a slave is added to or removed from a bonded device that has been
started, also take the rx and tx queue spinlocks introduced in the
previous patch, so that the rx and tx burst functions cannot run while
the slave list is being modified.

Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
---
 drivers/net/bonding/rte_eth_bond_api.c | 51 ++++++++++++++++++++++++++++++++--
 1 file changed, 48 insertions(+), 3 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 53df9fe..006c901 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -437,8 +437,10 @@ rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id)
 {
 	struct rte_eth_dev *bonded_eth_dev;
 	struct bond_dev_private *internals;
-
+	struct bond_tx_queue *bd_tx_q;
+	struct bond_rx_queue *bd_rx_q;
 	int retval;
+	uint16_t i;
 
 	/* Verify that port id's are valid bonded and slave ports */
 	if (valid_bonded_port_id(bonded_port_id) != 0)
@@ -448,11 +450,30 @@ rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id)
 	internals = bonded_eth_dev->data->dev_private;
 
 	rte_spinlock_lock(&internals->lock);
+	if (bonded_eth_dev->data->dev_started) {
+		for (i = 0; i < bonded_eth_dev->data->nb_rx_queues; i++) {
+			bd_rx_q = bonded_eth_dev->data->rx_queues[i];
+			rte_spinlock_lock(&bd_rx_q->lock);
+		}
+		for (i = 0; i < bonded_eth_dev->data->nb_tx_queues; i++) {
+			bd_tx_q = bonded_eth_dev->data->tx_queues[i];
+			rte_spinlock_lock(&bd_tx_q->lock);
+		}
+	}
 
 	retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id);
 
+	if (bonded_eth_dev->data->dev_started) {
+		for (i = 0; i < bonded_eth_dev->data->nb_rx_queues; i++) {
+			bd_rx_q = bonded_eth_dev->data->rx_queues[i];
+			rte_spinlock_unlock(&bd_rx_q->lock);
+		}
+		for (i = 0; i < bonded_eth_dev->data->nb_tx_queues; i++) {
+			bd_tx_q = bonded_eth_dev->data->tx_queues[i];
+			rte_spinlock_unlock(&bd_tx_q->lock);
+		}
+	}
 	rte_spinlock_unlock(&internals->lock);
-
 	return retval;
 }
 
@@ -541,7 +562,10 @@ rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id)
 {
 	struct rte_eth_dev *bonded_eth_dev;
 	struct bond_dev_private *internals;
+	struct bond_tx_queue *bd_tx_q;
+	struct bond_rx_queue *bd_rx_q;
 	int retval;
+	uint16_t i;
 
 	if (valid_bonded_port_id(bonded_port_id) != 0)
 		return -1;
@@ -550,11 +574,32 @@ rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id)
 	internals = bonded_eth_dev->data->dev_private;
 
 	rte_spinlock_lock(&internals->lock);
+	if (bonded_eth_dev->data->dev_started) {
+		for (i = 0; i < bonded_eth_dev->data->nb_rx_queues; i++) {
+			bd_rx_q = bonded_eth_dev->data->rx_queues[i];
+			rte_spinlock_lock(&bd_rx_q->lock);
+		}
+
+		for (i = 0; i < bonded_eth_dev->data->nb_tx_queues; i++) {
+			bd_tx_q = bonded_eth_dev->data->tx_queues[i];
+			rte_spinlock_lock(&bd_tx_q->lock);
+		}
+	}
 
 	retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id);
 
-	rte_spinlock_unlock(&internals->lock);
+	if (bonded_eth_dev->data->dev_started) {
+		for (i = 0; i < bonded_eth_dev->data->nb_tx_queues; i++) {
+			bd_tx_q = bonded_eth_dev->data->tx_queues[i];
+			rte_spinlock_unlock(&bd_tx_q->lock);
+		}
 
+		for (i = 0; i < bonded_eth_dev->data->nb_rx_queues; i++) {
+			bd_rx_q = bonded_eth_dev->data->rx_queues[i];
+			rte_spinlock_unlock(&bd_rx_q->lock);
+		}
+	}
+	rte_spinlock_unlock(&internals->lock);
 	return retval;
 }
 
-- 
2.6.3
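
A caller-side sketch of what this change is meant to make safe, for
reviewers: hot-plugging a slave while the bonded port is started and
other lcores may be inside its rx/tx burst functions. This is
illustrative only and not part of the patch; the function name and
parameters are invented for the example.

#include <rte_ethdev.h>
#include <rte_eth_bond.h>

/* Replace one slave of a running bonded port with another. With this
 * patch, rte_eth_bond_slave_add() and rte_eth_bond_slave_remove()
 * take every rx/tx queue spinlock (added in patch 1/6) in addition
 * to internals->lock when the device is started, so a concurrent
 * burst call cannot observe a half-updated slave list. */
static int
swap_slave(uint8_t bonded_port_id, uint8_t old_slave, uint8_t new_slave)
{
	int ret;

	ret = rte_eth_bond_slave_add(bonded_port_id, new_slave);
	if (ret != 0)
		return ret;

	return rte_eth_bond_slave_remove(bonded_port_id, old_slave);
}

Both functions acquire the locks in the same order (internals->lock,
then all rx queue locks, then all tx queue locks), which avoids
deadlock between concurrent add and remove calls.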


Thread overview: 42+ messages
2016-05-05 15:14 [dpdk-dev] [PATCH 0/5] bonding: locks Bernard Iremonger
2016-05-05 15:14 ` [dpdk-dev] [PATCH 1/5] bonding: replace spinlock with read/write lock Bernard Iremonger
2016-05-05 17:12   ` Stephen Hemminger
2016-05-06 10:32     ` Declan Doherty
2016-05-06 15:55       ` Stephen Hemminger
2016-05-13 17:10         ` Ananyev, Konstantin
2016-05-13 17:18           ` Ananyev, Konstantin
2016-05-26 16:24             ` Iremonger, Bernard
2016-05-05 15:14 ` [dpdk-dev] [PATCH 2/5] bonding: add read/write lock to rx/tx burst functions Bernard Iremonger
2016-05-05 15:14 ` [dpdk-dev] [PATCH 3/5] bonding: remove memcopy of slaves from rx/tx burst function Bernard Iremonger
2016-05-05 15:14 ` [dpdk-dev] [PATCH 4/5] bonding: add read/write lock to stop function Bernard Iremonger
2016-05-05 15:15 ` [dpdk-dev] [PATCH 5/5] bonding: add read/write lock to the link_update function Bernard Iremonger
2016-05-26 16:38 ` [dpdk-dev] [PATCH v2 0/6] bonding: locks Bernard Iremonger
2016-05-26 16:38   ` [dpdk-dev] [PATCH v2 1/6] bonding: add spinlock to rx and tx queues Bernard Iremonger
2016-06-10 18:12     ` Ananyev, Konstantin
2016-06-12 17:11     ` [dpdk-dev] [PATCH v3 0/4] bonding: locks Bernard Iremonger
2016-06-12 17:11       ` [dpdk-dev] [PATCH v3 1/4] bonding: add spinlock to rx and tx queues Bernard Iremonger
2016-06-12 17:11       ` [dpdk-dev] [PATCH v3 2/4] bonding: grab queue spinlocks in slave add and remove Bernard Iremonger
2016-06-12 17:11       ` [dpdk-dev] [PATCH v3 3/4] bonding: take queue spinlock in rx/tx burst functions Bernard Iremonger
2016-06-13  9:18         ` Bruce Richardson
2016-06-13 12:28           ` Iremonger, Bernard
2016-06-16 14:32             ` Bruce Richardson
2016-06-16 15:00               ` Thomas Monjalon
2016-06-16 16:41                 ` Iremonger, Bernard
2016-06-16 18:38                   ` Thomas Monjalon
2017-02-15 18:01                     ` Ferruh Yigit
2017-02-16  9:13                       ` Bruce Richardson
2017-02-16 11:39                         ` Iremonger, Bernard
2017-02-20 11:15                           ` Ferruh Yigit
2016-09-09 11:29         ` Ferruh Yigit
2016-06-12 17:11       ` [dpdk-dev] [PATCH v3 4/4] bonding: remove memcpy from " Bernard Iremonger
2016-09-11 12:39         ` Yuanhan Liu
2016-05-26 16:38   ` Bernard Iremonger [this message]
2016-06-10 18:14     ` [dpdk-dev] [PATCH v2 2/6] bonding: grab queue spinlocks in slave add and remove Ananyev, Konstantin
2016-05-26 16:38   ` [dpdk-dev] [PATCH v2 3/6] bonding: take queue spinlock in rx/tx burst functions Bernard Iremonger
2016-06-10 18:14     ` Ananyev, Konstantin
2016-05-26 16:38   ` [dpdk-dev] [PATCH v2 4/6] bonding: add spinlock to stop function Bernard Iremonger
2016-05-26 16:38   ` [dpdk-dev] [PATCH v2 5/6] bonding: add spinlock to link update function Bernard Iremonger
2016-05-26 16:38   ` [dpdk-dev] [PATCH v2 6/6] bonding: remove memcpy from burst functions Bernard Iremonger
2016-06-10 18:15     ` Ananyev, Konstantin
2016-06-10 14:45   ` [dpdk-dev] [PATCH v2 0/6] bonding: locks Bruce Richardson
2016-06-10 18:24     ` Iremonger, Bernard
