DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH] net/vmxnet3: add spinlocks to register command access
@ 2021-11-08  8:23 sahithi.singam
  2021-11-16 17:16 ` Ferruh Yigit
  2021-11-30  7:31 ` Yong Wang
  0 siblings, 2 replies; 4+ messages in thread
From: sahithi.singam @ 2021-11-08  8:23 UTC (permalink / raw)
  To: yongwang; +Cc: dev, Sahithi Singam

From: Sahithi Singam <sahithi.singam@oracle.com>

At present, there are no spinlocks around register command access.
This resulted in a race condition when two threads running on
two different cores invoked the link_update function at the same time
to get link status. Due to this race condition, one of the threads
reported a false link status value.

Signed-off-by: Sahithi Singam <sahithi.singam@oracle.com>
---
 drivers/net/vmxnet3/vmxnet3_ethdev.c | 37 ++++++++++++++++++++++++++++
 drivers/net/vmxnet3/vmxnet3_ethdev.h |  1 +
 drivers/net/vmxnet3/vmxnet3_rxtx.c   |  2 ++
 3 files changed, 40 insertions(+)

diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index d1ef1cad08..d4a433e0db 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -252,9 +252,11 @@ eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
 {
 	uint16 txdata_desc_size;
 
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
 	txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+	rte_spinlock_unlock(&hw->cmd_lock);
 
 	return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
 		txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
@@ -285,6 +287,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
 	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
 	eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	rte_spinlock_init(&hw->cmd_lock);
 
 	/* extra mbuf field is required to guess MSS */
 	vmxnet3_segs_dynfield_offset =
@@ -375,7 +378,9 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
 		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
 
 	/* Put device in Quiesce Mode */
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
+	rte_spinlock_unlock(&hw->cmd_lock);
 
 	/* allow untagged pkts */
 	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
@@ -451,9 +456,11 @@ vmxnet3_alloc_intr_resources(struct rte_eth_dev *dev)
 	int nvec = 1; /* for link event */
 
 	/* intr settings */
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_GET_CONF_INTR);
 	cfg = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+	rte_spinlock_unlock(&hw->cmd_lock);
 	hw->intr.type = cfg & 0x3;
 	hw->intr.mask_mode = (cfg >> 2) & 0x3;
 
@@ -910,8 +917,10 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
 			       VMXNET3_GET_ADDR_HI(hw->sharedPA));
 
 	/* Activate device by register write */
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+	rte_spinlock_unlock(&hw->cmd_lock);
 
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
@@ -921,9 +930,11 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
 	/* Setup memory region for rx buffers */
 	ret = vmxnet3_dev_setup_memreg(dev);
 	if (ret == 0) {
+		rte_spinlock_lock(&hw->cmd_lock);
 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_REGISTER_MEMREGS);
 		ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+		rte_spinlock_unlock(&hw->cmd_lock);
 		if (ret != 0)
 			PMD_INIT_LOG(DEBUG,
 				     "Failed in setup memory region cmd\n");
@@ -1027,12 +1038,16 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
 	rte_intr_vec_list_free(intr_handle);
 
 	/* quiesce the device first */
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
+	rte_spinlock_unlock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);
 
 	/* reset the device */
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+	rte_spinlock_unlock(&hw->cmd_lock);
 	PMD_INIT_LOG(DEBUG, "Device reset.");
 
 	vmxnet3_dev_clear_queues(dev);
@@ -1182,7 +1197,9 @@ vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
 {
 	unsigned int i;
 
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+	rte_spinlock_unlock(&hw->cmd_lock);
 
 	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
 
@@ -1285,7 +1302,9 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	struct UPT1_TxStats txStats;
 	struct UPT1_RxStats rxStats;
 
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+	rte_spinlock_unlock(&hw->cmd_lock);
 
 	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
 	for (i = 0; i < hw->num_tx_queues; i++) {
@@ -1335,7 +1354,9 @@ vmxnet3_dev_stats_reset(struct rte_eth_dev *dev)
 	struct UPT1_TxStats txStats = {0};
 	struct UPT1_RxStats rxStats = {0};
 
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+	rte_spinlock_unlock(&hw->cmd_lock);
 
 	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
 
@@ -1443,8 +1464,10 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
 
 	memset(&link, 0, sizeof(link));
 
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+	rte_spinlock_unlock(&hw->cmd_lock);
 
 	if (ret & 0x1)
 		link.link_status = RTE_ETH_LINK_UP;
@@ -1476,7 +1499,9 @@ vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
 	else
 		rxConf->rxMode = rxConf->rxMode & (~feature);
 
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
+	rte_spinlock_unlock(&hw->cmd_lock);
 }
 
 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
@@ -1489,8 +1514,10 @@ vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
 	memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
 
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+	rte_spinlock_unlock(&hw->cmd_lock);
 
 	return 0;
 }
@@ -1508,8 +1535,10 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	else
 		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+	rte_spinlock_unlock(&hw->cmd_lock);
 
 	return 0;
 }
@@ -1560,8 +1589,10 @@ vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
 	else
 		VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);
 
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+	rte_spinlock_unlock(&hw->cmd_lock);
 	return 0;
 }
 
@@ -1579,8 +1610,10 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		else
 			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
 
+		rte_spinlock_lock(&hw->cmd_lock);
 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_FEATURE);
+		rte_spinlock_unlock(&hw->cmd_lock);
 	}
 
 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
@@ -1589,8 +1622,10 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		else
 			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
 
+		rte_spinlock_lock(&hw->cmd_lock);
 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+		rte_spinlock_unlock(&hw->cmd_lock);
 	}
 
 	return 0;
@@ -1622,8 +1657,10 @@ vmxnet3_process_events(struct rte_eth_dev *dev)
 
 	/* Check if there is an error on xmit/recv queues */
 	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+		rte_spinlock_lock(&hw->cmd_lock);
 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_GET_QUEUE_STATUS);
+		rte_spinlock_unlock(&hw->cmd_lock);
 
 		if (hw->tqd_start->status.stopped)
 			PMD_DRV_LOG(ERR, "tq error 0x%x",
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index ef858ac951..d07a8f2757 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -108,6 +108,7 @@ struct vmxnet3_hw {
 	uint64_t              queueDescPA;
 	uint16_t              queue_desc_len;
 	uint16_t              mtu;
+	rte_spinlock_t        cmd_lock;
 
 	VMXNET3_RSSConf       *rss_conf;
 	uint64_t              rss_confPA;
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index deba64be6a..c87f1c6470 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1334,9 +1334,11 @@ vmxnet3_v4_rss_configure(struct rte_eth_dev *dev)
 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP6;
 
+	rte_spinlock_lock(&hw->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_SET_RSS_FIELDS);
 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+	rte_spinlock_unlock(&hw->cmd_lock);
 
 	if (ret != VMXNET3_SUCCESS) {
 		PMD_DRV_LOG(ERR, "Set RSS fields (v4) failed: %d", ret);
-- 
2.32.0.windows.1


^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [dpdk-dev] [PATCH] net/vmxnet3: add spinlocks to register command access
  2021-11-08  8:23 [dpdk-dev] [PATCH] net/vmxnet3: add spinlocks to register command access sahithi.singam
@ 2021-11-16 17:16 ` Ferruh Yigit
  2021-11-30  7:31 ` Yong Wang
  1 sibling, 0 replies; 4+ messages in thread
From: Ferruh Yigit @ 2021-11-16 17:16 UTC (permalink / raw)
  To: yongwang; +Cc: dev, sahithi.singam

On 11/8/2021 8:23 AM, sahithi.singam@oracle.com wrote:
> From: Sahithi Singam<sahithi.singam@oracle.com>
> 
> At present, there are no spinlocks around register command access.
> This resulted in a race condition when two threads running on
> two different cores invoked link_update function at the same time
> to get link status. Due to this race condition, one of the threads
> reported false link status value.
> 
> Signed-off-by: Sahithi Singam<sahithi.singam@oracle.com>

Hi Yong, can you please review this patch?

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH] net/vmxnet3: add spinlocks to register command access
  2021-11-08  8:23 [dpdk-dev] [PATCH] net/vmxnet3: add spinlocks to register command access sahithi.singam
  2021-11-16 17:16 ` Ferruh Yigit
@ 2021-11-30  7:31 ` Yong Wang
  2021-11-30  8:58   ` Ferruh Yigit
  1 sibling, 1 reply; 4+ messages in thread
From: Yong Wang @ 2021-11-30  7:31 UTC (permalink / raw)
  To: sahithi.singam; +Cc: dev

-----Original Message-----
From: "sahithi.singam@oracle.com" <sahithi.singam@oracle.com>
Date: Monday, November 8, 2021 at 12:23 AM
To: Yong Wang <yongwang@vmware.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>, Sahithi Singam <sahithi.singam@oracle.com>
Subject: [PATCH] net/vmxnet3: add spinlocks to register command access

    From: Sahithi Singam <sahithi.singam@oracle.com>

    At present, there are no spinlocks around register command access.
    This resulted in a race condition when two threads running on
    two different cores invoked link_update function at the same time
    to get link status. Due to this race condition, one of the threads
    reported false link status value.

    Signed-off-by: Sahithi Singam <sahithi.singam@oracle.com>
    ---

Thanks Sahithi for the patch.  As we discussed offline, in DPDK, the expectation is that control level synchronization should be handled by the application.  To my knowledge, currently no PMD guarantees such synchronization at the driver callback level.  It makes more sense to have the application manage the synchronization, as it most likely needs to work with multiple PMDs and it's better to keep this behavior consistent across all PMDs (i.e., it does not make a lot of sense to support this behavior in only one particular PMD).

     drivers/net/vmxnet3/vmxnet3_ethdev.c | 37 ++++++++++++++++++++++++++++
     drivers/net/vmxnet3/vmxnet3_ethdev.h |  1 +
     drivers/net/vmxnet3/vmxnet3_rxtx.c   |  2 ++
     3 files changed, 40 insertions(+)

    diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
    index d1ef1cad08..d4a433e0db 100644
    --- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
    +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
    @@ -252,9 +252,11 @@ eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
     {
     	uint16 txdata_desc_size;

    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
     			       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
     	txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
    +	rte_spinlock_unlock(&hw->cmd_lock);

     	return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
     		txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
    @@ -285,6 +287,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
     	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
     	eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
     	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
    +	rte_spinlock_init(&hw->cmd_lock);

     	/* extra mbuf field is required to guess MSS */
     	vmxnet3_segs_dynfield_offset =
    @@ -375,7 +378,9 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
     		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);

     	/* Put device in Quiesce Mode */
    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
    +	rte_spinlock_unlock(&hw->cmd_lock);

     	/* allow untagged pkts */
     	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
    @@ -451,9 +456,11 @@ vmxnet3_alloc_intr_resources(struct rte_eth_dev *dev)
     	int nvec = 1; /* for link event */

     	/* intr settings */
    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
     			       VMXNET3_CMD_GET_CONF_INTR);
     	cfg = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
    +	rte_spinlock_unlock(&hw->cmd_lock);
     	hw->intr.type = cfg & 0x3;
     	hw->intr.mask_mode = (cfg >> 2) & 0x3;

    @@ -910,8 +917,10 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
     			       VMXNET3_GET_ADDR_HI(hw->sharedPA));

     	/* Activate device by register write */
    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
     	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
    +	rte_spinlock_unlock(&hw->cmd_lock);

     	if (ret != 0) {
     		PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
    @@ -921,9 +930,11 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
     	/* Setup memory region for rx buffers */
     	ret = vmxnet3_dev_setup_memreg(dev);
     	if (ret == 0) {
    +		rte_spinlock_lock(&hw->cmd_lock);
     		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
     				       VMXNET3_CMD_REGISTER_MEMREGS);
     		ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
    +		rte_spinlock_unlock(&hw->cmd_lock);
     		if (ret != 0)
     			PMD_INIT_LOG(DEBUG,
     				     "Failed in setup memory region cmd\n");
    @@ -1027,12 +1038,16 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
     	rte_intr_vec_list_free(intr_handle);

     	/* quiesce the device first */
    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
    +	rte_spinlock_unlock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);

     	/* reset the device */
    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
    +	rte_spinlock_unlock(&hw->cmd_lock);
     	PMD_INIT_LOG(DEBUG, "Device reset.");

     	vmxnet3_dev_clear_queues(dev);
    @@ -1182,7 +1197,9 @@ vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
     {
     	unsigned int i;

    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
    +	rte_spinlock_unlock(&hw->cmd_lock);

     	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);

    @@ -1285,7 +1302,9 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
     	struct UPT1_TxStats txStats;
     	struct UPT1_RxStats rxStats;

    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
    +	rte_spinlock_unlock(&hw->cmd_lock);

     	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
     	for (i = 0; i < hw->num_tx_queues; i++) {
    @@ -1335,7 +1354,9 @@ vmxnet3_dev_stats_reset(struct rte_eth_dev *dev)
     	struct UPT1_TxStats txStats = {0};
     	struct UPT1_RxStats rxStats = {0};

    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
    +	rte_spinlock_unlock(&hw->cmd_lock);

     	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);

    @@ -1443,8 +1464,10 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev,

     	memset(&link, 0, sizeof(link));

    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
     	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
    +	rte_spinlock_unlock(&hw->cmd_lock);

     	if (ret & 0x1)
     		link.link_status = RTE_ETH_LINK_UP;
    @@ -1476,7 +1499,9 @@ vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
     	else
     		rxConf->rxMode = rxConf->rxMode & (~feature);

    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
    +	rte_spinlock_unlock(&hw->cmd_lock);
     }

     /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
    @@ -1489,8 +1514,10 @@ vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
     	memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
     	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);

    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
     			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
    +	rte_spinlock_unlock(&hw->cmd_lock);

     	return 0;
     }
    @@ -1508,8 +1535,10 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
     	else
     		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
     	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
     			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
    +	rte_spinlock_unlock(&hw->cmd_lock);

     	return 0;
     }
    @@ -1560,8 +1589,10 @@ vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
     	else
     		VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);

    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
     			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
    +	rte_spinlock_unlock(&hw->cmd_lock);
     	return 0;
     }

    @@ -1579,8 +1610,10 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
     		else
     			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;

    +		rte_spinlock_lock(&hw->cmd_lock);
     		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
     				       VMXNET3_CMD_UPDATE_FEATURE);
    +		rte_spinlock_unlock(&hw->cmd_lock);
     	}

     	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
    @@ -1589,8 +1622,10 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
     		else
     			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);

    +		rte_spinlock_lock(&hw->cmd_lock);
     		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
     				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
    +		rte_spinlock_unlock(&hw->cmd_lock);
     	}

     	return 0;
    @@ -1622,8 +1657,10 @@ vmxnet3_process_events(struct rte_eth_dev *dev)

     	/* Check if there is an error on xmit/recv queues */
     	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
    +		rte_spinlock_lock(&hw->cmd_lock);
     		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
     				       VMXNET3_CMD_GET_QUEUE_STATUS);
    +		rte_spinlock_unlock(&hw->cmd_lock);

     		if (hw->tqd_start->status.stopped)
     			PMD_DRV_LOG(ERR, "tq error 0x%x",
    diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
    index ef858ac951..d07a8f2757 100644
    --- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
    +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
    @@ -108,6 +108,7 @@ struct vmxnet3_hw {
     	uint64_t              queueDescPA;
     	uint16_t              queue_desc_len;
     	uint16_t              mtu;
    +	rte_spinlock_t        cmd_lock;

     	VMXNET3_RSSConf       *rss_conf;
     	uint64_t              rss_confPA;
    diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
    index deba64be6a..c87f1c6470 100644
    --- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
    +++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
    @@ -1334,9 +1334,11 @@ vmxnet3_v4_rss_configure(struct rte_eth_dev *dev)
     	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
     		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP6;

    +	rte_spinlock_lock(&hw->cmd_lock);
     	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
     			       VMXNET3_CMD_SET_RSS_FIELDS);
     	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
    +	rte_spinlock_unlock(&hw->cmd_lock);

     	if (ret != VMXNET3_SUCCESS) {
     		PMD_DRV_LOG(ERR, "Set RSS fields (v4) failed: %d", ret);
    -- 
    2.32.0.windows.1



^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH] net/vmxnet3: add spinlocks to register command access
  2021-11-30  7:31 ` Yong Wang
@ 2021-11-30  8:58   ` Ferruh Yigit
  0 siblings, 0 replies; 4+ messages in thread
From: Ferruh Yigit @ 2021-11-30  8:58 UTC (permalink / raw)
  To: Yong Wang, sahithi.singam; +Cc: dev

On 11/30/2021 7:31 AM, Yong Wang wrote:
> -----Original Message-----
> From:"sahithi.singam@oracle.com"  <sahithi.singam@oracle.com>
> Date: Monday, November 8, 2021 at 12:23 AM
> To: Yong Wang<yongwang@vmware.com>
> Cc:"dev@dpdk.org"  <dev@dpdk.org>, Sahithi Singam<sahithi.singam@oracle.com>
> Subject: [PATCH] net/vmxnet3: add spinlocks to register command access
> 
>      From: Sahithi Singam<sahithi.singam@oracle.com>
> 
>      At present, there are no spinlocks around register command access.
>      This resulted in a race condition when two threads running on
>      two different cores invoked link_update function at the same time
>      to get link status. Due to this race condition, one of the threads
>      reported false link status value.
> 
>      Signed-off-by: Sahithi Singam<sahithi.singam@oracle.com>
>      ---
> 
> Thanks Sahithi for the patch.  As we discussed offline, in DPDK, the expectation is that control level synchronization should be handled by the application.  To my knowledge, currently no PMD guarantees such synchronization at the driver callback level.  It makes more sense to have the application manage the synchronization, as it most likely needs to work with multiple PMDs and it's better to keep this behavior consistent across all PMDs (i.e., it does not make a lot of sense to support this behavior in only one particular PMD).

ack,
updating patch status as rejected.

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2021-11-30  8:58 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-11-08  8:23 [dpdk-dev] [PATCH] net/vmxnet3: add spinlocks to register command access sahithi.singam
2021-11-16 17:16 ` Ferruh Yigit
2021-11-30  7:31 ` Yong Wang
2021-11-30  8:58   ` Ferruh Yigit

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).