DPDK patches and discussions
* [dpdk-dev] [PATCH 1/4] net/mlx5: fix Rx queue release
@ 2020-10-15  6:38 Matan Azrad
  2020-10-15  6:38 ` [dpdk-dev] [PATCH 2/4] net/mlx5: fix Tx " Matan Azrad
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Matan Azrad @ 2020-10-15  6:38 UTC
  To: dev; +Cc: Viacheslav Ovsiienko

The HW objects of the Rx queue are created/destroyed at the device
start/stop stage, while the ethdev configurations for the Rx queue
start from the rx_queue_setup stage.
The PMD should save all the last configurations it got from the ethdev
and apply them to the device in the dev_start operation.

Wrongly, the code recently added to mitigate the reference counters did
not take this rule into account and coupled the configurations and the
HW objects so that they were created and destroyed together.

This caused a memory leak and other memory issues.

Make sure the HW object is released in the stop operation once no
reference to it remains, while the configurations stay saved.
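
A minimal sketch of the intended split, using simplified and partly
hypothetical names (queue_ctrl, hw_obj and release_hw_obj are not the
driver's identifiers): the HW object is released as soon as only the
saved-configuration reference remains, while the control structure
holding the configurations is freed only when the counter drops to
zero.

#include <stdint.h>
#include <stdlib.h>

/* Illustrative only; simplified from the PMD release logic. */
struct queue_ctrl {
	uint32_t refcnt; /* Plain counter updated with __atomic builtins. */
	void *hw_obj;    /* HW object, valid only while the port is started. */
	/* The configurations saved from the ethdev live here as well. */
};

/* Hypothetical stand-in for the HW-object destructor. */
static void release_hw_obj(void *obj)
{
	(void)obj;
}

int queue_release(struct queue_ctrl *ctrl)
{
	if (__atomic_sub_fetch(&ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
		return 1; /* Other users still hold references. */
	/* Counter is 1 (configuration reference) or 0: drop the HW object. */
	if (ctrl->hw_obj != NULL) {
		release_hw_obj(ctrl->hw_obj);
		ctrl->hw_obj = NULL;
	}
	/* Only at zero are the saved configurations dropped too. */
	if (__atomic_load_n(&ctrl->refcnt, __ATOMIC_RELAXED) == 0)
		free(ctrl);
	return 0;
}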

Fixes: 24e4b650badc ("net/mlx5: mitigate Rx queue reference counters")

Signed-off-by: Matan Azrad <matan@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxq.c  | 23 +++++++++++++----------
 drivers/net/mlx5/mlx5_rxtx.h |  2 +-
 2 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f1d8373..e1783ba 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -447,7 +447,8 @@
 		return -rte_errno;
 	}
 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
-	return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
+	return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
+
 }
 
 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -1541,7 +1542,7 @@ struct mlx5_rxq_ctrl *
 	tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
 #endif
 	tmpl->rxq.idx = idx;
-	rte_atomic32_inc(&tmpl->refcnt);
+	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 	return tmpl;
 error:
@@ -1588,7 +1589,7 @@ struct mlx5_rxq_ctrl *
 	tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
 	tmpl->hairpin_conf = *hairpin_conf;
 	tmpl->rxq.idx = idx;
-	rte_atomic32_inc(&tmpl->refcnt);
+	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 	return tmpl;
 }
@@ -1613,7 +1614,7 @@ struct mlx5_rxq_ctrl *
 
 	if (rxq_data) {
 		rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-		rte_atomic32_inc(&rxq_ctrl->refcnt);
+		__atomic_add_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
 	}
 	return rxq_ctrl;
 }
@@ -1638,7 +1639,7 @@ struct mlx5_rxq_ctrl *
 	if (!(*priv->rxqs)[idx])
 		return 0;
 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
-	if (!rte_atomic32_dec_and_test(&rxq_ctrl->refcnt))
+	if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
 		return 1;
 	if (rxq_ctrl->obj) {
 		priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
@@ -1646,13 +1647,15 @@ struct mlx5_rxq_ctrl *
 		mlx5_free(rxq_ctrl->obj);
 		rxq_ctrl->obj = NULL;
 	}
-	if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-		mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+	if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
 		rxq_free_elts(rxq_ctrl);
+	if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+			mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+		LIST_REMOVE(rxq_ctrl, next);
+		mlx5_free(rxq_ctrl);
+		(*priv->rxqs)[idx] = NULL;
 	}
-	LIST_REMOVE(rxq_ctrl, next);
-	mlx5_free(rxq_ctrl);
-	(*priv->rxqs)[idx] = NULL;
 	return 0;
 }
 
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 674296e..c3734e3 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -165,7 +165,7 @@ enum mlx5_rxq_type {
 struct mlx5_rxq_ctrl {
 	struct mlx5_rxq_data rxq; /* Data path structure. */
 	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
+	uint32_t refcnt; /* Reference counter. */
 	struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
 	enum mlx5_rxq_type type; /* Rxq type. */
-- 
1.8.3.1



* [dpdk-dev] [PATCH 2/4] net/mlx5: fix Tx queue release
  2020-10-15  6:38 [dpdk-dev] [PATCH 1/4] net/mlx5: fix Rx queue release Matan Azrad
@ 2020-10-15  6:38 ` Matan Azrad
  2020-10-15  6:38 ` [dpdk-dev] [PATCH 3/4] net/mlx5: fix event queue number query Matan Azrad
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Matan Azrad @ 2020-10-15  6:38 UTC
  To: dev; +Cc: Viacheslav Ovsiienko

The HW objects of the Tx queue are created/destroyed at the device
start/stop stage, while the ethdev configurations for the Tx queue
start from the tx_queue_setup stage.
The PMD should save all the last configurations it got from the ethdev
and apply them to the device in the dev_start operation.

Wrongly, the code recently added to mitigate the reference counters did
not take this rule into account and coupled the configurations and the
HW objects so that they were created and destroyed together.

This caused a memory leak and other memory issues.

Make sure the HW object is released in the stop operation once no
reference to it remains, while the configurations stay saved.
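
The counter conversion deserves a note as well: the old
!rte_atomic32_dec_and_test() test bailed out for any non-zero result,
so the HW object could never be released while the saved-configuration
reference was still held. A sketch of the new test, illustrative
rather than the driver's exact code:

#include <stdint.h>

/* Return nonzero while real users (beyond the saved configuration)
 * still reference the queue; a result of exactly 1 falls through,
 * letting dev_stop release the HW object with the configurations kept.
 */
static int other_users_remain(uint32_t *refcnt)
{
	return __atomic_sub_fetch(refcnt, 1, __ATOMIC_RELAXED) > 1;
}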

Fixes: 17a57183c0eb ("net/mlx5: mitigate Tx queue reference counters")

Signed-off-by: Matan Azrad <matan@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxtx.h |  2 +-
 drivers/net/mlx5/mlx5_txq.c  | 21 ++++++++++++---------
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index c3734e3..b243b6f 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -269,7 +269,7 @@ enum mlx5_txq_type {
 /* TX queue control descriptor. */
 struct mlx5_txq_ctrl {
 	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
+	uint32_t refcnt; /* Reference counter. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	enum mlx5_txq_type type; /* The txq ctrl type. */
 	unsigned int max_inline_data; /* Max inline data. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index af84f5f..9c2dd2a 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1121,7 +1121,7 @@ struct mlx5_txq_ctrl *
 		rte_errno = ENOMEM;
 		goto error;
 	}
-	rte_atomic32_inc(&tmpl->refcnt);
+	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
@@ -1165,7 +1165,7 @@ struct mlx5_txq_ctrl *
 	tmpl->txq.idx = idx;
 	tmpl->hairpin_conf = *hairpin_conf;
 	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
-	rte_atomic32_inc(&tmpl->refcnt);
+	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
 }
@@ -1190,7 +1190,7 @@ struct mlx5_txq_ctrl *
 
 	if (txq_data) {
 		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
-		rte_atomic32_inc(&ctrl->refcnt);
+		__atomic_add_fetch(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
 	}
 	return ctrl;
 }
@@ -1215,7 +1215,7 @@ struct mlx5_txq_ctrl *
 	if (!(*priv->txqs)[idx])
 		return 0;
 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	if (!rte_atomic32_dec_and_test(&txq_ctrl->refcnt))
+	if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
 		return 1;
 	if (txq_ctrl->obj) {
 		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
@@ -1229,12 +1229,15 @@ struct mlx5_txq_ctrl *
 			txq_ctrl->txq.fcqs = NULL;
 		}
 		txq_free_elts(txq_ctrl);
-		mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
 	}
-	LIST_REMOVE(txq_ctrl, next);
-	mlx5_free(txq_ctrl);
-	(*priv->txqs)[idx] = NULL;
 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+	if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
+			mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
+		LIST_REMOVE(txq_ctrl, next);
+		mlx5_free(txq_ctrl);
+		(*priv->txqs)[idx] = NULL;
+	}
 	return 0;
 }
 
@@ -1258,7 +1261,7 @@ struct mlx5_txq_ctrl *
 	if (!(*priv->txqs)[idx])
 		return -1;
 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	return (rte_atomic32_read(&txq->refcnt) == 1);
+	return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
 }
 
 /**
-- 
1.8.3.1



* [dpdk-dev] [PATCH 3/4] net/mlx5: fix event queue number query
  2020-10-15  6:38 [dpdk-dev] [PATCH 1/4] net/mlx5: fix Rx queue release Matan Azrad
  2020-10-15  6:38 ` [dpdk-dev] [PATCH 2/4] net/mlx5: fix Tx " Matan Azrad
@ 2020-10-15  6:38 ` Matan Azrad
  2020-10-15  6:38 ` [dpdk-dev] [PATCH 4/4] net/mlx5/linux: fix Tx queue operations decision Matan Azrad
  2020-10-18 11:58 ` [dpdk-dev] [PATCH 1/4] net/mlx5: fix Rx queue release Raslan Darawsheh
  3 siblings, 0 replies; 5+ messages in thread
From: Matan Azrad @ 2020-10-15  6:38 UTC
  To: dev; +Cc: Viacheslav Ovsiienko, stable

When an Rx/Tx queue is created by DevX, its CQ configuration should
include the EQ number for the interrupts.
The EQ is managed by the kernel, and there is a glue API to query the
EQ number from the kernel.
The EQ query API takes a vector number that specifies the kernel vector
used for the interrupt handling.

The vector number was wrongly derived from the configured CPU instead
of from the device attributes of the supported vectors.
The CPU was detected by the rte_lcore_to_cpu_id API without any check;
in a non-EAL thread context the value was 0xFFFFFFFF, which caused the
EQ number query API to fail.

Use vector 0, which must be supported by the kernel, for every EQ
number query.
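
A minimal sketch of the fixed query, assuming the rdma-core
mlx5dv_devx_query_eqn() call that the mlx5 glue layer wraps (the
helper and its error handling are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <infiniband/mlx5dv.h>

/* Query the EQ number once with vector 0, which the kernel must
 * support, instead of deriving a vector from the calling lcore.
 */
static int query_eqn(struct ibv_context *ctx, uint32_t *eqn)
{
	int err = mlx5dv_devx_query_eqn(ctx, 0 /* vector */, eqn);

	if (err)
		fprintf(stderr, "failed to query EQ number: %d\n", err);
	return err;
}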

Fixes: 08d1838f645a ("net/mlx5: implement CQ for Rx using DevX API")
Fixes: d133f4cdb706 ("net/mlx5: create clock queue for packet pacing")
Cc: stable@dpdk.org

Signed-off-by: Matan Azrad <matan@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 44822ad..f1e3579 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -947,10 +947,8 @@ struct mlx5_dev_ctx_shared *
 		goto error;
 	}
 	if (sh->devx) {
-		uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
-
 		/* Query the EQN for this core. */
-		err = mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->eqn);
+		err = mlx5_glue->devx_query_eqn(sh->ctx, 0, &sh->eqn);
 		if (err) {
 			rte_errno = errno;
 			DRV_LOG(ERR, "Failed to query event queue number %d.",
-- 
1.8.3.1



* [dpdk-dev] [PATCH 4/4] net/mlx5/linux: fix Tx queue operations decision
  2020-10-15  6:38 [dpdk-dev] [PATCH 1/4] net/mlx5: fix Rx queue release Matan Azrad
  2020-10-15  6:38 ` [dpdk-dev] [PATCH 2/4] net/mlx5: fix Tx " Matan Azrad
  2020-10-15  6:38 ` [dpdk-dev] [PATCH 3/4] net/mlx5: fix event queue number query Matan Azrad
@ 2020-10-15  6:38 ` Matan Azrad
  2020-10-18 11:58 ` [dpdk-dev] [PATCH 1/4] net/mlx5: fix Rx queue release Raslan Darawsheh
  3 siblings, 0 replies; 5+ messages in thread
From: Matan Azrad @ 2020-10-15  6:38 UTC
  To: dev; +Cc: Viacheslav Ovsiienko

One of the conditions for creating the Tx queue object by DevX is that
the DPDK mlx5 driver will not act as the E-Switch manager of the
device. The issue is with the default FDB flows managed by the kernel
driver: the kernel does not create them when the Tx queues are created
by DevX.

The correct decision is to create the Tx queues by Verbs when E-Switch
is enabled, but the current behavior uses the opposite condition and
creates them by DevX.

Create the Tx queues by Verbs when E-Switch is enabled.
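
Reduced to a sketch, the corrected decision reads as follows (the
names follow the diff below, but the function is condensed and the
queue-control lookup is elided, so treat it as illustrative rather
than the exact driver code):

/* After the fix: hairpin queues always use DevX; regular queues use
 * DevX only when the UAR offset is available and E-Switch is
 * disabled; otherwise fall back to Verbs so the kernel installs the
 * default FDB flows.
 */
static int
txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
	    struct mlx5_txq_ctrl *txq_ctrl)
{
	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
		return mlx5_txq_devx_obj_new(dev, idx);
#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
	if (!txq_ctrl->priv->config.dv_esw_en)
		return mlx5_txq_devx_obj_new(dev, idx);
#endif
	return mlx5_txq_ibv_obj_new(dev, idx);
}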

Fixes: 86d259cec852 ("net/mlx5: separate Tx queue object creations")

Signed-off-by: Matan Azrad <matan@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c | 53 +++++++++++++---------------------------
 1 file changed, 17 insertions(+), 36 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 09d0944..d177b4f 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -526,26 +526,16 @@
 mlx5_os_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
 	struct mlx5_txq_ctrl *txq_ctrl =
 			container_of(txq_data, struct mlx5_txq_ctrl, txq);
 
-	/*
-	 * When DevX is supported and DV flow is enable, and dest tir is enable,
-	 * hairpin functions use DevX API.
-	 * When, in addition, DV E-Switch is enable and DevX uar offset is
-	 * supported, all Tx functions also use DevX API.
-	 * Otherwise, all Tx functions use Verbs API.
-	 */
-	if (config->devx && config->dv_flow_en && config->dest_tir) {
-		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
-			return mlx5_txq_devx_obj_new(dev, idx);
+	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
+		return mlx5_txq_devx_obj_new(dev, idx);
 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
-		if (config->dv_esw_en)
-			return mlx5_txq_devx_obj_new(dev, idx);
+	if (!priv->config.dv_esw_en)
+		return mlx5_txq_devx_obj_new(dev, idx);
 #endif
-	}
 	return mlx5_txq_ibv_obj_new(dev, idx);
 }
 
@@ -558,20 +548,16 @@
 static void
 mlx5_os_txq_obj_release(struct mlx5_txq_obj *txq_obj)
 {
-	struct mlx5_dev_config *config = &txq_obj->txq_ctrl->priv->config;
-
-	if (config->devx && config->dv_flow_en && config->dest_tir) {
+	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
+		mlx5_txq_devx_obj_release(txq_obj);
+		return;
+	}
 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
-		if (config->dv_esw_en) {
-			mlx5_txq_devx_obj_release(txq_obj);
-			return;
-		}
-#endif
-		if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
-			mlx5_txq_devx_obj_release(txq_obj);
-			return;
-		}
+	if (!txq_obj->txq_ctrl->priv->config.dv_esw_en) {
+		mlx5_txq_devx_obj_release(txq_obj);
+		return;
 	}
+#endif
 	mlx5_txq_ibv_obj_release(txq_obj);
 }
 
@@ -1377,12 +1363,6 @@
 			goto error;
 		}
 	}
-	/*
-	 * Initialize the dev_ops structure with DevX/Verbs function pointers.
-	 * When DevX is supported and both DV flow and dest tir are enabled, all
-	 * Rx functions use DevX API (except for drop that has not yet been
-	 * implemented in DevX).
-	 */
 	if (config->devx && config->dv_flow_en && config->dest_tir) {
 		priv->obj_ops = devx_obj_ops;
 		priv->obj_ops.drop_action_create =
@@ -1392,16 +1372,17 @@
 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
 		priv->obj_ops.txq_obj_modify = ibv_obj_ops.txq_obj_modify;
 #else
-		if (!config->dv_esw_en)
+		if (config->dv_esw_en)
 			priv->obj_ops.txq_obj_modify =
 						ibv_obj_ops.txq_obj_modify;
 #endif
+		/* Use specific wrappers for Tx object. */
+		priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
+		priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
+
 	} else {
 		priv->obj_ops = ibv_obj_ops;
 	}
-	/* The Tx objects are managed by a specific linux wrapper functions. */
-	priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
-	priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
 	/* Supported Verbs flow priority number detection. */
 	err = mlx5_flow_discover_priorities(eth_dev);
 	if (err < 0) {
-- 
1.8.3.1



* Re: [dpdk-dev] [PATCH 1/4] net/mlx5: fix Rx queue release
  2020-10-15  6:38 [dpdk-dev] [PATCH 1/4] net/mlx5: fix Rx queue release Matan Azrad
                   ` (2 preceding siblings ...)
  2020-10-15  6:38 ` [dpdk-dev] [PATCH 4/4] net/mlx5/linux: fix Tx queue operations decision Matan Azrad
@ 2020-10-18 11:58 ` Raslan Darawsheh
  3 siblings, 0 replies; 5+ messages in thread
From: Raslan Darawsheh @ 2020-10-18 11:58 UTC
  To: Matan Azrad, dev; +Cc: Slava Ovsiienko

Hi,

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Matan Azrad
> Sent: Thursday, October 15, 2020 9:38 AM
> To: dev@dpdk.org
> Cc: Slava Ovsiienko <viacheslavo@nvidia.com>
> Subject: [dpdk-dev] [PATCH 1/4] net/mlx5: fix Rx queue release
> 
> [...]

Series applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh
