DPDK patches and discussions
* [PATCH 0/5] net/mlx5: some counter fixes
@ 2022-10-31 16:08 Michael Baum
  2022-10-31 16:08 ` [PATCH 1/5] net/mlx5: fix race condition in counter pool resizing Michael Baum
                   ` (5 more replies)
  0 siblings, 6 replies; 7+ messages in thread
From: Michael Baum @ 2022-10-31 16:08 UTC (permalink / raw)
  To: dev; +Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko

Some fixes for HW/SW steering counters.

Michael Baum (5):
  net/mlx5: fix race condition in counter pool resizing
  net/mlx5: fix accessing the wrong counter
  net/mlx5: fix missing counter elements copies in r2r cases
  net/mlx5: add assertions in counter get/put
  net/mlx5: assert for enough space in counter rings

 drivers/net/mlx5/mlx5.c            |  28 ++++++-
 drivers/net/mlx5/mlx5.h            |   7 +-
 drivers/net/mlx5/mlx5_flow.c       |  24 +++---
 drivers/net/mlx5/mlx5_flow_dv.c    |  53 +++----------
 drivers/net/mlx5/mlx5_flow_hw.c    |   2 +-
 drivers/net/mlx5/mlx5_flow_verbs.c |  23 ++----
 drivers/net/mlx5/mlx5_hws_cnt.c    |  25 +++---
 drivers/net/mlx5/mlx5_hws_cnt.h    | 117 ++++++++++++++++-------------
 8 files changed, 131 insertions(+), 148 deletions(-)

-- 
2.25.1



* [PATCH 1/5] net/mlx5: fix race condition in counter pool resizing
  2022-10-31 16:08 [PATCH 0/5] net/mlx5: some counter fixes Michael Baum
@ 2022-10-31 16:08 ` Michael Baum
  2022-10-31 16:08 ` [PATCH 2/5] net/mlx5: fix accessing the wrong counter Michael Baum
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Michael Baum @ 2022-10-31 16:08 UTC (permalink / raw)
  To: dev
  Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko, suanmingm, stable

The counter management structure has an array of counter pools. This
array is unallocated at management structure initialization and grows
on demand.

The resizing includes:
1. Allocate memory for the new size.
2. Copy the existing data to the new memory.
3. Move the pointer to the new memory.
4. Free the old memory.

The third step can be reordered before the second one, and the compiler
may indeed do so. As a result, another thread might read the new pointer
before the copy completes and read invalid data or even crash.
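
For illustration, a minimal C sketch of the unsafe pattern described
above (the identifiers g_pools, g_n, RESIZE_STEP and pools_resize() are
illustrative, not the driver's):

  #include <errno.h>
  #include <stdlib.h>
  #include <string.h>

  struct counter_pool;

  #define RESIZE_STEP 64

  static struct counter_pool **g_pools; /* read concurrently, lock-free */
  static unsigned int g_n;              /* current array capacity */

  static int
  pools_resize(void)
  {
          struct counter_pool **old = g_pools;
          struct counter_pool **new_pools =
                  calloc(g_n + RESIZE_STEP, sizeof(*new_pools));

          if (new_pools == NULL)
                  return -ENOMEM;
          if (old != NULL)
                  memcpy(new_pools, old, g_n * sizeof(*old));
          /*
           * Nothing orders the store below against the memcpy above for
           * a lock-free reader: the compiler or CPU may publish the new
           * pointer first, so a reader can see uninitialized slots, or
           * keep dereferencing "old" after the free below.
           */
          g_pools = new_pools;
          g_n += RESIZE_STEP;
          free(old);
          return 0;
  }

Allocating the array once at its maximum size removes the pointer update
and the free entirely, so there is nothing left to reorder.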

This patch allocates memory for this array once at management structure
initialization and limits the number of counters to 16M (2^15 pools of
512 counters each).

Fixes: 3aa279157fa0 ("net/mlx5: synchronize flow counter pool creation")
Cc: suanmingm@nvidia.com
Cc: stable@dpdk.org

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5.c            | 28 +++++++++++++---
 drivers/net/mlx5/mlx5.h            |  7 ++--
 drivers/net/mlx5/mlx5_flow.c       | 24 +++++++-------
 drivers/net/mlx5/mlx5_flow_dv.c    | 53 +++++-------------------------
 drivers/net/mlx5/mlx5_flow_verbs.c | 23 +++----------
 5 files changed, 52 insertions(+), 83 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 78234b116c..b85a56ec24 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -561,18 +561,34 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
  *
  * @param[in] sh
  *   Pointer to mlx5_dev_ctx_shared object to free
+ *
+ * @return
+ *   0 on success, otherwise negative errno value and rte_errno is set.
  */
-static void
+static int
 mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
 {
 	int i, j;
 
 	if (sh->config.dv_flow_en < 2) {
+		void *pools;
+
+		pools = mlx5_malloc(MLX5_MEM_ZERO,
+				    sizeof(struct mlx5_flow_counter_pool *) *
+				    MLX5_COUNTER_POOLS_MAX_NUM,
+				    0, SOCKET_ID_ANY);
+		if (!pools) {
+			DRV_LOG(ERR,
+				"Counter management allocation failed.");
+			rte_errno = ENOMEM;
+			return -rte_errno;
+		}
 		memset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));
 		TAILQ_INIT(&sh->sws_cmng.flow_counters);
 		sh->sws_cmng.min_id = MLX5_CNT_BATCH_OFFSET;
 		sh->sws_cmng.max_id = -1;
 		sh->sws_cmng.last_pool_idx = POOL_IDX_INVALID;
+		sh->sws_cmng.pools = pools;
 		rte_spinlock_init(&sh->sws_cmng.pool_update_sl);
 		for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
 			TAILQ_INIT(&sh->sws_cmng.counters[i]);
@@ -598,6 +614,7 @@ mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
 		sh->hws_max_log_bulk_sz = log_dcs;
 		sh->hws_max_nb_counters = max_nb_cnts;
 	}
+	return 0;
 }
 
 /**
@@ -655,8 +672,7 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
 					claim_zero
 					 (mlx5_flow_os_destroy_flow_action
 					  (cnt->action));
-				if (fallback && MLX5_POOL_GET_CNT
-				    (pool, j)->dcs_when_free)
+				if (fallback && cnt->dcs_when_free)
 					claim_zero(mlx5_devx_cmd_destroy
 						   (cnt->dcs_when_free));
 			}
@@ -1572,8 +1588,12 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 		if (err)
 			goto error;
 	}
+	err = mlx5_flow_counters_mng_init(sh);
+	if (err) {
+		DRV_LOG(ERR, "Failed to initialize counters management.");
+		goto error;
+	}
 	mlx5_flow_aging_init(sh);
-	mlx5_flow_counters_mng_init(sh);
 	mlx5_flow_ipool_create(sh);
 	/* Add context to the global device list. */
 	LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index c9fcb71b69..cbe2d88b9e 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -386,11 +386,10 @@ struct mlx5_hw_q {
 } __rte_cache_aligned;
 
 
-
-
+#define MLX5_COUNTER_POOLS_MAX_NUM (1 << 15)
 #define MLX5_COUNTERS_PER_POOL 512
 #define MLX5_MAX_PENDING_QUERIES 4
-#define MLX5_CNT_CONTAINER_RESIZE 64
+#define MLX5_CNT_MR_ALLOC_BULK 64
 #define MLX5_CNT_SHARED_OFFSET 0x80000000
 #define IS_BATCH_CNT(cnt) (((cnt) & (MLX5_CNT_SHARED_OFFSET - 1)) >= \
 			   MLX5_CNT_BATCH_OFFSET)
@@ -549,7 +548,6 @@ TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool);
 /* Counter global management structure. */
 struct mlx5_flow_counter_mng {
 	volatile uint16_t n_valid; /* Number of valid pools. */
-	uint16_t n; /* Number of pools. */
 	uint16_t last_pool_idx; /* Last used pool index */
 	int min_id; /* The minimum counter ID in the pools. */
 	int max_id; /* The maximum counter ID in the pools. */
@@ -621,6 +619,7 @@ struct mlx5_aso_age_action {
 };
 
 #define MLX5_ASO_AGE_ACTIONS_PER_POOL 512
+#define MLX5_ASO_AGE_CONTAINER_RESIZE 64
 
 struct mlx5_aso_age_pool {
 	struct mlx5_devx_obj *flow_hit_aso_obj;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8e7d649d15..e25154199f 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -9063,7 +9063,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
 {
 	struct mlx5_counter_stats_mem_mng *mem_mng;
 	volatile struct flow_counter_stats *raw_data;
-	int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
+	int raws_n = MLX5_CNT_MR_ALLOC_BULK + MLX5_MAX_PENDING_QUERIES;
 	int size = (sizeof(struct flow_counter_stats) *
 			MLX5_COUNTERS_PER_POOL +
 			sizeof(struct mlx5_counter_stats_raw)) * raws_n +
@@ -9101,7 +9101,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
 	}
 	for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
 		LIST_INSERT_HEAD(&sh->sws_cmng.free_stat_raws,
-				 mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,
+				 mem_mng->raws + MLX5_CNT_MR_ALLOC_BULK + i,
 				 next);
 	LIST_INSERT_HEAD(&sh->sws_cmng.mem_mngs, mem_mng, next);
 	sh->sws_cmng.mem_mng = mem_mng;
@@ -9125,14 +9125,13 @@ mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
 {
 	struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;
 	/* Resize statistic memory once used out. */
-	if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&
+	if (!(pool->index % MLX5_CNT_MR_ALLOC_BULK) &&
 	    mlx5_flow_create_counter_stat_mem_mng(sh)) {
 		DRV_LOG(ERR, "Cannot resize counter stat mem.");
 		return -1;
 	}
 	rte_spinlock_lock(&pool->sl);
-	pool->raw = cmng->mem_mng->raws + pool->index %
-		    MLX5_CNT_CONTAINER_RESIZE;
+	pool->raw = cmng->mem_mng->raws + pool->index % MLX5_CNT_MR_ALLOC_BULK;
 	rte_spinlock_unlock(&pool->sl);
 	pool->raw_hw = NULL;
 	return 0;
@@ -9174,13 +9173,13 @@ void
 mlx5_flow_query_alarm(void *arg)
 {
 	struct mlx5_dev_ctx_shared *sh = arg;
-	int ret;
-	uint16_t pool_index = sh->sws_cmng.pool_index;
 	struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;
+	uint16_t pool_index = cmng->pool_index;
 	struct mlx5_flow_counter_pool *pool;
 	uint16_t n_valid;
+	int ret;
 
-	if (sh->sws_cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
+	if (cmng->pending_queries >= MLX5_MAX_PENDING_QUERIES)
 		goto set_alarm;
 	rte_spinlock_lock(&cmng->pool_update_sl);
 	pool = cmng->pools[pool_index];
@@ -9192,8 +9191,7 @@ mlx5_flow_query_alarm(void *arg)
 	if (pool->raw_hw)
 		/* There is a pool query in progress. */
 		goto set_alarm;
-	pool->raw_hw =
-		LIST_FIRST(&sh->sws_cmng.free_stat_raws);
+	pool->raw_hw = LIST_FIRST(&cmng->free_stat_raws);
 	if (!pool->raw_hw)
 		/* No free counter statistics raw memory. */
 		goto set_alarm;
@@ -9219,12 +9217,12 @@ mlx5_flow_query_alarm(void *arg)
 		goto set_alarm;
 	}
 	LIST_REMOVE(pool->raw_hw, next);
-	sh->sws_cmng.pending_queries++;
+	cmng->pending_queries++;
 	pool_index++;
 	if (pool_index >= n_valid)
 		pool_index = 0;
 set_alarm:
-	sh->sws_cmng.pool_index = pool_index;
+	cmng->pool_index = pool_index;
 	mlx5_set_query_alarm(sh);
 }
 
@@ -9755,7 +9753,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
 	}
 
 	/* get counter */
-	MLX5_ASSERT(cmng->n_valid <= cmng->n);
+	MLX5_ASSERT(cmng->n_valid <= MLX5_COUNTER_POOLS_MAX_NUM);
 	max = MLX5_COUNTERS_PER_POOL * cmng->n_valid;
 	for (j = 1; j <= max; j++) {
 		action = NULL;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 1e52278191..e77cbb862b 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -6091,7 +6091,7 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
 
 	/* Decrease to original index and clear shared bit. */
 	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
-	MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
+	MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < MLX5_COUNTER_POOLS_MAX_NUM);
 	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
 	MLX5_ASSERT(pool);
 	if (ppool)
@@ -6167,39 +6167,6 @@ flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
 	return pool;
 }
 
-/**
- * Resize a counter container.
- *
- * @param[in] dev
- *   Pointer to the Ethernet device structure.
- *
- * @return
- *   0 on success, otherwise negative errno value and rte_errno is set.
- */
-static int
-flow_dv_container_resize(struct rte_eth_dev *dev)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
-	void *old_pools = cmng->pools;
-	uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
-	uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
-	void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
-
-	if (!pools) {
-		rte_errno = ENOMEM;
-		return -ENOMEM;
-	}
-	if (old_pools)
-		memcpy(pools, old_pools, cmng->n *
-				       sizeof(struct mlx5_flow_counter_pool *));
-	cmng->n = resize;
-	cmng->pools = pools;
-	if (old_pools)
-		mlx5_free(old_pools);
-	return 0;
-}
-
 /**
  * Query a devx flow counter.
  *
@@ -6251,8 +6218,6 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
  *   The devX counter handle.
  * @param[in] age
  *   Whether the pool is for counter that was allocated for aging.
- * @param[in/out] cont_cur
- *   Pointer to the container pointer, it will be update in pool resize.
  *
  * @return
  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
@@ -6264,9 +6229,14 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_counter_pool *pool;
 	struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
-	bool fallback = priv->sh->sws_cmng.counter_fallback;
+	bool fallback = cmng->counter_fallback;
 	uint32_t size = sizeof(*pool);
 
+	if (cmng->n_valid == MLX5_COUNTER_POOLS_MAX_NUM) {
+		DRV_LOG(ERR, "All counters are in use, try again later.");
+		rte_errno = EAGAIN;
+		return NULL;
+	}
 	size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
 	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
 	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
@@ -6285,11 +6255,6 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
 	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
 	rte_spinlock_lock(&cmng->pool_update_sl);
 	pool->index = cmng->n_valid;
-	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
-		mlx5_free(pool);
-		rte_spinlock_unlock(&cmng->pool_update_sl);
-		return NULL;
-	}
 	cmng->pools[pool->index] = pool;
 	cmng->n_valid++;
 	if (unlikely(fallback)) {
@@ -12511,7 +12476,7 @@ flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
 }
 
 /**
- * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
+ * Resize the ASO age pools array by MLX5_ASO_AGE_CONTAINER_RESIZE pools.
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
@@ -12525,7 +12490,7 @@ flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
 	void *old_pools = mng->pools;
-	uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
+	uint32_t resize = mng->n + MLX5_ASO_AGE_CONTAINER_RESIZE;
 	uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
 	void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
 
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 81a33ddf09..4bca685674 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -232,27 +232,14 @@ flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t id __rte_unused)
 			break;
 	}
 	if (!cnt) {
-		struct mlx5_flow_counter_pool **pools;
 		uint32_t size;
 
-		if (n_valid == cmng->n) {
-			/* Resize the container pool array. */
-			size = sizeof(struct mlx5_flow_counter_pool *) *
-				     (n_valid + MLX5_CNT_CONTAINER_RESIZE);
-			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
-					    SOCKET_ID_ANY);
-			if (!pools)
-				return 0;
-			if (n_valid) {
-				memcpy(pools, cmng->pools,
-				       sizeof(struct mlx5_flow_counter_pool *) *
-				       n_valid);
-				mlx5_free(cmng->pools);
-			}
-			cmng->pools = pools;
-			cmng->n += MLX5_CNT_CONTAINER_RESIZE;
+		if (n_valid == MLX5_COUNTER_POOLS_MAX_NUM) {
+			DRV_LOG(ERR, "All counters are in use, try again later.");
+			rte_errno = EAGAIN;
+			return 0;
 		}
-		/* Allocate memory for new pool*/
+		/* Allocate memory for new pool */
 		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
 		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
 		if (!pool)
-- 
2.25.1



* [PATCH 2/5] net/mlx5: fix accessing the wrong counter
  2022-10-31 16:08 [PATCH 0/5] net/mlx5: some counter fixes Michael Baum
  2022-10-31 16:08 ` [PATCH 1/5] net/mlx5: fix race condition in counter pool resizing Michael Baum
@ 2022-10-31 16:08 ` Michael Baum
  2022-10-31 16:08 ` [PATCH 3/5] net/mlx5: fix missing counter elements copies in r2r cases Michael Baum
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Michael Baum @ 2022-10-31 16:08 UTC (permalink / raw)
  To: dev; +Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko, jackmin

The HWS counter has two different identifiers:
1. Type "cnt_id_t" which represents the counter inside caches and in
   the flow structure. This index cannot be zero and is mostly called
   "cnt_id".
2. Internal index, the index in the counters array, with type
   "uint32_t". It is mostly called "iidx".
The second ID is calculated from the first using the
"mlx5_hws_cnt_iidx()" function.

When a direct counter is allocated, if the queue cache is not empty, the
counter represented by cnt_id is popped from the cache. This counter may
be invalid according to the query_gen field. Thus, the "iidx" is parsed
from cnt_id and if it is valid, it is used to update the fields of the
counter structure.
When this counter is invalid, the whole cache is flushed and new
counters are fetched into the cache. After fetching, another counter
represented by cnt_id is taken from the cache.
Unfortunately, for updating fields like "in_used" or "age_idx", the
function may wrongly use the old "iidx" coming from the invalid cnt_id.

Update the "iidx" in case of an invalid counter popped from the cache.

Fixes: 4d368e1da3a4 ("net/mlx5: support flow counter action for HWS")
Cc: jackmin@nvidia.com

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Acked-by: Xiaoyu Min <jackmin@nvidia.com>
---
 drivers/net/mlx5/mlx5_hws_cnt.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index e311923f71..196604aded 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -506,6 +506,7 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
 		rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t),
 				1, &zcdc, NULL);
 		*cnt_id = *(cnt_id_t *)zcdc.ptr1;
+		iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
 	}
 	__hws_cnt_query_raw(cpool, *cnt_id, &cpool->pool[iidx].reset.hits,
 			    &cpool->pool[iidx].reset.bytes);
-- 
2.25.1



* [PATCH 3/5] net/mlx5: fix missing counter elements copies in r2r cases
  2022-10-31 16:08 [PATCH 0/5] net/mlx5: some counter fixes Michael Baum
  2022-10-31 16:08 ` [PATCH 1/5] net/mlx5: fix race condition in counter pool resizing Michael Baum
  2022-10-31 16:08 ` [PATCH 2/5] net/mlx5: fix accessing the wrong counter Michael Baum
@ 2022-10-31 16:08 ` Michael Baum
  2022-10-31 16:08 ` [PATCH 4/5] net/mlx5: add assertions in counter get/put Michael Baum
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Michael Baum @ 2022-10-31 16:08 UTC (permalink / raw)
  To: dev; +Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko, jackmin

The __hws_cnt_r2rcpy() function copies elements from one zero-copy ring
to another zero-copy ring in place.
This routine needs to consider the situation that the addresses given by
both the source and the destination could be wrapped.

It uses four different "n" local variables to manage it:
 - n:  Number of elements to copy in total.
 - n1: Number of elements to copy from ptr1; it is the minimum of the
       source and destination n1 fields.
 - n2: Number of elements to copy from src->ptr1 to dst->ptr2 or from
       src->ptr2 to dst->ptr1; this variable is 0 when the source and
       destination n1 fields are equal.
 - n3: Number of elements to copy from src->ptr2 to dst->ptr2.

The function copies the first n1 elements. If n2 isn't zero, it copies
more elements and only then checks whether n3 is zero.
This logic is wrong since n3 may be bigger than zero even when n2 is
zero. This scenario commonly happens for counters when the internal
mlx5 service thread copies elements from the reset ring into the reuse
ring.

This patch changes the function to copy n3 regardless of n2 value.
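
For reference, a self-contained sketch of the corrected copy logic
(struct zc_data is an illustrative stand-in for struct rte_ring_zc_data,
and esz stands for the element size):

  #include <string.h>

  struct zc_data {
          void *ptr1;       /* first contiguous region */
          unsigned int n1;  /* number of elements in ptr1 */
          void *ptr2;       /* region after the ring wraps, if any */
  };

  static void
  r2rcpy(struct zc_data *dst, const struct zc_data *src,
         unsigned int n, size_t esz)
  {
          unsigned int n1, n2, n3;
          char *d1 = dst->ptr1, *d2, *d3;
          const char *s1 = src->ptr1, *s2, *s3;

          if (src->n1 < dst->n1) {  /* source wraps first */
                  n1 = src->n1;
                  n2 = dst->n1 - n1;
                  s2 = src->ptr2;
                  d2 = d1 + n1 * esz;
                  s3 = s2 + n2 * esz;
                  d3 = dst->ptr2;
          } else {                  /* destination wraps first, or equal */
                  n1 = dst->n1;
                  n2 = src->n1 - n1;
                  s2 = s1 + n1 * esz;
                  d2 = dst->ptr2;
                  s3 = src->ptr2;
                  d3 = d2 + n2 * esz;
          }
          n3 = n - n1 - n2;         /* may be nonzero even when n2 == 0 */

          memcpy(d1, s1, n1 * esz);
          if (n2 != 0)
                  memcpy(d2, s2, n2 * esz);
          if (n3 != 0)              /* the fix: no longer nested under n2 */
                  memcpy(d3, s3, n3 * esz);
  }

When src->n1 == dst->n1, both rings wrap at the same point: n2 is 0 but
n3 = n - n1 still has to be copied from src->ptr2 to dst->ptr2.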

Fixes: 4d368e1da3a4 ("net/mlx5: support flow counter action for HWS")
Cc: jackmin@nvidia.com

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Acked-by: Xiaoyu Min <jackmin@nvidia.com>
---
 drivers/net/mlx5/mlx5_hws_cnt.h | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index 196604aded..6e371f1929 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -281,11 +281,10 @@ __hws_cnt_r2rcpy(struct rte_ring_zc_data *zcdd, struct rte_ring_zc_data *zcds,
 		d3 = zcdd->ptr2;
 	}
 	memcpy(d1, s1, n1 * sizeof(cnt_id_t));
-	if (n2 != 0) {
+	if (n2 != 0)
 		memcpy(d2, s2, n2 * sizeof(cnt_id_t));
-		if (n3 != 0)
-			memcpy(d3, s3, n3 * sizeof(cnt_id_t));
-	}
+	if (n3 != 0)
+		memcpy(d3, s3, n3 * sizeof(cnt_id_t));
 }
 
 static __rte_always_inline int
-- 
2.25.1



* [PATCH 4/5] net/mlx5: add assertions in counter get/put
  2022-10-31 16:08 [PATCH 0/5] net/mlx5: some counter fixes Michael Baum
                   ` (2 preceding siblings ...)
  2022-10-31 16:08 ` [PATCH 3/5] net/mlx5: fix missing counter elements copies in r2r cases Michael Baum
@ 2022-10-31 16:08 ` Michael Baum
  2022-10-31 16:08 ` [PATCH 5/5] net/mlx5: assert for enough space in counter rings Michael Baum
  2022-11-03 11:40 ` [PATCH 0/5] net/mlx5: some counter fixes Raslan Darawsheh
  5 siblings, 0 replies; 7+ messages in thread
From: Michael Baum @ 2022-10-31 16:08 UTC (permalink / raw)
  To: dev; +Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko, Xiaoyu Min

Add assertions to help debug cases of counter double allocation or
double free.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Acked-by: Xiaoyu Min <jackmin@nvidia.com>
---
 drivers/net/mlx5/mlx5_hws_cnt.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index 6e371f1929..338ee4d688 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -396,6 +396,7 @@ mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool,
 	uint32_t iidx;
 
 	iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
+	MLX5_ASSERT(cpool->pool[iidx].in_used);
 	cpool->pool[iidx].in_used = false;
 	cpool->pool[iidx].query_gen_when_free =
 		__atomic_load_n(&cpool->query_gen, __ATOMIC_RELAXED);
@@ -475,6 +476,7 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
 		__hws_cnt_query_raw(cpool, *cnt_id,
 				    &cpool->pool[iidx].reset.hits,
 				    &cpool->pool[iidx].reset.bytes);
+		MLX5_ASSERT(!cpool->pool[iidx].in_used);
 		cpool->pool[iidx].in_used = true;
 		cpool->pool[iidx].age_idx = age_idx;
 		return 0;
@@ -511,6 +513,7 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
 			    &cpool->pool[iidx].reset.bytes);
 	rte_ring_dequeue_zc_elem_finish(qcache, 1);
 	cpool->pool[iidx].share = 0;
+	MLX5_ASSERT(!cpool->pool[iidx].in_used);
 	cpool->pool[iidx].in_used = true;
 	cpool->pool[iidx].age_idx = age_idx;
 	return 0;
-- 
2.25.1



* [PATCH 5/5] net/mlx5: assert for enough space in counter rings
  2022-10-31 16:08 [PATCH 0/5] net/mlx5: some counter fixes Michael Baum
                   ` (3 preceding siblings ...)
  2022-10-31 16:08 ` [PATCH 4/5] net/mlx5: add assertions in counter get/put Michael Baum
@ 2022-10-31 16:08 ` Michael Baum
  2022-11-03 11:40 ` [PATCH 0/5] net/mlx5: some counter fixes Raslan Darawsheh
  5 siblings, 0 replies; 7+ messages in thread
From: Michael Baum @ 2022-10-31 16:08 UTC (permalink / raw)
  To: dev; +Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko, Xiaoyu Min

There is a by-design assumption in the code that the global counter
rings can contain all the port counters.
So, enqueuing to these global rings should always succeed.

Add assertions to help debug this assumption.

In addition, change the mlx5_hws_cnt_pool_put() function to return void,
since under this assumption it can never fail.
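
In a nutshell (a sketch distilled from the diff below; the capacity
guarantee itself is established at pool creation):

  /*
   * The global reset/reuse rings are created large enough to hold
   * every counter of the port, so putting a counter back can never
   * hit a full ring.
   */
  ret = rte_ring_enqueue_elem(cpool->wait_reset_list, cnt_id,
                              sizeof(cnt_id_t));
  MLX5_ASSERT(ret == 0); /* cannot fail, hence the void return type */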

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Acked-by: Xiaoyu Min <jackmin@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_hw.c |   2 +-
 drivers/net/mlx5/mlx5_hws_cnt.c |  25 ++++----
 drivers/net/mlx5/mlx5_hws_cnt.h | 106 +++++++++++++++++---------------
 3 files changed, 72 insertions(+), 61 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 2d275ad111..54a0afe45f 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -7874,7 +7874,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
 			 * time to update the AGE.
 			 */
 			mlx5_hws_age_nb_cnt_decrease(priv, age_idx);
-		ret = mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);
+		mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);
 		break;
 	case MLX5_INDIRECT_ACTION_TYPE_CT:
 		ret = flow_hw_conntrack_destroy(dev, act_idx, error);
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
index b8ce69af57..24c01eace0 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.c
+++ b/drivers/net/mlx5/mlx5_hws_cnt.c
@@ -58,13 +58,14 @@ __hws_cnt_id_load(struct mlx5_hws_cnt_pool *cpool)
 
 static void
 __mlx5_hws_cnt_svc(struct mlx5_dev_ctx_shared *sh,
-		struct mlx5_hws_cnt_pool *cpool)
+		   struct mlx5_hws_cnt_pool *cpool)
 {
 	struct rte_ring *reset_list = cpool->wait_reset_list;
 	struct rte_ring *reuse_list = cpool->reuse_list;
 	uint32_t reset_cnt_num;
 	struct rte_ring_zc_data zcdr = {0};
 	struct rte_ring_zc_data zcdu = {0};
+	uint32_t ret __rte_unused;
 
 	reset_cnt_num = rte_ring_count(reset_list);
 	do {
@@ -72,17 +73,19 @@ __mlx5_hws_cnt_svc(struct mlx5_dev_ctx_shared *sh,
 		mlx5_aso_cnt_query(sh, cpool);
 		zcdr.n1 = 0;
 		zcdu.n1 = 0;
-		rte_ring_enqueue_zc_burst_elem_start(reuse_list,
-				sizeof(cnt_id_t), reset_cnt_num, &zcdu,
-				NULL);
-		rte_ring_dequeue_zc_burst_elem_start(reset_list,
-				sizeof(cnt_id_t), reset_cnt_num, &zcdr,
-				NULL);
+		ret = rte_ring_enqueue_zc_burst_elem_start(reuse_list,
+							   sizeof(cnt_id_t),
+							   reset_cnt_num, &zcdu,
+							   NULL);
+		MLX5_ASSERT(ret == reset_cnt_num);
+		ret = rte_ring_dequeue_zc_burst_elem_start(reset_list,
+							   sizeof(cnt_id_t),
+							   reset_cnt_num, &zcdr,
+							   NULL);
+		MLX5_ASSERT(ret == reset_cnt_num);
 		__hws_cnt_r2rcpy(&zcdu, &zcdr, reset_cnt_num);
-		rte_ring_dequeue_zc_elem_finish(reset_list,
-				reset_cnt_num);
-		rte_ring_enqueue_zc_elem_finish(reuse_list,
-				reset_cnt_num);
+		rte_ring_dequeue_zc_elem_finish(reset_list, reset_cnt_num);
+		rte_ring_enqueue_zc_elem_finish(reuse_list, reset_cnt_num);
 		reset_cnt_num = rte_ring_count(reset_list);
 	} while (reset_cnt_num > 0);
 }
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index 338ee4d688..030dcead86 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -116,7 +116,7 @@ enum {
 	HWS_AGE_CANDIDATE_INSIDE_RING,
 	/*
 	 * AGE assigned to flows but it still in ring. It was aged-out but the
-	 * timeout was changed, so it in ring but stiil candidate.
+	 * timeout was changed, so it is in the ring but still a candidate.
 	 */
 	HWS_AGE_AGED_OUT_REPORTED,
 	/*
@@ -182,7 +182,7 @@ mlx5_hws_cnt_id_valid(cnt_id_t cnt_id)
  *
  * @param cpool
  *   The pointer to counter pool
- * @param index
+ * @param iidx
  *   The internal counter index.
  *
  * @return
@@ -231,32 +231,32 @@ __hws_cnt_query_raw(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
 }
 
 /**
- * Copy elems from one zero-copy ring to zero-copy ring in place.
+ * Copy elements from one zero-copy ring to zero-copy ring in place.
  *
  * The input is a rte ring zero-copy data struct, which has two pointer.
  * in case of the wrapper happened, the ptr2 will be meaningful.
  *
- * So this rountin needs to consider the situation that the address given by
+ * So this routine needs to consider the situation that the address given by
  * source and destination could be both wrapped.
  * First, calculate the first number of element needs to be copied until wrapped
  * address, which could be in source or destination.
  * Second, copy left number of element until second wrapped address. If in first
  * step the wrapped address is source, then this time it must be in destination.
- * and vice-vers.
- * Third, copy all left numbe of element.
+ * and vice-versa.
+ * Third, copy all remaining elements.
  *
  * In worst case, we need copy three pieces of continuous memory.
  *
  * @param zcdd
- *   A pointer to zero-copy data of dest ring.
+ *   A pointer to zero-copy data of destination ring.
  * @param zcds
  *   A pointer to zero-copy data of source ring.
  * @param n
- *   Number of elems to copy.
+ *   Number of elements to copy.
  */
 static __rte_always_inline void
 __hws_cnt_r2rcpy(struct rte_ring_zc_data *zcdd, struct rte_ring_zc_data *zcds,
-		unsigned int n)
+		 unsigned int n)
 {
 	unsigned int n1, n2, n3;
 	void *s1, *s2, *s3;
@@ -291,22 +291,23 @@ static __rte_always_inline int
 mlx5_hws_cnt_pool_cache_flush(struct mlx5_hws_cnt_pool *cpool,
 			      uint32_t queue_id)
 {
-	unsigned int ret;
+	unsigned int ret __rte_unused;
 	struct rte_ring_zc_data zcdr = {0};
 	struct rte_ring_zc_data zcdc = {0};
 	struct rte_ring *reset_list = NULL;
 	struct rte_ring *qcache = cpool->cache->qcache[queue_id];
+	uint32_t ring_size = rte_ring_count(qcache);
 
-	ret = rte_ring_dequeue_zc_burst_elem_start(qcache,
-			sizeof(cnt_id_t), rte_ring_count(qcache), &zcdc,
-			NULL);
-	MLX5_ASSERT(ret);
+	ret = rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t),
+						   ring_size, &zcdc, NULL);
+	MLX5_ASSERT(ret == ring_size);
 	reset_list = cpool->wait_reset_list;
-	rte_ring_enqueue_zc_burst_elem_start(reset_list,
-			sizeof(cnt_id_t), ret, &zcdr, NULL);
-	__hws_cnt_r2rcpy(&zcdr, &zcdc, ret);
-	rte_ring_enqueue_zc_elem_finish(reset_list, ret);
-	rte_ring_dequeue_zc_elem_finish(qcache, ret);
+	ret = rte_ring_enqueue_zc_burst_elem_start(reset_list, sizeof(cnt_id_t),
+						   ring_size, &zcdr, NULL);
+	MLX5_ASSERT(ret == ring_size);
+	__hws_cnt_r2rcpy(&zcdr, &zcdc, ring_size);
+	rte_ring_enqueue_zc_elem_finish(reset_list, ring_size);
+	rte_ring_dequeue_zc_elem_finish(qcache, ring_size);
 	return 0;
 }
 
@@ -323,7 +324,7 @@ mlx5_hws_cnt_pool_cache_fetch(struct mlx5_hws_cnt_pool *cpool,
 	struct rte_ring_zc_data zcdu = {0};
 	struct rte_ring_zc_data zcds = {0};
 	struct mlx5_hws_cnt_pool_caches *cache = cpool->cache;
-	unsigned int ret;
+	unsigned int ret, actual_fetch_size __rte_unused;
 
 	reuse_list = cpool->reuse_list;
 	ret = rte_ring_dequeue_zc_burst_elem_start(reuse_list,
@@ -334,7 +335,9 @@ mlx5_hws_cnt_pool_cache_fetch(struct mlx5_hws_cnt_pool *cpool,
 		rte_ring_dequeue_zc_elem_finish(reuse_list, 0);
 		free_list = cpool->free_list;
 		ret = rte_ring_dequeue_zc_burst_elem_start(free_list,
-				sizeof(cnt_id_t), cache->fetch_sz, &zcdf, NULL);
+							   sizeof(cnt_id_t),
+							   cache->fetch_sz,
+							   &zcdf, NULL);
 		zcds = zcdf;
 		list = free_list;
 		if (unlikely(ret == 0)) { /* no free counter. */
@@ -344,8 +347,10 @@ mlx5_hws_cnt_pool_cache_fetch(struct mlx5_hws_cnt_pool *cpool,
 			return -ENOENT;
 		}
 	}
-	rte_ring_enqueue_zc_burst_elem_start(qcache, sizeof(cnt_id_t),
-			ret, &zcdc, NULL);
+	actual_fetch_size = ret;
+	ret = rte_ring_enqueue_zc_burst_elem_start(qcache, sizeof(cnt_id_t),
+						   ret, &zcdc, NULL);
+	MLX5_ASSERT(ret == actual_fetch_size);
 	__hws_cnt_r2rcpy(&zcdc, &zcds, ret);
 	rte_ring_dequeue_zc_elem_finish(list, ret);
 	rte_ring_enqueue_zc_elem_finish(qcache, ret);
@@ -378,15 +383,14 @@ __mlx5_hws_cnt_pool_enqueue_revert(struct rte_ring *r, unsigned int n,
  *
  * @param cpool
  *   A pointer to the counter pool structure.
+ * @param queue
+ *   A pointer to HWS queue. If NULL, the counter is put into the common pool.
  * @param cnt_id
  *   A counter id to be added.
- * @return
- *   - 0: Success; object taken
- *   - -ENOENT: not enough entry in pool
  */
-static __rte_always_inline int
-mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool,
-		uint32_t *queue, cnt_id_t *cnt_id)
+static __rte_always_inline void
+mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
+		      cnt_id_t *cnt_id)
 {
 	unsigned int ret = 0;
 	struct rte_ring_zc_data zcdc = {0};
@@ -404,25 +408,29 @@ mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool,
 		qcache = cpool->cache->qcache[*queue];
 	if (unlikely(qcache == NULL)) {
 		ret = rte_ring_enqueue_elem(cpool->wait_reset_list, cnt_id,
-				sizeof(cnt_id_t));
+					    sizeof(cnt_id_t));
 		MLX5_ASSERT(ret == 0);
-		return ret;
+		return;
 	}
 	ret = rte_ring_enqueue_burst_elem(qcache, cnt_id, sizeof(cnt_id_t), 1,
 					  NULL);
 	if (unlikely(ret == 0)) { /* cache is full. */
+		struct rte_ring *reset_list = cpool->wait_reset_list;
+
 		wb_num = rte_ring_count(qcache) - cpool->cache->threshold;
 		MLX5_ASSERT(wb_num < rte_ring_count(qcache));
 		__mlx5_hws_cnt_pool_enqueue_revert(qcache, wb_num, &zcdc);
-		rte_ring_enqueue_zc_burst_elem_start(cpool->wait_reset_list,
-				sizeof(cnt_id_t), wb_num, &zcdr, NULL);
-		__hws_cnt_r2rcpy(&zcdr, &zcdc, wb_num);
-		rte_ring_enqueue_zc_elem_finish(cpool->wait_reset_list, wb_num);
+		ret = rte_ring_enqueue_zc_burst_elem_start(reset_list,
+							   sizeof(cnt_id_t),
+							   wb_num, &zcdr, NULL);
+		MLX5_ASSERT(ret == wb_num);
+		__hws_cnt_r2rcpy(&zcdr, &zcdc, ret);
+		rte_ring_enqueue_zc_elem_finish(reset_list, ret);
 		/* write-back THIS counter too */
-		ret = rte_ring_enqueue_burst_elem(cpool->wait_reset_list,
-				cnt_id, sizeof(cnt_id_t), 1, NULL);
+		ret = rte_ring_enqueue_burst_elem(reset_list, cnt_id,
+						  sizeof(cnt_id_t), 1, NULL);
 	}
-	return ret == 1 ? 0 : -ENOENT;
+	MLX5_ASSERT(ret == 1);
 }
 
 /**
@@ -482,15 +490,17 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
 		return 0;
 	}
 	ret = rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t), 1,
-			&zcdc, NULL);
+						   &zcdc, NULL);
 	if (unlikely(ret == 0)) { /* local cache is empty. */
 		rte_ring_dequeue_zc_elem_finish(qcache, 0);
 		/* let's fetch from global free list. */
 		ret = mlx5_hws_cnt_pool_cache_fetch(cpool, *queue);
 		if (unlikely(ret != 0))
 			return ret;
-		rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t),
-				1, &zcdc, NULL);
+		ret = rte_ring_dequeue_zc_burst_elem_start(qcache,
+							   sizeof(cnt_id_t), 1,
+							   &zcdc, NULL);
+		MLX5_ASSERT(ret == 1);
 	}
 	/* get one from local cache. */
 	*cnt_id = (*(cnt_id_t *)zcdc.ptr1);
@@ -504,8 +514,10 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
 		ret = mlx5_hws_cnt_pool_cache_fetch(cpool, *queue);
 		if (unlikely(ret != 0))
 			return ret;
-		rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t),
-				1, &zcdc, NULL);
+		ret = rte_ring_dequeue_zc_burst_elem_start(qcache,
+							   sizeof(cnt_id_t), 1,
+							   &zcdc, NULL);
+		MLX5_ASSERT(ret == 1);
 		*cnt_id = *(cnt_id_t *)zcdc.ptr1;
 		iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
 	}
@@ -553,17 +565,13 @@ mlx5_hws_cnt_shared_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id,
 	return 0;
 }
 
-static __rte_always_inline int
+static __rte_always_inline void
 mlx5_hws_cnt_shared_put(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id)
 {
-	int ret;
 	uint32_t iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
 
 	cpool->pool[iidx].share = 0;
-	ret = mlx5_hws_cnt_pool_put(cpool, NULL, cnt_id);
-	if (unlikely(ret != 0))
-		cpool->pool[iidx].share = 1; /* fail to release, restore. */
-	return ret;
+	mlx5_hws_cnt_pool_put(cpool, NULL, cnt_id);
 }
 
 static __rte_always_inline bool
-- 
2.25.1



* RE: [PATCH 0/5] net/mlx5: some counter fixes
  2022-10-31 16:08 [PATCH 0/5] net/mlx5: some counter fixes Michael Baum
                   ` (4 preceding siblings ...)
  2022-10-31 16:08 ` [PATCH 5/5] net/mlx5: assert for enough space in counter rings Michael Baum
@ 2022-11-03 11:40 ` Raslan Darawsheh
  5 siblings, 0 replies; 7+ messages in thread
From: Raslan Darawsheh @ 2022-11-03 11:40 UTC (permalink / raw)
  To: Michael Baum, dev; +Cc: Matan Azrad, Slava Ovsiienko

Hi,

> -----Original Message-----
> From: Michael Baum <michaelba@nvidia.com>
> Sent: Monday, October 31, 2022 6:08 PM
> To: dev@dpdk.org
> Cc: Matan Azrad <matan@nvidia.com>; Raslan Darawsheh
> <rasland@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>
> Subject: [PATCH 0/5] net/mlx5: some counter fixes
> 
> Some fixes for HW/SW steering counters.
> 
> Michael Baum (5):
>   net/mlx5: fix race condition in counter pool resizing
>   net/mlx5: fix accessing the wrong counter
>   net/mlx5: fix missing counter elements copies in r2r cases
>   net/mlx5: add assertions in counter get/put
>   net/mlx5: assert for enough space in counter rings
> 
>  drivers/net/mlx5/mlx5.c            |  28 ++++++-
>  drivers/net/mlx5/mlx5.h            |   7 +-
>  drivers/net/mlx5/mlx5_flow.c       |  24 +++---
>  drivers/net/mlx5/mlx5_flow_dv.c    |  53 +++----------
>  drivers/net/mlx5/mlx5_flow_hw.c    |   2 +-
>  drivers/net/mlx5/mlx5_flow_verbs.c |  23 ++----
>  drivers/net/mlx5/mlx5_hws_cnt.c    |  25 +++---
>  drivers/net/mlx5/mlx5_hws_cnt.h    | 117 ++++++++++++++++-------------
>  8 files changed, 131 insertions(+), 148 deletions(-)
> 
> --
> 2.25.1

Series applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh

