From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: <dev@dpdk.org>
Cc: <ferruh.yigit@intel.com>, <orika@nvidia.com>,
	<rasland@nvidia.com>, <matan@nvidia.com>,
	<arybchenko@solarflare.com>
Subject: [PATCH 6/9] net/mlx5: support counters in cross port shared mode
Date: Mon, 6 Feb 2023 11:52:26 +0200
Message-ID: <20230206095229.23027-6-viacheslavo@nvidia.com>
In-Reply-To: <20230206095229.23027-1-viacheslavo@nvidia.com>

In cross vHCA sharing mode the host counter pool
should be used in all counter related routines. The local
port pool only stores the dedicated DR action
handles; the per-queue counter caches and query data are
not needed and are not allocated in the local pool.
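
The resulting pattern in the counter routines is (condensed
from the mlx5_hws_cnt.h changes below; error handling omitted):

	/* Resolve the pool that actually owns the counter data:
	 * cfg.host_cpool points to the host port pool in cross
	 * vHCA sharing mode, otherwise the local pool is used.
	 */
	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);

	/* Indexing, query and share state come from the host pool. */
	iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);
	cnt = &hpool->pool[iidx];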

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_hw.c |  12 ++-
 drivers/net/mlx5/mlx5_hws_cnt.c | 163 ++++++++++++++++----------------
 drivers/net/mlx5/mlx5_hws_cnt.h | 109 +++++++++++----------
 3 files changed, 150 insertions(+), 134 deletions(-)
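
Note: the DR action handle stays per port while the DevX counter
objects come from the host pool; condensed from the
mlx5_hws_cnt_pool_action_create() hunk below (error handling omitted):

	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
	uint32_t idx;

	for (idx = 0; idx < hpool->dcs_mng.batch_total; idx++) {
		/* The host pool owns the DevX counter objects... */
		struct mlx5_hws_cnt_dcs *hdcs = &hpool->dcs_mng.dcs[idx];
		/* ...the local pool stores the per-port DR action. */
		struct mlx5_hws_cnt_dcs *dcs = &cpool->dcs_mng.dcs[idx];

		dcs->dr_action = mlx5dr_action_create_counter(priv->dr_ctx,
					(struct mlx5dr_devx_obj *)hdcs->obj,
					flags);
	}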

diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index fc87e687c9..1495bc384e 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -2311,8 +2311,10 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
 				break;
 			/* Fall-through. */
 		case RTE_FLOW_ACTION_TYPE_COUNT:
-			ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, &queue,
-						    &cnt_id, age_idx);
+			ret = mlx5_hws_cnt_pool_get(priv->hws_cpool,
+					(priv->shared_refcnt ||
+					 priv->hws_cpool->cfg.host_cpool) ?
+					NULL : &queue, &cnt_id, age_idx);
 			if (ret != 0)
 				return ret;
 			ret = mlx5_hws_cnt_pool_get_action_offset
@@ -7997,6 +7999,7 @@ static int
 flow_hw_query_counter(const struct rte_eth_dev *dev, uint32_t counter,
 		      void *data, struct rte_flow_error *error)
 {
+	struct mlx5_hws_cnt_pool *hpool;
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hws_cnt *cnt;
 	struct rte_flow_query_count *qc = data;
@@ -8007,8 +8010,9 @@ flow_hw_query_counter(const struct rte_eth_dev *dev, uint32_t counter,
 		return rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 				"counter are not available");
-	iidx = mlx5_hws_cnt_iidx(priv->hws_cpool, counter);
-	cnt = &priv->hws_cpool->pool[iidx];
+	hpool = mlx5_hws_cnt_host_pool(priv->hws_cpool);
+	iidx = mlx5_hws_cnt_iidx(hpool, counter);
+	cnt = &hpool->pool[iidx];
 	__hws_cnt_query_raw(priv->hws_cpool, counter, &pkts, &bytes);
 	qc->hits_set = 1;
 	qc->bytes_set = 1;
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
index afc93821e4..12b2cb0b27 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.c
+++ b/drivers/net/mlx5/mlx5_hws_cnt.c
@@ -24,12 +24,8 @@
 static void
 __hws_cnt_id_load(struct mlx5_hws_cnt_pool *cpool)
 {
-	uint32_t preload;
-	uint32_t q_num = cpool->cache->q_num;
 	uint32_t cnt_num = mlx5_hws_cnt_pool_get_size(cpool);
-	cnt_id_t cnt_id;
-	uint32_t qidx, iidx = 0;
-	struct rte_ring *qcache = NULL;
+	uint32_t iidx;
 
 	/*
 	 * Counter ID order is important for tracking the max number of in used
@@ -39,18 +35,9 @@ __hws_cnt_id_load(struct mlx5_hws_cnt_pool *cpool)
 	 * and then the global free list.
 	 * In the end, user fetch the counter from minimal to the maximum.
 	 */
-	preload = RTE_MIN(cpool->cache->preload_sz, cnt_num / q_num);
-	for (qidx = 0; qidx < q_num; qidx++) {
-		for (; iidx < preload * (qidx + 1); iidx++) {
-			cnt_id = mlx5_hws_cnt_id_gen(cpool, iidx);
-			qcache = cpool->cache->qcache[qidx];
-			if (qcache)
-				rte_ring_enqueue_elem(qcache, &cnt_id,
-						sizeof(cnt_id));
-		}
-	}
-	for (; iidx < cnt_num; iidx++) {
-		cnt_id = mlx5_hws_cnt_id_gen(cpool, iidx);
+	for (iidx = 0; iidx < cnt_num; iidx++) {
+		cnt_id_t cnt_id = mlx5_hws_cnt_id_gen(cpool, iidx);
+
 		rte_ring_enqueue_elem(cpool->free_list, &cnt_id,
 				sizeof(cnt_id));
 	}
@@ -334,7 +321,26 @@ mlx5_hws_cnt_svc(void *opaque)
 	return NULL;
 }
 
-struct mlx5_hws_cnt_pool *
+static void
+mlx5_hws_cnt_pool_deinit(struct mlx5_hws_cnt_pool * const cntp)
+{
+	uint32_t qidx = 0;
+	if (cntp == NULL)
+		return;
+	rte_ring_free(cntp->free_list);
+	rte_ring_free(cntp->wait_reset_list);
+	rte_ring_free(cntp->reuse_list);
+	if (cntp->cache) {
+		for (qidx = 0; qidx < cntp->cache->q_num; qidx++)
+			rte_ring_free(cntp->cache->qcache[qidx]);
+	}
+	mlx5_free(cntp->cache);
+	mlx5_free(cntp->raw_mng);
+	mlx5_free(cntp->pool);
+	mlx5_free(cntp);
+}
+
+static struct mlx5_hws_cnt_pool *
 mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
 		       const struct mlx5_hws_cnt_pool_cfg *pcfg,
 		       const struct mlx5_hws_cache_param *ccfg)
@@ -352,6 +358,8 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
 		return NULL;
 
 	cntp->cfg = *pcfg;
+	if (cntp->cfg.host_cpool)
+		return cntp;
 	cntp->cache = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,
 			sizeof(*cntp->cache) +
 			sizeof(((struct mlx5_hws_cnt_pool_caches *)0)->qcache[0])
@@ -387,8 +395,9 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
 		goto error;
 	snprintf(mz_name, sizeof(mz_name), "%s_F_RING", pcfg->name);
 	cntp->free_list = rte_ring_create_elem(mz_name, sizeof(cnt_id_t),
-			(uint32_t)cnt_num, SOCKET_ID_ANY,
-			RING_F_SP_ENQ | RING_F_MC_HTS_DEQ | RING_F_EXACT_SZ);
+				(uint32_t)cnt_num, SOCKET_ID_ANY,
+				RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ |
+				RING_F_EXACT_SZ);
 	if (cntp->free_list == NULL) {
 		DRV_LOG(ERR, "failed to create free list ring");
 		goto error;
@@ -404,7 +413,7 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
 	snprintf(mz_name, sizeof(mz_name), "%s_U_RING", pcfg->name);
 	cntp->reuse_list = rte_ring_create_elem(mz_name, sizeof(cnt_id_t),
 			(uint32_t)cnt_num, SOCKET_ID_ANY,
-			RING_F_SP_ENQ | RING_F_MC_HTS_DEQ | RING_F_EXACT_SZ);
+			RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ | RING_F_EXACT_SZ);
 	if (cntp->reuse_list == NULL) {
 		DRV_LOG(ERR, "failed to create reuse list ring");
 		goto error;
@@ -427,25 +436,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
 	return NULL;
 }
 
-void
-mlx5_hws_cnt_pool_deinit(struct mlx5_hws_cnt_pool * const cntp)
-{
-	uint32_t qidx = 0;
-	if (cntp == NULL)
-		return;
-	rte_ring_free(cntp->free_list);
-	rte_ring_free(cntp->wait_reset_list);
-	rte_ring_free(cntp->reuse_list);
-	if (cntp->cache) {
-		for (qidx = 0; qidx < cntp->cache->q_num; qidx++)
-			rte_ring_free(cntp->cache->qcache[qidx]);
-	}
-	mlx5_free(cntp->cache);
-	mlx5_free(cntp->raw_mng);
-	mlx5_free(cntp->pool);
-	mlx5_free(cntp);
-}
-
 int
 mlx5_hws_cnt_service_thread_create(struct mlx5_dev_ctx_shared *sh)
 {
@@ -482,7 +472,7 @@ mlx5_hws_cnt_service_thread_destroy(struct mlx5_dev_ctx_shared *sh)
 	sh->cnt_svc->service_thread = 0;
 }
 
-int
+static int
 mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh,
 			    struct mlx5_hws_cnt_pool *cpool)
 {
@@ -494,6 +484,7 @@ mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh,
 	struct mlx5_devx_counter_attr attr = {0};
 	struct mlx5_devx_obj *dcs;
 
+	MLX5_ASSERT(cpool->cfg.host_cpool == NULL);
 	if (hca_attr->flow_counter_bulk_log_max_alloc == 0) {
 		DRV_LOG(ERR, "Fw doesn't support bulk log max alloc");
 		return -1;
@@ -549,7 +540,7 @@ mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh,
 	return -1;
 }
 
-void
+static void
 mlx5_hws_cnt_pool_dcs_free(struct mlx5_dev_ctx_shared *sh,
 			   struct mlx5_hws_cnt_pool *cpool)
 {
@@ -565,22 +556,39 @@ mlx5_hws_cnt_pool_dcs_free(struct mlx5_dev_ctx_shared *sh,
 	}
 }
 
-int
+static void
+mlx5_hws_cnt_pool_action_destroy(struct mlx5_hws_cnt_pool *cpool)
+{
+	uint32_t idx;
+
+	for (idx = 0; idx < cpool->dcs_mng.batch_total; idx++) {
+		struct mlx5_hws_cnt_dcs *dcs = &cpool->dcs_mng.dcs[idx];
+
+		if (dcs->dr_action != NULL) {
+			mlx5dr_action_destroy(dcs->dr_action);
+			dcs->dr_action = NULL;
+		}
+	}
+}
+
+static int
 mlx5_hws_cnt_pool_action_create(struct mlx5_priv *priv,
 		struct mlx5_hws_cnt_pool *cpool)
 {
+	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
 	uint32_t idx;
 	int ret = 0;
-	struct mlx5_hws_cnt_dcs *dcs;
 	uint32_t flags;
 
 	flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
 	if (priv->sh->config.dv_esw_en && priv->master)
 		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
-	for (idx = 0; idx < cpool->dcs_mng.batch_total; idx++) {
-		dcs = &cpool->dcs_mng.dcs[idx];
+	for (idx = 0; idx < hpool->dcs_mng.batch_total; idx++) {
+		struct mlx5_hws_cnt_dcs *hdcs = &hpool->dcs_mng.dcs[idx];
+		struct mlx5_hws_cnt_dcs *dcs = &cpool->dcs_mng.dcs[idx];
+
 		dcs->dr_action = mlx5dr_action_create_counter(priv->dr_ctx,
-					(struct mlx5dr_devx_obj *)dcs->obj,
+					(struct mlx5dr_devx_obj *)hdcs->obj,
 					flags);
 		if (dcs->dr_action == NULL) {
 			mlx5_hws_cnt_pool_action_destroy(cpool);
@@ -591,21 +599,6 @@ mlx5_hws_cnt_pool_action_create(struct mlx5_priv *priv,
 	return ret;
 }
 
-void
-mlx5_hws_cnt_pool_action_destroy(struct mlx5_hws_cnt_pool *cpool)
-{
-	uint32_t idx;
-	struct mlx5_hws_cnt_dcs *dcs;
-
-	for (idx = 0; idx < cpool->dcs_mng.batch_total; idx++) {
-		dcs = &cpool->dcs_mng.dcs[idx];
-		if (dcs->dr_action != NULL) {
-			mlx5dr_action_destroy(dcs->dr_action);
-			dcs->dr_action = NULL;
-		}
-	}
-}
-
 struct mlx5_hws_cnt_pool *
 mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
 		const struct rte_flow_port_attr *pattr, uint16_t nb_queue)
@@ -618,11 +611,28 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
 	int ret = 0;
 	size_t sz;
 
+	mp_name = mlx5_malloc(MLX5_MEM_ZERO, RTE_MEMZONE_NAMESIZE, 0,
+			SOCKET_ID_ANY);
+	if (mp_name == NULL)
+		goto error;
+	snprintf(mp_name, RTE_MEMZONE_NAMESIZE, "MLX5_HWS_CNT_POOL_%u",
+			dev->data->port_id);
+	pcfg.name = mp_name;
+	pcfg.request_num = pattr->nb_counters;
+	pcfg.alloc_factor = HWS_CNT_ALLOC_FACTOR_DEFAULT;
 	if (pattr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
-		DRV_LOG(ERR, "Counters are not supported "
-			     "in cross vHCA sharing mode");
-		rte_errno = ENOTSUP;
-		return NULL;
+		struct mlx5_priv *host_priv =
+				priv->shared_host->data->dev_private;
+		struct mlx5_hws_cnt_pool *chost = host_priv->hws_cpool;
+
+		pcfg.host_cpool = chost;
+		cpool = mlx5_hws_cnt_pool_init(priv->sh, &pcfg, &cparam);
+		if (cpool == NULL)
+			goto error;
+		ret = mlx5_hws_cnt_pool_action_create(priv, cpool);
+		if (ret != 0)
+			goto error;
+		return cpool;
 	}
 	/* init cnt service if not. */
 	if (priv->sh->cnt_svc == NULL) {
@@ -635,15 +645,6 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
 	cparam.q_num = nb_queue;
 	cparam.threshold = HWS_CNT_CACHE_THRESHOLD_DEFAULT;
 	cparam.size = HWS_CNT_CACHE_SZ_DEFAULT;
-	pcfg.alloc_factor = HWS_CNT_ALLOC_FACTOR_DEFAULT;
-	mp_name = mlx5_malloc(MLX5_MEM_ZERO, RTE_MEMZONE_NAMESIZE, 0,
-			SOCKET_ID_ANY);
-	if (mp_name == NULL)
-		goto error;
-	snprintf(mp_name, RTE_MEMZONE_NAMESIZE, "MLX5_HWS_CNT_POOL_%u",
-			dev->data->port_id);
-	pcfg.name = mp_name;
-	pcfg.request_num = pattr->nb_counters;
 	cpool = mlx5_hws_cnt_pool_init(priv->sh, &pcfg, &cparam);
 	if (cpool == NULL)
 		goto error;
@@ -678,11 +679,15 @@ mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh,
 {
 	if (cpool == NULL)
 		return;
-	if (--sh->cnt_svc->refcnt == 0)
-		mlx5_hws_cnt_svc_deinit(sh);
+	if (cpool->cfg.host_cpool == NULL) {
+		if (--sh->cnt_svc->refcnt == 0)
+			mlx5_hws_cnt_svc_deinit(sh);
+	}
 	mlx5_hws_cnt_pool_action_destroy(cpool);
-	mlx5_hws_cnt_pool_dcs_free(sh, cpool);
-	mlx5_hws_cnt_raw_data_free(sh, cpool->raw_mng);
+	if (cpool->cfg.host_cpool == NULL) {
+		mlx5_hws_cnt_pool_dcs_free(sh, cpool);
+		mlx5_hws_cnt_raw_data_free(sh, cpool->raw_mng);
+	}
 	mlx5_free((void *)cpool->cfg.name);
 	mlx5_hws_cnt_pool_deinit(cpool);
 }
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index 030dcead86..d35d083eeb 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -86,6 +86,7 @@ struct mlx5_hws_cnt_pool_cfg {
 	char *name;
 	uint32_t request_num;
 	uint32_t alloc_factor;
+	struct mlx5_hws_cnt_pool *host_cpool;
 };
 
 struct mlx5_hws_cnt_pool_caches {
@@ -148,6 +149,22 @@ struct mlx5_hws_age_param {
 	void *context; /* Flow AGE context. */
 } __rte_packed __rte_cache_aligned;
 
+
+/**
+ * Return the counter pool that actually owns the counters: the host
+ * pool in cross vHCA sharing mode, the local pool otherwise.
+ *
+ * @param cpool
+ *   The local counter pool pointer.
+ * @return
+ *   The host pool if cross port sharing is configured, cpool otherwise.
+ */
+static __rte_always_inline struct mlx5_hws_cnt_pool *
+mlx5_hws_cnt_host_pool(struct mlx5_hws_cnt_pool *cpool)
+{
+	return cpool->cfg.host_cpool ? cpool->cfg.host_cpool : cpool;
+}
+
 /**
  * Translate counter id into internal index (start from 0), which can be used
  * as index of raw/cnt pool.
@@ -160,11 +177,12 @@ struct mlx5_hws_age_param {
 static __rte_always_inline uint32_t
 mlx5_hws_cnt_iidx(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
 {
+	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
 	uint8_t dcs_idx = cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET;
 	uint32_t offset = cnt_id & MLX5_HWS_CNT_IDX_MASK;
 
 	dcs_idx &= MLX5_HWS_CNT_DCS_IDX_MASK;
-	return (cpool->dcs_mng.dcs[dcs_idx].iidx + offset);
+	return (hpool->dcs_mng.dcs[dcs_idx].iidx + offset);
 }
 
 /**
@@ -191,7 +209,8 @@ mlx5_hws_cnt_id_valid(cnt_id_t cnt_id)
 static __rte_always_inline cnt_id_t
 mlx5_hws_cnt_id_gen(struct mlx5_hws_cnt_pool *cpool, uint32_t iidx)
 {
-	struct mlx5_hws_cnt_dcs_mng *dcs_mng = &cpool->dcs_mng;
+	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
+	struct mlx5_hws_cnt_dcs_mng *dcs_mng = &hpool->dcs_mng;
 	uint32_t idx;
 	uint32_t offset;
 	cnt_id_t cnt_id;
@@ -212,7 +231,8 @@ static __rte_always_inline void
 __hws_cnt_query_raw(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
 		uint64_t *raw_pkts, uint64_t *raw_bytes)
 {
-	struct mlx5_hws_cnt_raw_data_mng *raw_mng = cpool->raw_mng;
+	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
+	struct mlx5_hws_cnt_raw_data_mng *raw_mng = hpool->raw_mng;
 	struct flow_counter_stats s[2];
 	uint8_t i = 0x1;
 	size_t stat_sz = sizeof(s[0]);
@@ -393,22 +413,23 @@ mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
 		      cnt_id_t *cnt_id)
 {
 	unsigned int ret = 0;
+	struct mlx5_hws_cnt_pool *hpool;
 	struct rte_ring_zc_data zcdc = {0};
 	struct rte_ring_zc_data zcdr = {0};
 	struct rte_ring *qcache = NULL;
 	unsigned int wb_num = 0; /* cache write-back number. */
 	uint32_t iidx;
 
-	iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
-	MLX5_ASSERT(cpool->pool[iidx].in_used);
-	cpool->pool[iidx].in_used = false;
-	cpool->pool[iidx].query_gen_when_free =
-		__atomic_load_n(&cpool->query_gen, __ATOMIC_RELAXED);
-	if (likely(queue != NULL))
-		qcache = cpool->cache->qcache[*queue];
+	hpool = mlx5_hws_cnt_host_pool(cpool);
+	iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
+	hpool->pool[iidx].in_used = false;
+	hpool->pool[iidx].query_gen_when_free =
+		__atomic_load_n(&hpool->query_gen, __ATOMIC_RELAXED);
+	if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
+		qcache = hpool->cache->qcache[*queue];
 	if (unlikely(qcache == NULL)) {
-		ret = rte_ring_enqueue_elem(cpool->wait_reset_list, cnt_id,
-					    sizeof(cnt_id_t));
+		ret = rte_ring_enqueue_elem(hpool->wait_reset_list, cnt_id,
+				sizeof(cnt_id_t));
 		MLX5_ASSERT(ret == 0);
 		return;
 	}
@@ -465,9 +486,10 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
 	uint32_t iidx, query_gen = 0;
 	cnt_id_t tmp_cid = 0;
 
-	if (likely(queue != NULL))
+	if (likely(queue != NULL && cpool->cfg.host_cpool == NULL))
 		qcache = cpool->cache->qcache[*queue];
 	if (unlikely(qcache == NULL)) {
+		cpool = mlx5_hws_cnt_host_pool(cpool);
 		ret = rte_ring_dequeue_elem(cpool->reuse_list, &tmp_cid,
 				sizeof(cnt_id_t));
 		if (unlikely(ret != 0)) {
@@ -534,7 +556,9 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
 static __rte_always_inline unsigned int
 mlx5_hws_cnt_pool_get_size(struct mlx5_hws_cnt_pool *cpool)
 {
-	return rte_ring_get_capacity(cpool->free_list);
+	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
+
+	return rte_ring_get_capacity(hpool->free_list);
 }
 
 static __rte_always_inline int
@@ -554,51 +578,56 @@ static __rte_always_inline int
 mlx5_hws_cnt_shared_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id,
 			uint32_t age_idx)
 {
-	int ret;
+	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
 	uint32_t iidx;
+	int ret;
 
-	ret = mlx5_hws_cnt_pool_get(cpool, NULL, cnt_id, age_idx);
+	ret = mlx5_hws_cnt_pool_get(hpool, NULL, cnt_id, age_idx);
 	if (ret != 0)
 		return ret;
-	iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
-	cpool->pool[iidx].share = 1;
+	iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
+	hpool->pool[iidx].share = 1;
 	return 0;
 }
 
 static __rte_always_inline void
 mlx5_hws_cnt_shared_put(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id)
 {
-	uint32_t iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
+	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
+	uint32_t iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
 
-	cpool->pool[iidx].share = 0;
-	mlx5_hws_cnt_pool_put(cpool, NULL, cnt_id);
+	hpool->pool[iidx].share = 0;
+	mlx5_hws_cnt_pool_put(hpool, NULL, cnt_id);
 }
 
 static __rte_always_inline bool
 mlx5_hws_cnt_is_shared(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
 {
-	uint32_t iidx = mlx5_hws_cnt_iidx(cpool, cnt_id);
+	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
+	uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);
 
-	return cpool->pool[iidx].share ? true : false;
+	return hpool->pool[iidx].share ? true : false;
 }
 
 static __rte_always_inline void
 mlx5_hws_cnt_age_set(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
 		     uint32_t age_idx)
 {
-	uint32_t iidx = mlx5_hws_cnt_iidx(cpool, cnt_id);
+	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
+	uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);
 
-	MLX5_ASSERT(cpool->pool[iidx].share);
-	cpool->pool[iidx].age_idx = age_idx;
+	MLX5_ASSERT(hpool->pool[iidx].share);
+	hpool->pool[iidx].age_idx = age_idx;
 }
 
 static __rte_always_inline uint32_t
 mlx5_hws_cnt_age_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
 {
-	uint32_t iidx = mlx5_hws_cnt_iidx(cpool, cnt_id);
+	struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
+	uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);
 
-	MLX5_ASSERT(cpool->pool[iidx].share);
-	return cpool->pool[iidx].age_idx;
+	MLX5_ASSERT(hpool->pool[iidx].share);
+	return hpool->pool[iidx].age_idx;
 }
 
 static __rte_always_inline cnt_id_t
@@ -645,34 +674,12 @@ mlx5_hws_age_is_indirect(uint32_t age_idx)
 }
 
 /* init HWS counter pool. */
-struct mlx5_hws_cnt_pool *
-mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
-		       const struct mlx5_hws_cnt_pool_cfg *pcfg,
-		       const struct mlx5_hws_cache_param *ccfg);
-
-void
-mlx5_hws_cnt_pool_deinit(struct mlx5_hws_cnt_pool *cntp);
-
 int
 mlx5_hws_cnt_service_thread_create(struct mlx5_dev_ctx_shared *sh);
 
 void
 mlx5_hws_cnt_service_thread_destroy(struct mlx5_dev_ctx_shared *sh);
 
-int
-mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh,
-		struct mlx5_hws_cnt_pool *cpool);
-void
-mlx5_hws_cnt_pool_dcs_free(struct mlx5_dev_ctx_shared *sh,
-		struct mlx5_hws_cnt_pool *cpool);
-
-int
-mlx5_hws_cnt_pool_action_create(struct mlx5_priv *priv,
-		struct mlx5_hws_cnt_pool *cpool);
-
-void
-mlx5_hws_cnt_pool_action_destroy(struct mlx5_hws_cnt_pool *cpool);
-
 struct mlx5_hws_cnt_pool *
 mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
 		const struct rte_flow_port_attr *pattr, uint16_t nb_queue);
-- 
2.18.1


Thread overview: 46+ messages
2022-12-28 16:54 [RFC] ethdev: sharing indirect actions between ports Viacheslav Ovsiienko
2023-01-08 14:20 ` Ori Kam
2023-01-18 12:07 ` Thomas Monjalon
2023-01-18 15:17   ` Ori Kam
2023-01-18 16:21     ` Thomas Monjalon
2023-01-18 16:37       ` Slava Ovsiienko
2023-01-20 12:22         ` Andrew Rybchenko
2023-01-26 15:15           ` Ori Kam
2023-02-06  9:52 ` [PATCH 1/9] " Viacheslav Ovsiienko
2023-02-06  9:52   ` [PATCH 2/9] net/mlx5/hws: Matcher, Free FT from RTC id before set the new value Viacheslav Ovsiienko
2023-02-06  9:52   ` [PATCH 3/9] net/mlx5/hws: fix disconnecting matcher Viacheslav Ovsiienko
2023-02-06  9:52   ` [PATCH 4/9] common/mlx5: add cross port object sharing capability Viacheslav Ovsiienko
2023-02-06  9:52   ` [PATCH 5/9] net/mlx5: add cross port shared mode for HW steering Viacheslav Ovsiienko
2023-02-06  9:52   ` Viacheslav Ovsiienko [this message]
2023-02-06  9:52   ` [PATCH 7/9] app/testpmd: add host port parameter into flow config Viacheslav Ovsiienko
2023-02-06  9:52   ` [PATCH 8/9] app/testpmd: add shared indirect action support Viacheslav Ovsiienko
2023-02-06  9:52   ` [PATCH 9/9] doc: update cross-port indirect shared action Viacheslav Ovsiienko
2023-02-07 14:01   ` [PATCH v2 1/9] ethdev: sharing indirect actions between ports Viacheslav Ovsiienko
2023-02-07 14:01     ` [PATCH v2 2/9] app/testpmd: add host port parameter into flow config Viacheslav Ovsiienko
2023-02-09 14:48       ` Ori Kam
2023-02-07 14:02     ` [PATCH v2 3/9] app/testpmd: add shared indirect action support Viacheslav Ovsiienko
2023-02-09 14:48       ` Ori Kam
2023-02-07 14:02     ` [PATCH v2 4/9] net/mlx5/hws: free FT from RTC id before set the new value Viacheslav Ovsiienko
2023-02-07 14:02     ` [PATCH v2 5/9] net/mlx5/hws: fix disconnecting matcher Viacheslav Ovsiienko
2023-02-07 14:02     ` [PATCH v2 6/9] common/mlx5: add cross port object sharing capability Viacheslav Ovsiienko
2023-02-07 14:02     ` [PATCH v2 7/9] net/mlx5: add cross port shared mode for HW steering Viacheslav Ovsiienko
2023-02-07 14:02     ` [PATCH v2 8/9] net/mlx5: support counters in cross port shared mode Viacheslav Ovsiienko
2023-02-07 14:02     ` [PATCH v2 9/9] doc: update cross-port indirect shared action Viacheslav Ovsiienko
2023-02-09 14:49       ` Ori Kam
2023-02-10 14:35       ` Ferruh Yigit
2023-02-08 12:21     ` [PATCH v2 1/9] ethdev: sharing indirect actions between ports Ori Kam
2023-02-09 14:47     ` Ori Kam
2023-02-10 14:34     ` Ferruh Yigit
2023-02-10 14:38       ` Slava Ovsiienko
2023-02-10 15:17   ` [PATCH v3 0/3] *ethdev: sharing indirect actions between port* Viacheslav Ovsiienko
2023-02-10 15:17     ` [PATCH v3 1/3] ethdev: sharing indirect actions between ports Viacheslav Ovsiienko
2023-02-10 15:17     ` [PATCH v3 2/3] app/testpmd: add host port parameter into flow config Viacheslav Ovsiienko
2023-02-10 15:17     ` [PATCH v3 3/3] app/testpmd: add shared indirect action support Viacheslav Ovsiienko
2023-02-10 23:02     ` [PATCH v3 0/3] *ethdev: sharing indirect actions between port* Ferruh Yigit
2023-02-13 13:37   ` [PATCH v4 0/5] net/mlx5: sharing indirect actions between port Viacheslav Ovsiienko
2023-02-13 13:37     ` [PATCH v4 1/5] net/mlx5/hws: free FT from RTC ID before set the new value Viacheslav Ovsiienko
2023-02-13 13:37     ` [PATCH v4 2/5] net/mlx5/hws: fix disconnecting matcher Viacheslav Ovsiienko
2023-02-13 13:37     ` [PATCH v4 3/5] common/mlx5: add cross port object sharing capability Viacheslav Ovsiienko
2023-02-13 13:37     ` [PATCH v4 4/5] net/mlx5: add cross port shared mode for HW steering Viacheslav Ovsiienko
2023-02-13 13:37     ` [PATCH v4 5/5] net/mlx5: support counters in cross port shared mode Viacheslav Ovsiienko
2023-02-15 13:29     ` [PATCH v4 0/5] net/mlx5: sharing indirect actions between port Raslan Darawsheh
