From: Suanming Mou <suanmingm@mellanox.com>
To: viacheslavo@mellanox.com, matan@mellanox.com
Cc: rasland@mellanox.com, dev@dpdk.org
Subject: [dpdk-dev] [PATCH 2/3] net/mlx5: manage shared counters in Three-Level table
Date: Thu, 18 Jun 2020 15:24:43 +0800	[thread overview]
Message-ID: <1592465084-140601-3-git-send-email-suanmingm@mellanox.com> (raw)
In-Reply-To: <1592465084-140601-1-git-send-email-suanmingm@mellanox.com>

Currently, to check whether a shared counter with the same ID already
exists, the code has to loop over the counter pools and search for the
counter. Keeping the counters in a list does not help much either once
there are thousands of shared counters in the list.

Looking up the counter index saved in the relevant Three-Level table
entry is more efficient than searching.

This patch introduces the Three-Level table to save the counter index
for each shared counter ID. The next time the same ID comes, checking
the table entry for that ID returns the counter index directly; no
search is needed.
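
As background, here is a minimal stand-alone sketch of the three-level
indexing idea: the 32-bit ID is split into three index fields, each
selecting a slot in a lazily allocated level, so a lookup costs O(1)
and memory is only spent on ID ranges actually in use. All names, bit
widths and the allocation policy below are illustrative only; the real
implementation is the mlx5_l3t_* utility introduced in patch 1/3.

#include <stdint.h>
#include <stdlib.h>

/* 12 + 10 + 10 index bits cover the whole 32-bit ID space. */
#define L1_BITS 12
#define L2_BITS 10
#define L3_BITS 10

struct l3_tbl { uint32_t entry[1u << L3_BITS]; };
struct l2_tbl { struct l3_tbl *tbl[1u << L2_BITS]; };
struct l1_tbl { struct l2_tbl *tbl[1u << L1_BITS]; };

/* Return the value stored for @id, or 0 when no path exists yet. */
static uint32_t
l3t_get(struct l1_tbl *t, uint32_t id)
{
	struct l2_tbl *l2 = t->tbl[id >> (L2_BITS + L3_BITS)];
	struct l3_tbl *l3;

	if (l2 == NULL)
		return 0;
	l3 = l2->tbl[(id >> L3_BITS) & ((1u << L2_BITS) - 1)];
	if (l3 == NULL)
		return 0;
	return l3->entry[id & ((1u << L3_BITS) - 1)];
}

/* Store @val for @id, allocating middle/bottom tables on demand. */
static int
l3t_set(struct l1_tbl *t, uint32_t id, uint32_t val)
{
	uint32_t i1 = id >> (L2_BITS + L3_BITS);
	uint32_t i2 = (id >> L3_BITS) & ((1u << L2_BITS) - 1);

	if (t->tbl[i1] == NULL) {
		t->tbl[i1] = calloc(1, sizeof(*t->tbl[i1]));
		if (t->tbl[i1] == NULL)
			return -1;
	}
	if (t->tbl[i1]->tbl[i2] == NULL) {
		t->tbl[i1]->tbl[i2] = calloc(1, sizeof(*t->tbl[i1]->tbl[i2]));
		if (t->tbl[i1]->tbl[i2] == NULL)
			return -1;
	}
	t->tbl[i1]->tbl[i2]->entry[id & ((1u << L3_BITS) - 1)] = val;
	return 0;
}

A zero entry doubles as "no entry" in this scheme, which is why the
lookup hunk below treats a zero dword as a miss.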

Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
---
 drivers/net/mlx5/mlx5.c         | 13 ++++++++++
 drivers/net/mlx5/mlx5.h         |  1 +
 drivers/net/mlx5/mlx5_flow_dv.c | 53 ++++++++++++++++++++++++-----------------
 3 files changed, 45 insertions(+), 22 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 5c86f6f..4c0c26e 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -716,6 +716,11 @@ struct mlx5_dev_ctx_shared *
 	mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
 			      &sh->share_cache.dereg_mr_cb);
 	mlx5_os_dev_shared_handler_install(sh);
+	sh->cnt_id_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
+	if (!sh->cnt_id_tbl) {
+		err = rte_errno;
+		goto error;
+	}
 	mlx5_flow_aging_init(sh);
 	mlx5_flow_counters_mng_init(sh);
 	mlx5_flow_ipool_create(sh, config);
@@ -732,6 +737,10 @@ struct mlx5_dev_ctx_shared *
 error:
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 	MLX5_ASSERT(sh);
+	if (sh->cnt_id_tbl) {
+		mlx5_l3t_destroy(sh->cnt_id_tbl);
+		sh->cnt_id_tbl = NULL;
+	}
 	if (sh->tis)
 		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
 	if (sh->td)
@@ -793,6 +802,10 @@ struct mlx5_dev_ctx_shared *
 	mlx5_flow_counters_mng_close(sh);
 	mlx5_flow_ipool_destroy(sh);
 	mlx5_os_dev_shared_handler_uninstall(sh);
+	if (sh->cnt_id_tbl) {
+		mlx5_l3t_destroy(sh->cnt_id_tbl);
+		sh->cnt_id_tbl = NULL;
+	}
 	if (sh->pd)
 		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
 	if (sh->tis)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 5bd5acd..1ee9da7 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -565,6 +565,7 @@ struct mlx5_dev_ctx_shared {
 	struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
 	struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
 	/* Memory Pool for mlx5 flow resources. */
+	struct mlx5_l3t_tbl *cnt_id_tbl; /* Shared counter lookup table. */
 	/* Shared interrupt handler section. */
 	struct rte_intr_handle intr_handle; /* Interrupt handler for device. */
 	struct rte_intr_handle intr_handle_devx; /* DEVX interrupt handler. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 5bb252e..6e4e10c 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -4453,8 +4453,8 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Search for existed shared counter.
  *
- * @param[in] cont
- *   Pointer to the relevant counter pool container.
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
  * @param[in] id
  *   The shared counter ID to search.
  * @param[out] ppool
@@ -4464,26 +4464,22 @@ struct field_modify_info modify_tcp[] = {
  *   NULL if not existed, otherwise pointer to the shared extend counter.
  */
 static struct mlx5_flow_counter_ext *
-flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id,
+flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id,
 			      struct mlx5_flow_counter_pool **ppool)
 {
-	struct mlx5_flow_counter_ext *cnt;
-	struct mlx5_flow_counter_pool *pool;
-	uint32_t i, j;
-	uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
+	struct mlx5_priv *priv = dev->data->dev_private;
+	union mlx5_l3t_data data;
+	uint32_t cnt_idx;
 
-	for (i = 0; i < n_valid; i++) {
-		pool = cont->pools[i];
-		for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
-			cnt = MLX5_GET_POOL_CNT_EXT(pool, j);
-			if (cnt->ref_cnt && cnt->shared && cnt->id == id) {
-				if (ppool)
-					*ppool = cont->pools[i];
-				return cnt;
-			}
-		}
-	}
-	return NULL;
+	if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword)
+		return NULL;
+	cnt_idx = data.dword;
+	/*
+	 * Shared counters don't have age info. The counter extend is after
+	 * the counter data structure.
+	 */
+	return (struct mlx5_flow_counter_ext *)
+	       ((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1);
 }
 
 /**
@@ -4529,7 +4525,7 @@ struct field_modify_info modify_tcp[] = {
 		return 0;
 	}
 	if (shared) {
-		cnt_ext = flow_dv_counter_shared_search(cont, id, &pool);
+		cnt_ext = flow_dv_counter_shared_search(dev, id, &pool);
 		if (cnt_ext) {
 			if (cnt_ext->ref_cnt + 1 == 0) {
 				rte_errno = E2BIG;
@@ -4597,6 +4593,13 @@ struct field_modify_info modify_tcp[] = {
 		cnt_ext->shared = shared;
 		cnt_ext->ref_cnt = 1;
 		cnt_ext->id = id;
+		if (shared) {
+			union mlx5_l3t_data data;
+
+			data.dword = cnt_idx;
+			if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
+				return 0;
+		}
 	}
 	if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
 		/* Start the asynchronous batch query by the host thread. */
@@ -4679,6 +4682,7 @@ struct field_modify_info modify_tcp[] = {
 static void
 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_counter_pool *pool = NULL;
 	struct mlx5_flow_counter *cnt;
 	struct mlx5_flow_counter_ext *cnt_ext = NULL;
@@ -4689,8 +4693,13 @@ struct field_modify_info modify_tcp[] = {
 	MLX5_ASSERT(pool);
 	if (counter < MLX5_CNT_BATCH_OFFSET) {
 		cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
-		if (cnt_ext && --cnt_ext->ref_cnt)
-			return;
+		if (cnt_ext) {
+			if (--cnt_ext->ref_cnt)
+				return;
+			if (cnt_ext->shared)
+				mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
+						     cnt_ext->id);
+		}
 	}
 	if (IS_AGE_POOL(pool))
 		flow_dv_counter_remove_from_age(dev, counter, cnt);
-- 
1.8.3.1
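
One note on the pointer arithmetic in the new
flow_dv_counter_shared_search(): the "+ 1" cast relies on each
counter's extend record being stored immediately after its base record
in the pool, as the comment in that hunk says. Below is a tiny
stand-alone illustration of the idiom; the structures are hypothetical,
not the mlx5 ones, and the trick assumes no padding between the two
records:

#include <assert.h>
#include <stdint.h>

struct cnt { uint64_t hits; uint64_t bytes; };     /* base record */
struct cnt_ext { uint32_t id; uint32_t ref_cnt; }; /* extend record */

/* One pool slot: the extend record sits right behind the base one. */
struct slot {
	struct cnt base;
	struct cnt_ext ext;
};

int
main(void)
{
	struct slot s = { .ext = { .id = 42, .ref_cnt = 1 } };
	struct cnt *c = &s.base;
	/* "c + 1" advances by one whole struct cnt and lands on the
	 * extend record, exactly like the cast in the patch.
	 */
	struct cnt_ext *e = (struct cnt_ext *)(c + 1);

	assert(e->id == 42);
	return 0;
}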


Thread overview: 5+ messages
2020-06-18  7:24 [dpdk-dev] [PATCH 0/3] net/mlx5: optimize single counter allocate Suanming Mou
2020-06-18  7:24 ` [dpdk-dev] [PATCH 1/3] net/mlx5: add Three-Level table utility Suanming Mou
2020-06-18  7:24 ` Suanming Mou [this message]
2020-06-18  7:24 ` [dpdk-dev] [PATCH 3/3] net/mlx5: optimize single counter pool search Suanming Mou
2020-06-21 14:15 ` [dpdk-dev] [PATCH 0/3] net/mlx5: optimize single counter allocate Raslan Darawsheh
