DPDK patches and discussions
 help / color / mirror / Atom feed
From: Chaoyong He <chaoyong.he@corigine.com>
To: dev@dpdk.org
Cc: oss-drivers@corigine.com, Peng Zhang <peng.zhang@corigine.com>,
	Chaoyong He <chaoyong.he@corigine.com>,
	Long Wu <long.wu@corigine.com>
Subject: [PATCH 21/23] net/nfp: add resource share mode of mask ID
Date: Wed, 19 Jun 2024 17:58:28 +0800	[thread overview]
Message-ID: <20240619095830.3479757-22-chaoyong.he@corigine.com> (raw)
In-Reply-To: <20240619095830.3479757-1-chaoyong.he@corigine.com>

From: Peng Zhang <peng.zhang@corigine.com>

For the multiple-PF flower firmware, the mask ID resource should be
shared between PFs, so change this resource allocation from the
driver to the hardware.

In the application start stage, the resource initialization is no
longer necessary, since the PMD will allocate the resource from the
hardware for the multiple-PF flower firmware.

Signed-off-by: Peng Zhang <peng.zhang@corigine.com>
Reviewed-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Long Wu <long.wu@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower_flow.c | 142 ++++++++++++++++++-----
 1 file changed, 111 insertions(+), 31 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_flower_flow.c b/drivers/net/nfp/flower/nfp_flower_flow.c
index 215d655a18..0fb63013d3 100644
--- a/drivers/net/nfp/flower/nfp_flower_flow.c
+++ b/drivers/net/nfp/flower/nfp_flower_flow.c
@@ -161,7 +161,51 @@ nfp_flow_dev_to_priv(struct rte_eth_dev *dev)
 }
 
 static int
-nfp_mask_id_alloc(struct nfp_flow_priv *priv,
+nfp_mask_id_alloc_from_hw(struct nfp_net_hw_priv *hw_priv,
+		uint8_t *mask_id)
+{
+	int ret;
+	uint8_t freed_id;
+	uint32_t mask = 0;
+
+	/* Checking if buffer is empty. */
+	freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
+
+	ret = nfp_rtsym_readl_indirect(hw_priv->pf_dev->sym_tbl,
+			"_FC_WC_EMU_0_MASK_ID_RING_BASE",
+			"_FC_WC_MASK_ID_RING_EMU_0", &mask);
+	if (ret != 0) {
+		*mask_id = freed_id;
+		return ret;
+	}
+
+	/* 0 is an invalid value */
+	if (mask == 0 || mask >= NFP_FLOWER_MASK_ENTRY_RS) {
+		*mask_id = freed_id;
+		return -ENOENT;
+	}
+
+	*mask_id = (uint8_t)mask;
+
+	return 0;
+}
+
+static int
+nfp_mask_id_free_from_hw(struct nfp_net_hw_priv *hw_priv,
+		uint8_t mask_id)
+{
+	int ret;
+	uint32_t mask = mask_id;
+
+	ret = nfp_rtsym_writel_indirect(hw_priv->pf_dev->sym_tbl,
+			"_FC_WC_EMU_0_MASK_ID_RING_BASE",
+			"_FC_WC_MASK_ID_RING_EMU_0", mask);
+
+	return ret;
+}
+
+static int
+nfp_mask_id_alloc_from_driver(struct nfp_flow_priv *priv,
 		uint8_t *mask_id)
 {
 	uint8_t temp_id;
@@ -194,7 +238,7 @@ nfp_mask_id_alloc(struct nfp_flow_priv *priv,
 }
 
 static int
-nfp_mask_id_free(struct nfp_flow_priv *priv,
+nfp_mask_id_free_from_driver(struct nfp_flow_priv *priv,
 		uint8_t mask_id)
 {
 	struct circ_buf *ring;
@@ -213,7 +257,33 @@ nfp_mask_id_free(struct nfp_flow_priv *priv,
 }
 
 static int
-nfp_mask_table_add(struct nfp_flow_priv *priv,
+nfp_mask_id_alloc(struct nfp_app_fw_flower *app_fw_flower,
+		uint8_t *mask_id)
+{
+	struct nfp_net_hw_priv *hw_priv;
+
+	hw_priv = app_fw_flower->pf_ethdev->process_private;
+	if (hw_priv->pf_dev->multi_pf.enabled)
+		return nfp_mask_id_alloc_from_hw(hw_priv, mask_id);
+	else
+		return nfp_mask_id_alloc_from_driver(app_fw_flower->flow_priv, mask_id);
+}
+
+static int
+nfp_mask_id_free(struct nfp_app_fw_flower *app_fw_flower,
+		uint8_t mask_id)
+{
+	struct nfp_net_hw_priv *hw_priv;
+
+	hw_priv = app_fw_flower->pf_ethdev->process_private;
+	if (hw_priv->pf_dev->multi_pf.enabled)
+		return nfp_mask_id_free_from_hw(hw_priv, mask_id);
+	else
+		return nfp_mask_id_free_from_driver(app_fw_flower->flow_priv, mask_id);
+}
+
+static int
+nfp_mask_table_add(struct nfp_app_fw_flower *app_fw_flower,
 		char *mask_data,
 		uint32_t mask_len,
 		uint8_t *id)
@@ -221,6 +291,7 @@ nfp_mask_table_add(struct nfp_flow_priv *priv,
 	int ret;
 	uint8_t mask_id;
 	uint32_t hash_key;
+	struct nfp_flow_priv *priv;
 	struct nfp_mask_id_entry *mask_entry;
 
 	mask_entry = rte_zmalloc("mask_entry", sizeof(struct nfp_mask_id_entry), 0);
@@ -229,10 +300,11 @@ nfp_mask_table_add(struct nfp_flow_priv *priv,
 		goto exit;
 	}
 
-	ret = nfp_mask_id_alloc(priv, &mask_id);
+	ret = nfp_mask_id_alloc(app_fw_flower, &mask_id);
 	if (ret != 0)
 		goto mask_entry_free;
 
+	priv = app_fw_flower->flow_priv;
 	hash_key = rte_jhash(mask_data, mask_len, priv->hash_seed);
 	mask_entry->mask_id  = mask_id;
 	mask_entry->hash_key = hash_key;
@@ -250,7 +322,7 @@ nfp_mask_table_add(struct nfp_flow_priv *priv,
 	return 0;
 
 mask_id_free:
-	nfp_mask_id_free(priv, mask_id);
+	nfp_mask_id_free(app_fw_flower, mask_id);
 mask_entry_free:
 	rte_free(mask_entry);
 exit:
@@ -258,14 +330,16 @@ nfp_mask_table_add(struct nfp_flow_priv *priv,
 }
 
 static int
-nfp_mask_table_del(struct nfp_flow_priv *priv,
+nfp_mask_table_del(struct nfp_app_fw_flower *app_fw_flower,
 		char *mask_data,
 		uint32_t mask_len,
 		uint8_t id)
 {
 	int ret;
 	uint32_t hash_key;
+	struct nfp_flow_priv *priv;
 
+	priv = app_fw_flower->flow_priv;
 	hash_key = rte_jhash(mask_data, mask_len, priv->hash_seed);
 	ret = rte_hash_del_key(priv->mask_table, &hash_key);
 	if (ret < 0) {
@@ -273,7 +347,7 @@ nfp_mask_table_del(struct nfp_flow_priv *priv,
 		return ret;
 	}
 
-	ret = nfp_mask_id_free(priv, id);
+	ret = nfp_mask_id_free(app_fw_flower, id);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Free mask id failed.");
 		return ret;
@@ -302,19 +376,21 @@ nfp_mask_table_search(struct nfp_flow_priv *priv,
 }
 
 static bool
-nfp_check_mask_add(struct nfp_flow_priv *priv,
+nfp_check_mask_add(struct nfp_app_fw_flower *app_fw_flower,
 		char *mask_data,
 		uint32_t mask_len,
 		uint8_t *meta_flags,
 		uint8_t *mask_id)
 {
 	int ret;
+	struct nfp_flow_priv *priv;
 	struct nfp_mask_id_entry *mask_entry;
 
+	priv = app_fw_flower->flow_priv;
 	mask_entry = nfp_mask_table_search(priv, mask_data, mask_len);
 	if (mask_entry == NULL) {
 		/* Mask entry does not exist, let's create one */
-		ret = nfp_mask_table_add(priv, mask_data, mask_len, mask_id);
+		ret = nfp_mask_table_add(app_fw_flower, mask_data, mask_len, mask_id);
 		if (ret != 0)
 			return false;
 
@@ -329,21 +405,23 @@ nfp_check_mask_add(struct nfp_flow_priv *priv,
 }
 
 static bool
-nfp_check_mask_remove(struct nfp_flow_priv *priv,
+nfp_check_mask_remove(struct nfp_app_fw_flower *app_fw_flower,
 		char *mask_data,
 		uint32_t mask_len,
 		uint8_t *meta_flags)
 {
 	int ret;
+	struct nfp_flow_priv *priv;
 	struct nfp_mask_id_entry *mask_entry;
 
+	priv = app_fw_flower->flow_priv;
 	mask_entry = nfp_mask_table_search(priv, mask_data, mask_len);
 	if (mask_entry == NULL)
 		return false;
 
 	mask_entry->ref_cnt--;
 	if (mask_entry->ref_cnt == 0) {
-		ret = nfp_mask_table_del(priv, mask_data, mask_len,
+		ret = nfp_mask_table_del(app_fw_flower, mask_data, mask_len,
 				mask_entry->mask_id);
 		if (ret != 0)
 			return false;
@@ -4667,7 +4745,7 @@ nfp_flow_process(struct nfp_flower_representor *representor,
 	nfp_flow_meta = nfp_flow->payload.meta;
 	mask_data = nfp_flow->payload.mask_data;
 	mask_len = key_layer.key_size;
-	if (!nfp_check_mask_add(priv, mask_data, mask_len,
+	if (!nfp_check_mask_add(representor->app_fw_flower, mask_data, mask_len,
 			&nfp_flow_meta->flags, &new_mask_id)) {
 		PMD_DRV_LOG(ERR, "nfp mask add check failed.");
 		goto free_flow;
@@ -4684,7 +4762,7 @@ nfp_flow_process(struct nfp_flower_representor *representor,
 	flow_find = nfp_flow_table_search(priv, nfp_flow);
 	if (flow_find != NULL && !nfp_flow->merge_flag && !flow_find->merge_flag) {
 		PMD_DRV_LOG(ERR, "This flow is already exist.");
-		if (!nfp_check_mask_remove(priv, mask_data, mask_len,
+		if (!nfp_check_mask_remove(representor->app_fw_flower, mask_data, mask_len,
 				&nfp_flow_meta->flags)) {
 			PMD_DRV_LOG(ERR, "nfp mask del check failed.");
 		}
@@ -4757,7 +4835,7 @@ nfp_flow_teardown(struct nfp_app_fw_flower *app_fw_flower,
 	mask_data = nfp_flow->payload.mask_data;
 	mask_len = nfp_flow_meta->mask_len << NFP_FL_LW_SIZ;
 	nfp_flow_meta->flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
-	if (!nfp_check_mask_remove(priv, mask_data, mask_len,
+	if (!nfp_check_mask_remove(app_fw_flower, mask_data, mask_len,
 			&nfp_flow_meta->flags)) {
 		PMD_DRV_LOG(ERR, "nfp mask del check failed.");
 		return -EINVAL;
@@ -5283,24 +5361,26 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 	priv->total_mem_units = ctx_split;
 	priv->ctx_count = ctx_count;
 
-	/* Init ring buffer and unallocated mask_ids. */
-	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
-	priv->mask_ids.free_list.buf = rte_zmalloc("nfp_app_mask_ids",
-			NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS, 0);
-	if (priv->mask_ids.free_list.buf == NULL) {
-		PMD_INIT_LOG(ERR, "mask id free list creation failed");
-		ret = -ENOMEM;
-		goto free_priv;
-	}
+	if (!pf_dev->multi_pf.enabled) {
+		/* Init ring buffer and unallocated mask_ids. */
+		priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
+		priv->mask_ids.free_list.buf = rte_zmalloc("nfp_app_mask_ids",
+				NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS, 0);
+		if (priv->mask_ids.free_list.buf == NULL) {
+			PMD_INIT_LOG(ERR, "mask id free list creation failed");
+			ret = -ENOMEM;
+			goto free_priv;
+		}
 
-	/* Init ring buffer and unallocated stats_ids. */
-	priv->stats_ids.init_unallocated = ctx_count / ctx_split;
-	priv->stats_ids.free_list.buf = rte_zmalloc("nfp_app_stats_ids",
-			priv->stats_ring_size * NFP_FL_STATS_ELEM_RS, 0);
-	if (priv->stats_ids.free_list.buf == NULL) {
-		PMD_INIT_LOG(ERR, "stats id free list creation failed");
-		ret = -ENOMEM;
-		goto free_mask_id;
+		/* Init ring buffer and unallocated stats_ids. */
+		priv->stats_ids.init_unallocated = ctx_count / ctx_split;
+		priv->stats_ids.free_list.buf = rte_zmalloc("nfp_app_stats_ids",
+				priv->stats_ring_size * NFP_FL_STATS_ELEM_RS, 0);
+		if (priv->stats_ids.free_list.buf == NULL) {
+			PMD_INIT_LOG(ERR, "stats id free list creation failed");
+			ret = -ENOMEM;
+			goto free_mask_id;
+		}
 	}
 
 	/* Flow stats */
-- 
2.39.1


  parent reply	other threads:[~2024-06-19 10:01 UTC|newest]

Thread overview: 24+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-06-19  9:58 [PATCH 00/23] support flower firmware with multiple PF Chaoyong He
2024-06-19  9:58 ` [PATCH 01/23] net/nfp: fix dereference of null pointer Chaoyong He
2024-06-19  9:58 ` [PATCH 02/23] net/nfp: disable ctrl VNIC queues Chaoyong He
2024-06-19  9:58 ` [PATCH 03/23] net/nfp: fix dereference of null pointer Chaoyong He
2024-06-19  9:58 ` [PATCH 04/23] net/nfp: fix repeat disable the port Chaoyong He
2024-06-19  9:58 ` [PATCH 05/23] net/nfp: fix repeat set the speed configure Chaoyong He
2024-06-19  9:58 ` [PATCH 06/23] net/nfp: make the logic simpler by adding local variable Chaoyong He
2024-06-19  9:58 ` [PATCH 07/23] net/nfp: rename the variable name Chaoyong He
2024-06-19  9:58 ` [PATCH 08/23] net/nfp: export function ID get interface Chaoyong He
2024-06-19  9:58 ` [PATCH 09/23] net/nfp: extract total phyports Chaoyong He
2024-06-19  9:58 ` [PATCH 10/23] net/nfp: extract the initialize helper function Chaoyong He
2024-06-19  9:58 ` [PATCH 11/23] net/nfp: get the VF configuration Chaoyong He
2024-06-19  9:58 ` [PATCH 12/23] net/nfp: refactor the logic of flower service Chaoyong He
2024-06-19  9:58 ` [PATCH 13/23] net/nfp: get the first VF ID of the PF Chaoyong He
2024-06-19  9:58 ` [PATCH 14/23] net/nfp: add the helper function to map rtsym with offset Chaoyong He
2024-06-19  9:58 ` [PATCH 15/23] net/nfp: add the VF table to record the VF information Chaoyong He
2024-06-19  9:58 ` [PATCH 16/23] net/nfp: support configuration of VF numbers Chaoyong He
2024-06-19  9:58 ` [PATCH 17/23] net/nfp: configure the VF queue Chaoyong He
2024-06-19  9:58 ` [PATCH 18/23] net/nfp: add check for numbers of VF representor port Chaoyong He
2024-06-19  9:58 ` [PATCH 19/23] net/nfp: add support of ring pop and push Chaoyong He
2024-06-19  9:58 ` [PATCH 20/23] net/nfp: add resource share mode of host context Chaoyong He
2024-06-19  9:58 ` Chaoyong He [this message]
2024-06-19  9:58 ` [PATCH 22/23] net/nfp: add device active command for nsp service Chaoyong He
2024-06-19  9:58 ` [PATCH 23/23] net/nfp: add support of flower firmware with multiple PF Chaoyong He

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240619095830.3479757-22-chaoyong.he@corigine.com \
    --to=chaoyong.he@corigine.com \
    --cc=dev@dpdk.org \
    --cc=long.wu@corigine.com \
    --cc=oss-drivers@corigine.com \
    --cc=peng.zhang@corigine.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).