From: Chaoyong He <chaoyong.he@corigine.com>
To: dev@dpdk.org
Cc: oss-drivers@corigine.com, Peng Zhang <peng.zhang@corigine.com>,
	Chaoyong He <chaoyong.he@corigine.com>,
	Long Wu <long.wu@corigine.com>
Subject: [PATCH 20/23] net/nfp: add resource share mode of host context
Date: Wed, 19 Jun 2024 17:58:27 +0800
Message-ID: <20240619095830.3479757-21-chaoyong.he@corigine.com>
In-Reply-To: <20240619095830.3479757-1-chaoyong.he@corigine.com>

From: Peng Zhang <peng.zhang@corigine.com>

For flower firmware with multiple PFs, the host context resource must
be shared between the PFs, so move the allocation of this resource
from the driver to the firmware.

The resource initialization at application start is no longer
necessary, since the PMD now allocates the resource from the hardware
when running flower firmware with multiple PFs.
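
In short, allocating and freeing a stats context id now dispatches on
the multi-PF flag: with multiple PFs the id is popped from a
firmware-managed ring through an indirect rtsym read, otherwise the
existing driver-local ring buffer is used. Below is a minimal
standalone C sketch of that dispatch; the helpers fw_ring_pop() and
driver_ring_pop() and the CTX_COUNT bound are illustrative stand-ins,
not the real rtsym or circ_buf APIs.

#include <stdint.h>
#include <stdbool.h>
#include <errno.h>
#include <stdio.h>

#define CTX_COUNT 1024  /* assumed upper bound on valid context ids */

/* Illustrative stubs, not the real NFP rtsym/circ_buf helpers. */
static int fw_ring_pop(uint32_t *ctx)     { *ctx = 7; return 0; }
static int driver_ring_pop(uint32_t *ctx) { *ctx = 3; return 0; }

/* Dispatch mirroring the patch: firmware-managed ring when multiple
 * PFs share the resource, driver-local ring otherwise. */
static int
stats_id_alloc(bool multi_pf, uint32_t *ctx)
{
	if (multi_pf) {
		int ret = fw_ring_pop(ctx);
		if (ret != 0)
			return ret;
		/* The ring is owned by the firmware and shared between
		 * PFs, so validate the id before trusting it. */
		if (*ctx >= CTX_COUNT)
			return -ENOENT;
		return 0;
	}
	return driver_ring_pop(ctx);
}

int main(void)
{
	uint32_t ctx;

	if (stats_id_alloc(true, &ctx) == 0)
		printf("allocated stats context %u\n", ctx);
	return 0;
}

The extra range check on the firmware path mirrors the patch: the
returned id is compared against the stored context count before use.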

Signed-off-by: Peng Zhang <peng.zhang@corigine.com>
Reviewed-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Long Wu <long.wu@corigine.com>
---
 drivers/net/nfp/flower/nfp_conntrack.c   |  2 +-
 drivers/net/nfp/flower/nfp_flower_flow.c | 89 ++++++++++++++++++++----
 drivers/net/nfp/flower/nfp_flower_flow.h |  4 +-
 3 files changed, 81 insertions(+), 14 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_conntrack.c b/drivers/net/nfp/flower/nfp_conntrack.c
index f89003be8b..b0641b03d2 100644
--- a/drivers/net/nfp/flower/nfp_conntrack.c
+++ b/drivers/net/nfp/flower/nfp_conntrack.c
@@ -1020,7 +1020,7 @@ nfp_ct_offload_add(struct nfp_flower_representor *repr,
 	return 0;
 
 flow_teardown:
-	nfp_flow_teardown(priv, nfp_flow, false);
+	nfp_flow_teardown(repr->app_fw_flower, nfp_flow, false);
 	nfp_flow_free(nfp_flow);
 
 	return ret;
diff --git a/drivers/net/nfp/flower/nfp_flower_flow.c b/drivers/net/nfp/flower/nfp_flower_flow.c
index d8feff634a..215d655a18 100644
--- a/drivers/net/nfp/flower/nfp_flower_flow.c
+++ b/drivers/net/nfp/flower/nfp_flower_flow.c
@@ -487,7 +487,29 @@ nfp_flow_free(struct rte_flow *nfp_flow)
 }
 
 static int
-nfp_stats_id_alloc(struct nfp_flow_priv *priv, uint32_t *ctx)
+nfp_stats_id_alloc_from_hw(struct nfp_app_fw_flower *app_fw_flower,
+		uint32_t *stats_context_id)
+{
+	int ret;
+	struct nfp_net_hw_priv *hw_priv;
+
+	hw_priv = app_fw_flower->pf_ethdev->process_private;
+	ret = nfp_rtsym_readl_indirect(hw_priv->pf_dev->sym_tbl,
+			"_FC_WC_EMU_0_HOST_CTX_RING_BASE",
+			"_FC_WC_HOST_CTX_RING_EMU_0", stats_context_id);
+	if (ret != 0)
+		return ret;
+
+	/* Check if context id is an invalid value */
+	if (*stats_context_id >= app_fw_flower->flow_priv->ctx_count)
+		return -ENOENT;
+
+	return 0;
+}
+
+static int
+nfp_stats_id_alloc_from_driver(struct nfp_flow_priv *priv,
+		uint32_t *ctx)
 {
 	struct circ_buf *ring;
 	uint32_t temp_stats_id;
@@ -523,7 +545,35 @@ nfp_stats_id_alloc(struct nfp_flow_priv *priv, uint32_t *ctx)
 }
 
 static int
-nfp_stats_id_free(struct nfp_flow_priv *priv, uint32_t ctx)
+nfp_stats_id_alloc(struct nfp_app_fw_flower *app_fw_flower,
+		uint32_t *stats_context_id)
+{
+	struct nfp_net_hw_priv *hw_priv;
+
+	hw_priv = app_fw_flower->pf_ethdev->process_private;
+	if (hw_priv->pf_dev->multi_pf.enabled)
+		return nfp_stats_id_alloc_from_hw(app_fw_flower, stats_context_id);
+	else
+		return nfp_stats_id_alloc_from_driver(app_fw_flower->flow_priv,
+				stats_context_id);
+}
+
+static int
+nfp_stats_id_free_to_hw(struct nfp_net_hw_priv *hw_priv,
+		uint32_t stats_context_id)
+{
+	int ret;
+
+	ret = nfp_rtsym_writel_indirect(hw_priv->pf_dev->sym_tbl,
+			"_FC_WC_EMU_0_HOST_CTX_RING_BASE",
+			"_FC_WC_HOST_CTX_RING_EMU_0", stats_context_id);
+
+	return ret;
+}
+
+static int
+nfp_stats_id_free_to_driver(struct nfp_flow_priv *priv,
+		uint32_t ctx)
 {
 	struct circ_buf *ring;
 
@@ -540,6 +590,20 @@ nfp_stats_id_free(struct nfp_flow_priv *priv, uint32_t ctx)
 	return 0;
 }
 
+static int
+nfp_stats_id_free(struct nfp_app_fw_flower *app_fw_flower,
+		uint32_t stats_context_id)
+{
+	struct nfp_net_hw_priv *hw_priv;
+
+	hw_priv = app_fw_flower->pf_ethdev->process_private;
+	if (hw_priv->pf_dev->multi_pf.enabled)
+		return nfp_stats_id_free_to_hw(hw_priv, stats_context_id);
+	else
+		return nfp_stats_id_free_to_driver(app_fw_flower->flow_priv,
+				stats_context_id);
+}
+
 static int
 nfp_tun_add_ipv4_off(struct nfp_app_fw_flower *app_fw_flower,
 		rte_be32_t ipv4)
@@ -4570,8 +4634,7 @@ nfp_flow_process(struct nfp_flower_representor *representor,
 	if (key_layer.port == (uint32_t)~0)
 		key_layer.port = representor->port_id;
 
-	priv = representor->app_fw_flower->flow_priv;
-	ret = nfp_stats_id_alloc(priv, &stats_ctx);
+	ret = nfp_stats_id_alloc(representor->app_fw_flower, &stats_ctx);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "nfp stats id alloc failed.");
 		return NULL;
@@ -4586,6 +4649,7 @@ nfp_flow_process(struct nfp_flower_representor *representor,
 	nfp_flow->install_flag = install_flag;
 	nfp_flow->merge_flag = merge_flag;
 
+	priv = representor->app_fw_flower->flow_priv;
 	nfp_flow_compile_metadata(priv, nfp_flow, &key_layer, stats_ctx, cookie);
 
 	ret = nfp_flow_compile_items(representor, items, nfp_flow);
@@ -4636,7 +4700,7 @@ nfp_flow_process(struct nfp_flower_representor *representor,
 free_flow:
 	nfp_flow_free(nfp_flow);
 free_stats:
-	nfp_stats_id_free(priv, stats_ctx);
+	nfp_stats_id_free(representor->app_fw_flower, stats_ctx);
 
 	return NULL;
 }
@@ -4678,15 +4742,17 @@ nfp_flow_setup(struct nfp_flower_representor *representor,
 }
 
 int
-nfp_flow_teardown(struct nfp_flow_priv *priv,
+nfp_flow_teardown(struct nfp_app_fw_flower *app_fw_flower,
 		struct rte_flow *nfp_flow,
 		bool validate_flag)
 {
 	char *mask_data;
 	uint32_t mask_len;
 	uint32_t stats_ctx;
+	struct nfp_flow_priv *priv;
 	struct nfp_fl_rule_metadata *nfp_flow_meta;
 
+	priv = app_fw_flower->flow_priv;
 	nfp_flow_meta = nfp_flow->payload.meta;
 	mask_data = nfp_flow->payload.mask_data;
 	mask_len = nfp_flow_meta->mask_len << NFP_FL_LW_SIZ;
@@ -4704,7 +4770,7 @@ nfp_flow_teardown(struct nfp_flow_priv *priv,
 		priv->flower_version++;
 
 	stats_ctx = rte_be_to_cpu_32(nfp_flow_meta->host_ctx_id);
-	return nfp_stats_id_free(priv, stats_ctx);
+	return nfp_stats_id_free(app_fw_flower, stats_ctx);
 }
 
 static int
@@ -4716,11 +4782,9 @@ nfp_flow_validate(struct rte_eth_dev *dev,
 {
 	int ret;
 	struct rte_flow *nfp_flow;
-	struct nfp_flow_priv *priv;
 	struct nfp_flower_representor *representor;
 
 	representor = dev->data->dev_private;
-	priv = representor->app_fw_flower->flow_priv;
 
 	nfp_flow = nfp_flow_setup(representor, attr, items, actions, true);
 	if (nfp_flow == NULL) {
@@ -4729,7 +4793,7 @@ nfp_flow_validate(struct rte_eth_dev *dev,
 				NULL, "This flow can not be offloaded.");
 	}
 
-	ret = nfp_flow_teardown(priv, nfp_flow, true);
+	ret = nfp_flow_teardown(representor->app_fw_flower, nfp_flow, true);
 	if (ret != 0) {
 		return rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -4799,7 +4863,7 @@ nfp_flow_create(struct rte_eth_dev *dev,
 	return nfp_flow;
 
 flow_teardown:
-	nfp_flow_teardown(priv, nfp_flow, false);
+	nfp_flow_teardown(app_fw_flower, nfp_flow, false);
 	nfp_flow_free(nfp_flow);
 
 	return NULL;
@@ -4838,7 +4902,7 @@ nfp_flow_destroy(struct rte_eth_dev *dev,
 	}
 
 	/* Update flow */
-	ret = nfp_flow_teardown(priv, nfp_flow, false);
+	ret = nfp_flow_teardown(app_fw_flower, nfp_flow, false);
 	if (ret != 0) {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				NULL, "Flow teardown failed.");
@@ -5217,6 +5281,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 	priv->hash_seed = (uint32_t)rte_rand();
 	priv->stats_ring_size = ctx_count;
 	priv->total_mem_units = ctx_split;
+	priv->ctx_count = ctx_count;
 
 	/* Init ring buffer and unallocated mask_ids. */
 	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
diff --git a/drivers/net/nfp/flower/nfp_flower_flow.h b/drivers/net/nfp/flower/nfp_flower_flow.h
index 5d927edde9..5007438f67 100644
--- a/drivers/net/nfp/flower/nfp_flower_flow.h
+++ b/drivers/net/nfp/flower/nfp_flower_flow.h
@@ -7,6 +7,7 @@
 #define __NFP_FLOWER_FLOW_H__
 
 #include "../nfp_net_common.h"
+#include "nfp_flower.h"
 
 /* The firmware expects lengths in units of long words */
 #define NFP_FL_LW_SIZ                   2
@@ -145,6 +146,7 @@ struct nfp_flow_priv {
 	uint32_t active_mem_unit; /**< The size of active mem units. */
 	uint32_t total_mem_units; /**< The size of total mem units. */
 	uint32_t stats_ring_size; /**< The size of stats id ring. */
+	uint32_t ctx_count; /**< Maximum number of host context. */
 	struct nfp_fl_stats_id stats_ids; /**< The stats id ring. */
 	struct nfp_fl_stats *stats; /**< Store stats of flow. */
 	rte_spinlock_t stats_lock; /** < Lock the update of 'stats' field. */
@@ -202,7 +204,7 @@ struct rte_flow *nfp_flow_process(struct nfp_flower_representor *representor,
 		bool merge_flag);
 int nfp_flow_table_add_merge(struct nfp_flow_priv *priv,
 		struct rte_flow *nfp_flow);
-int nfp_flow_teardown(struct nfp_flow_priv *priv,
+int nfp_flow_teardown(struct nfp_app_fw_flower *app_fw_flower,
 		struct rte_flow *nfp_flow,
 		bool validate_flag);
 void nfp_flow_free(struct rte_flow *nfp_flow);
-- 
2.39.1



Thread overview: 24+ messages
2024-06-19  9:58 [PATCH 00/23] support flower firmware with multiple PF Chaoyong He
2024-06-19  9:58 ` [PATCH 01/23] net/nfp: fix dereference of null pointer Chaoyong He
2024-06-19  9:58 ` [PATCH 02/23] net/nfp: disable ctrl VNIC queues Chaoyong He
2024-06-19  9:58 ` [PATCH 03/23] net/nfp: fix dereference of null pointer Chaoyong He
2024-06-19  9:58 ` [PATCH 04/23] net/nfp: fix repeat disable the port Chaoyong He
2024-06-19  9:58 ` [PATCH 05/23] net/nfp: fix repeat set the speed configure Chaoyong He
2024-06-19  9:58 ` [PATCH 06/23] net/nfp: make the logic simpler by adding local variable Chaoyong He
2024-06-19  9:58 ` [PATCH 07/23] net/nfp: rename the variable name Chaoyong He
2024-06-19  9:58 ` [PATCH 08/23] net/nfp: export function ID get interface Chaoyong He
2024-06-19  9:58 ` [PATCH 09/23] net/nfp: extract total phyports Chaoyong He
2024-06-19  9:58 ` [PATCH 10/23] net/nfp: extract the initialize helper function Chaoyong He
2024-06-19  9:58 ` [PATCH 11/23] net/nfp: get the VF configuration Chaoyong He
2024-06-19  9:58 ` [PATCH 12/23] net/nfp: refactor the logic of flower service Chaoyong He
2024-06-19  9:58 ` [PATCH 13/23] net/nfp: get the first VF ID of the PF Chaoyong He
2024-06-19  9:58 ` [PATCH 14/23] net/nfp: add the helper function to map rtsym with offset Chaoyong He
2024-06-19  9:58 ` [PATCH 15/23] net/nfp: add the VF table to record the VF information Chaoyong He
2024-06-19  9:58 ` [PATCH 16/23] net/nfp: support configuration of VF numbers Chaoyong He
2024-06-19  9:58 ` [PATCH 17/23] net/nfp: configure the VF queue Chaoyong He
2024-06-19  9:58 ` [PATCH 18/23] net/nfp: add check for numbers of VF representor port Chaoyong He
2024-06-19  9:58 ` [PATCH 19/23] net/nfp: add support of ring pop and push Chaoyong He
2024-06-19  9:58 ` [PATCH 20/23] net/nfp: add resource share mode of host context Chaoyong He [this message]
2024-06-19  9:58 ` [PATCH 21/23] net/nfp: add resource share mode of mask ID Chaoyong He
2024-06-19  9:58 ` [PATCH 22/23] net/nfp: add device active command for nsp service Chaoyong He
2024-06-19  9:58 ` [PATCH 23/23] net/nfp: add support of flower firmware with multiple PF Chaoyong He
