DPDK patches and discussions
 help / color / mirror / Atom feed
From: Chaoyong He <chaoyong.he@corigine.com>
To: dev@dpdk.org
Cc: oss-drivers@corigine.com, Long Wu <long.wu@corigine.com>,
	Chaoyong He <chaoyong.he@corigine.com>,
	Peng Zhang <peng.zhang@corigine.com>
Subject: [PATCH 09/17] net/nfp: enlarge the flow rules limitation
Date: Mon, 24 Jun 2024 09:57:15 +0800	[thread overview]
Message-ID: <20240624015723.3712898-10-chaoyong.he@corigine.com> (raw)
In-Reply-To: <20240624015723.3712898-1-chaoyong.he@corigine.com>

From: Long Wu <long.wu@corigine.com>

The firmware rules limitation is enlarged to
2056 (8 Eth + 1024 IPv4 + 1024 IPv6 rules).
So enlarge the flow rules limitation and add
more precise checks on the flow count.

Signed-off-by: Long Wu <long.wu@corigine.com>
Reviewed-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Peng Zhang <peng.zhang@corigine.com>
---
 drivers/net/nfp/nfp_net_common.h | 16 +++++-
 drivers/net/nfp/nfp_net_flow.c   | 87 +++++++++++++++++++++++++++++---
 2 files changed, 94 insertions(+), 9 deletions(-)

diff --git a/drivers/net/nfp/nfp_net_common.h b/drivers/net/nfp/nfp_net_common.h
index 8e3e219261..6db849eb07 100644
--- a/drivers/net/nfp/nfp_net_common.h
+++ b/drivers/net/nfp/nfp_net_common.h
@@ -165,12 +165,24 @@ struct nfp_pf_dev {
 	bool speed_updated;
 };
 
-#define NFP_NET_FLOW_LIMIT    1024
+#define NFP_NET_ETH_FLOW_LIMIT    8
+#define NFP_NET_IPV4_FLOW_LIMIT   1024
+#define NFP_NET_IPV6_FLOW_LIMIT   1024
+
+#define NFP_NET_FLOW_LIMIT    ((NFP_NET_ETH_FLOW_LIMIT) +   \
+				(NFP_NET_IPV4_FLOW_LIMIT) + \
+				(NFP_NET_IPV6_FLOW_LIMIT))
+
+struct nfp_net_flow_count {
+	uint16_t eth_count;
+	uint16_t ipv4_count;
+	uint16_t ipv6_count;
+};
 
 struct nfp_net_priv {
 	uint32_t hash_seed; /**< Hash seed for hash tables in this structure. */
 	struct rte_hash *flow_table; /**< Hash table to store flow rules. */
-	uint16_t flow_count; /**< Flow count in hash table */
+	struct nfp_net_flow_count flow_count; /**< Flow count in hash table */
 	bool flow_position[NFP_NET_FLOW_LIMIT]; /**< Flow position array */
 };
 
diff --git a/drivers/net/nfp/nfp_net_flow.c b/drivers/net/nfp/nfp_net_flow.c
index b0d1a57d99..74dd67abd4 100644
--- a/drivers/net/nfp/nfp_net_flow.c
+++ b/drivers/net/nfp/nfp_net_flow.c
@@ -178,7 +178,8 @@ nfp_net_flow_free(struct nfp_net_priv *priv,
 
 static int
 nfp_net_flow_calculate_items(const struct rte_flow_item items[],
-		uint32_t *match_len)
+		uint32_t *match_len,
+		uint32_t *item_type)
 {
 	int ret = -EINVAL;
 	const struct rte_flow_item *item;
@@ -188,15 +189,18 @@ nfp_net_flow_calculate_items(const struct rte_flow_item items[],
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_ETH detected");
 			*match_len = sizeof(struct nfp_net_cmsg_match_eth);
+			*item_type = RTE_FLOW_ITEM_TYPE_ETH;
 			ret = 0;
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV4 detected");
 			*match_len = sizeof(struct nfp_net_cmsg_match_v4);
+			*item_type = RTE_FLOW_ITEM_TYPE_IPV4;
 			return 0;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV6 detected");
 			*match_len = sizeof(struct nfp_net_cmsg_match_v6);
+			*item_type = RTE_FLOW_ITEM_TYPE_IPV6;
 			return 0;
 		default:
 			PMD_DRV_LOG(ERR, "Can't calculate match length");
@@ -643,6 +647,66 @@ nfp_net_flow_process_priority(struct rte_flow *nfp_flow,
 	}
 }
 
+static int
+nfp_net_flow_check_count(struct nfp_net_flow_count *flow_count,
+		uint32_t item_type)
+{
+	int ret = 0;
+
+	switch (item_type) {
+	case RTE_FLOW_ITEM_TYPE_ETH:
+		if (flow_count->eth_count >= NFP_NET_ETH_FLOW_LIMIT)
+			ret = -ENOSPC;
+		break;
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+		if (flow_count->ipv4_count >= NFP_NET_IPV4_FLOW_LIMIT)
+			ret = -ENOSPC;
+		break;
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+		if (flow_count->ipv6_count >= NFP_NET_IPV6_FLOW_LIMIT)
+			ret = -ENOSPC;
+		break;
+	default:
+		ret = -ENOTSUP;
+		break;
+	}
+
+	return ret;
+}
+
+static int
+nfp_net_flow_calculate_count(struct rte_flow *nfp_flow,
+		struct nfp_net_flow_count *flow_count,
+		bool delete_flag)
+{
+	uint16_t *count;
+
+	switch (nfp_flow->payload.cmsg_type) {
+	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V4:
+	case NFP_NET_CFG_MBOX_CMD_FS_DEL_V4:
+		count = &flow_count->ipv4_count;
+		break;
+	case NFP_NET_CFG_MBOX_CMD_FS_ADD_V6:
+	case NFP_NET_CFG_MBOX_CMD_FS_DEL_V6:
+		count = &flow_count->ipv6_count;
+		break;
+	case NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE:
+	case NFP_NET_CFG_MBOX_CMD_FS_DEL_ETHTYPE:
+		count = &flow_count->eth_count;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Flow count calculate failed.");
+		return -EINVAL;
+	}
+
+	if (delete_flag)
+		(*count)--;
+	else
+		(*count)++;
+
+	return 0;
+}
+
 static struct rte_flow *
 nfp_net_flow_setup(struct rte_eth_dev *dev,
 		const struct rte_flow_attr *attr,
@@ -652,6 +716,7 @@ nfp_net_flow_setup(struct rte_eth_dev *dev,
 	int ret;
 	char *hash_data;
 	uint32_t port_id;
+	uint32_t item_type;
 	uint32_t action_len;
 	struct nfp_net_hw *hw;
 	uint32_t match_len = 0;
@@ -666,12 +731,18 @@ nfp_net_flow_setup(struct rte_eth_dev *dev,
 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
 	priv = app_fw_nic->ports[hw->idx]->priv;
 
-	ret = nfp_net_flow_calculate_items(items, &match_len);
+	ret = nfp_net_flow_calculate_items(items, &match_len, &item_type);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Key layers calculate failed.");
 		return NULL;
 	}
 
+	ret = nfp_net_flow_check_count(&priv->flow_count, item_type);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Flow count check failed.");
+		return NULL;
+	}
+
 	action_len = sizeof(struct nfp_net_cmsg_action);
 	port_id = ((struct nfp_net_hw *)dev->data->dev_private)->nfp_idx;
 
@@ -705,7 +776,11 @@ nfp_net_flow_setup(struct rte_eth_dev *dev,
 		goto free_flow;
 	}
 
-	priv->flow_count++;
+	ret = nfp_net_flow_calculate_count(nfp_flow, &priv->flow_count, false);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "NFP flow calculate count failed.");
+		goto free_flow;
+	}
 
 	nfp_net_flow_process_priority(nfp_flow, match_len);
 
@@ -719,11 +794,9 @@ nfp_net_flow_setup(struct rte_eth_dev *dev,
 
 static int
 nfp_net_flow_teardown(struct nfp_net_priv *priv,
-		__rte_unused struct rte_flow *nfp_flow)
+		struct rte_flow *nfp_flow)
 {
-	priv->flow_count--;
-
-	return 0;
+	return nfp_net_flow_calculate_count(nfp_flow, &priv->flow_count, true);
 }
 
 static int
-- 
2.39.1


  parent reply	other threads:[~2024-06-24  1:59 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-06-24  1:57 [PATCH 00/17] NFP bugfix Chaoyong He
2024-06-24  1:57 ` [PATCH 01/17] net/nfp: refactor speed configuration logic Chaoyong He
2024-06-24  1:57 ` [PATCH 02/17] net/nfp: refactor device speed update logic Chaoyong He
2024-06-24  1:57 ` [PATCH 03/17] net/nfp: fix link status display problem Chaoyong He
2024-06-24  1:57 ` [PATCH 04/17] net/nfp: fix coredump caused by firmware abnormal cleanup Chaoyong He
2024-06-24  1:57 ` [PATCH 05/17] net/nfp: forbid offload flow rules with empty action list Chaoyong He
2024-06-24  1:57 ` [PATCH 06/17] net/nfp: remove redundancy function call Chaoyong He
2024-06-24  1:57 ` [PATCH 07/17] net/nfp: fix flow rule match data problem Chaoyong He
2024-06-24  1:57 ` [PATCH 08/17] net/nfp: fix flow rule action " Chaoyong He
2024-06-24  1:57 ` Chaoyong He [this message]
2024-06-24  1:57 ` [PATCH 10/17] net/nfp: enlarge flow hash table size Chaoyong He
2024-06-24  1:57 ` [PATCH 11/17] net/nfp: fix flow position index problem Chaoyong He
2024-06-24  1:57 ` [PATCH 12/17] net/nfp: fix getting firmware version Chaoyong He
2024-06-24  1:57 ` [PATCH 13/17] doc: update the metadata description section Chaoyong He
2024-06-24  1:57 ` [PATCH 14/17] net/nfp: remove the unneeded logic Chaoyong He
2024-06-24  1:57 ` [PATCH 15/17] net/nfp: adapts the reverse sequence card Chaoyong He
2024-06-24  1:57 ` [PATCH 16/17] net/nfp: fix null pointer dereferences Chaoyong He
2024-06-24  1:57 ` [PATCH 17/17] net/nfp: fix port action core dump Chaoyong He

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240624015723.3712898-10-chaoyong.he@corigine.com \
    --to=chaoyong.he@corigine.com \
    --cc=dev@dpdk.org \
    --cc=long.wu@corigine.com \
    --cc=oss-drivers@corigine.com \
    --cc=peng.zhang@corigine.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).