DPDK patches and discussions
From: zhichaox.zeng@intel.com
To: dev@dpdk.org
Cc: qiming.yang@intel.com, Zhichao Zeng <zhichaox.zeng@intel.com>,
	Qi Zhang <qi.z.zhang@intel.com>
Subject: [PATCH] net/ice: support disabling ACL engine in DCF via devargs
Date: Mon, 25 Jul 2022 11:15:24 +0800
Message-ID: <20220725031524.4063028-1-zhichaox.zeng@intel.com>

From: Zhichao Zeng <zhichaox.zeng@intel.com>

Support disabling the DCF ACL engine via the devarg "acl=off" on the
command line, in order to shorten the DCF startup time.
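
A usage sketch (the PCI address and the application are illustrative, not
part of this patch): start the DCF port with the ACL engine disabled by
adding "acl=off" next to the existing "cap=dcf" devarg, e.g.

    dpdk-testpmd -a 18:01.0,cap=dcf,acl=off -- -i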

Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>
---
 drivers/net/ice/ice_dcf_ethdev.c   | 58 +++++++++++++++++++++++-------
 drivers/net/ice/ice_dcf_ethdev.h   |  6 ++++
 drivers/net/ice/ice_dcf_parent.c   |  3 ++
 drivers/net/ice/ice_ethdev.h       |  2 ++
 drivers/net/ice/ice_generic_flow.c | 12 +++++++
 5 files changed, 68 insertions(+), 13 deletions(-)

diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 0da267db1f..a51e404e64 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -45,6 +45,26 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev);
 static int
 ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);
 
+static int
+ice_dcf_cap_check_handler(__rte_unused const char *key,
+			  const char *value, __rte_unused void *opaque);
+
+static int
+ice_dcf_engine_disabled_handler(__rte_unused const char *key,
+			  const char *value, __rte_unused void *opaque);
+
+struct ice_devarg {
+	enum ice_dcf_devarg type;
+	const char *key;
+	int (*handler)(__rte_unused const char *key,
+			  const char *value, __rte_unused void *opaque);
+};
+
+static const struct ice_devarg ice_devargs_table[] = {
+	{ICE_DCF_DEVARG_CAP, "cap", ice_dcf_cap_check_handler},
+	{ICE_DCF_DEVARG_ACL, "acl", ice_dcf_engine_disabled_handler},
+};
+
 struct rte_ice_dcf_xstats_name_off {
 	char name[RTE_ETH_XSTATS_NAME_SIZE];
 	unsigned int offset;
@@ -1909,6 +1929,16 @@ ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
+static int
+ice_dcf_engine_disabled_handler(__rte_unused const char *key,
+			  const char *value, __rte_unused void *opaque)
+{
+	if (strcmp(value, "off"))
+		return -1;
+
+	return 0;
+}
+
 static int
 ice_dcf_cap_check_handler(__rte_unused const char *key,
 			  const char *value, __rte_unused void *opaque)
@@ -1919,11 +1949,11 @@ ice_dcf_cap_check_handler(__rte_unused const char *key,
 	return 0;
 }
 
-static int
-ice_dcf_cap_selected(struct rte_devargs *devargs)
+int
+ice_devargs_check(struct rte_devargs *devargs, enum ice_dcf_devarg devarg_type)
 {
 	struct rte_kvargs *kvlist;
-	const char *key = "cap";
+	unsigned int i = 0;
 	int ret = 0;
 
 	if (devargs == NULL)
@@ -1933,16 +1963,18 @@ ice_dcf_cap_selected(struct rte_devargs *devargs)
 	if (kvlist == NULL)
 		return 0;
 
-	if (!rte_kvargs_count(kvlist, key))
-		goto exit;
-
-	/* dcf capability selected when there's a key-value pair: cap=dcf */
-	if (rte_kvargs_process(kvlist, key,
-			       ice_dcf_cap_check_handler, NULL) < 0)
-		goto exit;
-
-	ret = 1;
+	for (i = 0; i < ARRAY_SIZE(ice_devargs_table); i++) {
+		if (devarg_type == ice_devargs_table[i].type) {
+			if (!rte_kvargs_count(kvlist, ice_devargs_table[i].key))
+				goto exit;
 
+			if (rte_kvargs_process(kvlist, ice_devargs_table[i].key,
+					ice_devargs_table[i].handler, NULL) < 0)
+				goto exit;
+			ret = 1;
+			break;
+		}
+	}
 exit:
 	rte_kvargs_free(kvlist);
 	return ret;
@@ -1960,7 +1992,7 @@ eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
 	uint16_t dcf_vsi_id;
 	int i, ret;
 
-	if (!ice_dcf_cap_selected(pci_dev->device.devargs))
+	if (!ice_devargs_check(pci_dev->device.devargs, ICE_DCF_DEVARG_CAP))
 		return 1;
 
 	ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h
index 27f6402786..4baaec4b8b 100644
--- a/drivers/net/ice/ice_dcf_ethdev.h
+++ b/drivers/net/ice/ice_dcf_ethdev.h
@@ -64,12 +64,18 @@ struct ice_dcf_vf_repr {
 	struct ice_dcf_vlan outer_vlan_info; /* DCF always handle outer VLAN */
 };
 
+enum ice_dcf_devarg {
+	ICE_DCF_DEVARG_CAP,
+	ICE_DCF_DEVARG_ACL,
+};
+
 extern const struct rte_tm_ops ice_dcf_tm_ops;
 void ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
 				 uint8_t *msg, uint16_t msglen);
 int ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev);
 void ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev);
 
+int ice_devargs_check(struct rte_devargs *devargs, enum ice_dcf_devarg devarg_type);
 int ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param);
 int ice_dcf_vf_repr_uninit(struct rte_eth_dev *vf_rep_eth_dev);
 int ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev);
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 2f96dedcce..c67c865d8e 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -466,6 +466,9 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
 
 	ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
 
+	if (ice_devargs_check(eth_dev->device->devargs, ICE_DCF_DEVARG_ACL))
+		parent_adapter->disabled_engine_mask |= BIT(ICE_FLOW_ENGINE_ACL);
+
 	err = ice_flow_init(parent_adapter);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Failed to initialize flow");
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index ec23dae665..5bd5ead0e6 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -610,6 +610,8 @@ struct ice_adapter {
 	struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
 	/* True if DCF state of the associated PF is on */
 	bool dcf_state_on;
+	/* Bit is set if the corresponding flow engine is disabled */
+	unsigned long disabled_engine_mask;
 	struct ice_parser *psr;
 #ifdef RTE_ARCH_X86
 	bool rx_use_avx2;
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 57eb002bde..d496c28dec 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -28,6 +28,8 @@
 /*Pipeline mode, fdir used at distributor stage*/
 #define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR 2
 
+#define ICE_FLOW_ENGINE_DISABLED(mask, type) ((mask) & BIT(type))
+
 static struct ice_engine_list engine_list =
 		TAILQ_HEAD_INITIALIZER(engine_list);
 
@@ -1841,6 +1843,11 @@ ice_flow_init(struct ice_adapter *ad)
 			return -ENOTSUP;
 		}
 
+		if (ICE_FLOW_ENGINE_DISABLED(ad->disabled_engine_mask, engine->type)) {
+			PMD_INIT_LOG(INFO, "Engine %d disabled", engine->type);
+			continue;
+		}
+
 		ret = engine->init(ad);
 		if (ret) {
 			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
@@ -1861,6 +1868,11 @@ ice_flow_uninit(struct ice_adapter *ad)
 	void *temp;
 
 	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (ICE_FLOW_ENGINE_DISABLED(ad->disabled_engine_mask, engine->type)) {
+			PMD_DRV_LOG(DEBUG, "Engine %d disabled, skipping it", engine->type);
+			continue;
+		}
+
 		if (engine->uninit)
 			engine->uninit(ad);
 	}
-- 
2.25.1


Thread overview: 6+ messages
2022-07-25  3:15 zhichaox.zeng [this message]
2022-08-12  4:08 ` Zhang, Qi Z
2022-08-17  8:21 ` [PATCH v2] " zhichaox.zeng
2022-08-22 23:07   ` Zhang, Qi Z
2022-08-23  7:33   ` Yang, Qiming
2022-08-23  9:16     ` Zeng, ZhichaoX
