DPDK patches and discussions
* [dpdk-dev] [RFC PATCH 0/3] enable multiple DCF and buildin recipe
@ 2020-06-06 13:50 Ting Xu
  2020-06-06 13:50 ` [dpdk-dev] [RFC PATCH 1/3] drivers: add flow flush for DCF Ting Xu
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Ting Xu @ 2020-06-06 13:50 UTC (permalink / raw)
  To: dev
  Cc: qi.z.zhang, qiming.yang, jingjing.wu, beilei.xing,
	marko.kovacevic, john.mcnamara

This RFC patchset adds support for multiple DCF instances to co-exist,
adds a buildin recipe to accelerate the creation of specific flows with
a custom DDP package, and adds flow flush support in DCF.

---
This patchset depends on the patch:
net/ice: support original VF action for DCF
---

Qi Zhang (3):
  drivers: add flow flush for DCF
  drivers/net: support multiple DCF instance
  net/ice: enable buildin recipe 10 for custom DDP package

 doc/guides/nics/ice.rst             |   8 ++
 drivers/common/iavf/virtchnl.h      |   4 +-
 drivers/net/iavf/iavf_ethdev.c      |   2 +-
 drivers/net/ice/base/ice_common.c   |  25 ++++++
 drivers/net/ice/base/ice_switch.c   |  55 ++++++++++++-
 drivers/net/ice/base/ice_type.h     |   2 +
 drivers/net/ice/ice_dcf.c           |  22 +++++-
 drivers/net/ice/ice_dcf.h           |   2 +
 drivers/net/ice/ice_dcf_ethdev.c    | 118 +++++++++++++++++++---------
 drivers/net/ice/ice_dcf_parent.c    |   3 +
 drivers/net/ice/ice_generic_flow.c  |   7 ++
 drivers/net/ice/ice_switch_filter.c |   8 +-
 12 files changed, 209 insertions(+), 47 deletions(-)

-- 
2.17.1



* [dpdk-dev] [RFC PATCH 1/3] drivers: add flow flush for DCF
  2020-06-06 13:50 [dpdk-dev] [RFC PATCH 0/3] enable multiple DCF and buildin recipe Ting Xu
@ 2020-06-06 13:50 ` Ting Xu
  2020-06-06 13:50 ` [dpdk-dev] [RFC PATCH 2/3] drivers/net: support multiple DCF instance Ting Xu
  2020-06-06 13:50 ` [dpdk-dev] [RFC PATCH 3/3] net/ice: enable buildin recipe 10 for custom DDP package Ting Xu
  2 siblings, 0 replies; 4+ messages in thread
From: Ting Xu @ 2020-06-06 13:50 UTC (permalink / raw)
  To: dev
  Cc: qi.z.zhang, qiming.yang, jingjing.wu, beilei.xing,
	marko.kovacevic, john.mcnamara

From: Qi Zhang <qi.z.zhang@intel.com>

Add support for DCF to flush the flow rules created by a previous DCF
instance when the multiple DCF feature is enabled.
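
On the application side this path is reached through the generic
rte_flow API. A minimal sketch (port id, helper name and error
handling are illustrative only) of how a newly started DCF instance
could clear rules left behind by a previous one:

    #include <stdint.h>
    #include <rte_flow.h>

    /* Flush all flow rules on the DCF port. With the "cap=mdcf"
     * devargs the ice DCF PMD forwards this request to the PF via
     * VIRTCHNL_OP_DCF_RULE_FLUSH, so rules installed by a previously
     * active DCF instance are removed as well.
     */
    static int
    dcf_clear_stale_rules(uint16_t dcf_port_id)
    {
            struct rte_flow_error err;

            return rte_flow_flush(dcf_port_id, &err);
    }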

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/common/iavf/virtchnl.h     |  4 +++-
 drivers/net/ice/ice_dcf.c          | 16 ++++++++++++++++
 drivers/net/ice/ice_dcf.h          |  1 +
 drivers/net/ice/ice_generic_flow.c |  7 +++++++
 4 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 79515ee8b..64f97503f 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -128,7 +128,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, 37 and 38 are reserved */
+	/* opcodes 34, 35, 36 and 37 are reserved */
+	VIRTCHNL_OP_DCF_RULE_FLUSH = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
 	VIRTCHNL_OP_DCF_CMD_BUFF = 40,
 	VIRTCHNL_OP_DCF_DISABLE = 41,
@@ -1273,6 +1274,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 		 * so the validation needs to be done in PF's context.
 		 */
 		return 0;
+	case VIRTCHNL_OP_DCF_RULE_FLUSH:
 	case VIRTCHNL_OP_DCF_DISABLE:
 	case VIRTCHNL_OP_DCF_GET_VSI_MAP:
 		/* The two opcodes are required by DCF without message buffer,
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 7fd70a394..6ec32d010 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -1066,3 +1066,19 @@ ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add)
 	rte_free(list);
 	return err;
 }
+
+int
+ice_dcf_flush_rules(struct ice_dcf_hw *hw)
+{
+	struct dcf_virtchnl_cmd args;
+	int err = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.v_op = VIRTCHNL_OP_DCF_RULE_FLUSH;
+
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err)
+		PMD_DRV_LOG(WARNING, "fail to execute command VIRTCHNL_OP_DCF_RULE_FLUSH, the DCF role may have been preempted.");
+
+	return 0;
+}
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index a44a01e90..7e4d48fc5 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -76,5 +76,6 @@ int ice_dcf_disable_queues(struct ice_dcf_hw *hw);
 int ice_dcf_query_stats(struct ice_dcf_hw *hw,
 			struct virtchnl_eth_stats *pstats);
 int ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add);
+int ice_dcf_flush_rules(struct ice_dcf_hw *hw);
 
 #endif /* _ICE_DCF_H_ */
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index ad103d0e8..761f5a528 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -17,6 +17,7 @@
 
 #include "ice_ethdev.h"
 #include "ice_generic_flow.h"
+#include "ice_dcf.h"
 
 /**
  * Non-pipeline mode, fdir and switch both used as distributor,
@@ -2006,6 +2007,9 @@ ice_flow_flush(struct rte_eth_dev *dev,
 		struct rte_flow_error *error)
 {
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_adapter *ad =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct ice_dcf_hw *hw = ad->hw.aq_send_cmd_param;
 	struct rte_flow *p_flow;
 	void *temp;
 	int ret = 0;
@@ -2018,6 +2022,9 @@ ice_flow_flush(struct rte_eth_dev *dev,
 		}
 	}
 
+	if (ad->hw.dcf_enabled && hw->multi_inst)
+		return ice_dcf_flush_rules(ad->hw.aq_send_cmd_param);
+
 	return ret;
 }
 
-- 
2.17.1



* [dpdk-dev] [RFC PATCH 2/3] drivers/net: support multiple DCF instance
  2020-06-06 13:50 [dpdk-dev] [RFC PATCH 0/3] enable multiple DCF and buildin recipe Ting Xu
  2020-06-06 13:50 ` [dpdk-dev] [RFC PATCH 1/3] drivers: add flow flush for DCF Ting Xu
@ 2020-06-06 13:50 ` Ting Xu
  2020-06-06 13:50 ` [dpdk-dev] [RFC PATCH 3/3] net/ice: enable buildin recipe 10 for custom DDP package Ting Xu
  2 siblings, 0 replies; 4+ messages in thread
From: Ting Xu @ 2020-06-06 13:50 UTC (permalink / raw)
  To: dev
  Cc: qi.z.zhang, qiming.yang, jingjing.wu, beilei.xing,
	marko.kovacevic, john.mcnamara

From: Qi Zhang <qi.z.zhang@intel.com>

DCF will not explicitly call VIRTCHNL_OP_DCF_DISABLE during uninit if
the DCF PMD is required to support multiple co-existing instances.
This is hinted by the devargs "cap=mdcf".

NOTE: the kernel PF still only takes one VF as DCF, so only one active
DPDK DCF instance holds the DCF capability at a time. More
specifically, when two DPDK DCF instances are probed, the later one
takes over the DCF capability from the first one, and the only way to
disable the DCF capability is to turn trust mode off on the currently
active DCF.

In this use case we assume all the SR-IOV drivers are DCF themselves,
so the VSI update event does not need to be handled.
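
For illustration only (interface name, PCI addresses and EAL options
are placeholders), two co-existing DCF instances and the trust-mode
knob described above could look like:

    # each DPDK process claims its VF with the "cap=mdcf" devargs
    testpmd -l 2-3 -n 4 -w 18:01.0,cap=mdcf --file-prefix=dcf0 -- -i
    testpmd -l 4-5 -n 4 -w 18:01.1,cap=mdcf --file-prefix=dcf1 -- -i

    # the DCF capability itself is only revoked by turning trust mode
    # off on the currently active DCF VF
    ip link set dev enp24s0f0 vf 0 trust off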

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 doc/guides/nics/ice.rst             |  5 ++
 drivers/net/iavf/iavf_ethdev.c      |  2 +-
 drivers/net/ice/ice_dcf.c           |  6 +-
 drivers/net/ice/ice_dcf.h           |  1 +
 drivers/net/ice/ice_dcf_ethdev.c    | 98 ++++++++++++++++-------------
 drivers/net/ice/ice_dcf_parent.c    |  3 +
 drivers/net/ice/ice_switch_filter.c |  8 ++-
 7 files changed, 74 insertions(+), 49 deletions(-)

diff --git a/doc/guides/nics/ice.rst b/doc/guides/nics/ice.rst
index 9a9f4a6bb..26f02f6bc 100644
--- a/doc/guides/nics/ice.rst
+++ b/doc/guides/nics/ice.rst
@@ -270,6 +270,11 @@ responses for the same from PF.
       192.168.0.2', dst="192.168.0.3")/TCP(flags='S')/Raw(load='XXXXXXXXXX'), \
       iface="enp24s0f0", count=10)
 
+DCF will not explicitly call VIRTCHNL_OP_DCF_DISABLE during uninit if DCF PMD
+is required to support multiple instances co-exist. When two DPDK DCF instances
+are probed, the later one will take over the DCF capability from the first one.
+This is hinted by devargs "cap=mdcf".
+
 Sample Application Notes
 ------------------------
 
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index e09efffd1..fe734e9c5 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1495,7 +1495,7 @@ static int
 iavf_dcf_cap_check_handler(__rte_unused const char *key,
 			   const char *value, __rte_unused void *opaque)
 {
-	if (strcmp(value, "dcf"))
+	if (strcmp(value, "dcf") && strcmp(value, "mdcf"))
 		return -1;
 
 	return 0;
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 6ec32d010..7dcf659e5 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -641,7 +641,8 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 
 	if (ice_dcf_get_vf_vsi_map(hw) < 0) {
 		PMD_INIT_LOG(ERR, "Failed to get VF VSI map");
-		ice_dcf_mode_disable(hw);
+		if (!hw->multi_inst)
+			ice_dcf_mode_disable(hw);
 		goto err_alloc;
 	}
 
@@ -700,7 +701,8 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 	rte_intr_callback_unregister(intr_handle,
 				     ice_dcf_dev_interrupt_handler, hw);
 
-	ice_dcf_mode_disable(hw);
+	if (!hw->multi_inst)
+		ice_dcf_mode_disable(hw);
 	iavf_shutdown_adminq(&hw->avf);
 
 	rte_free(hw->arq_buf);
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 7e4d48fc5..4d57759f6 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -50,6 +50,7 @@ struct ice_dcf_hw {
 	uint16_t vsi_id;
 
 	struct rte_eth_dev *eth_dev;
+	bool multi_inst;
 	uint8_t *rss_lut;
 	uint8_t *rss_key;
 	uint64_t supported_rxdid;
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index a1b1ffb56..0ec9bd5c1 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -19,6 +19,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_ethdev.h>
 
 #include <iavf_devids.h>
 
@@ -878,11 +879,64 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
 	.filter_ctrl             = ice_dcf_dev_filter_ctrl,
 };
 
+static int
+ice_dcf_cap_check_handler(__rte_unused const char *key,
+			  const char *value, void *opaque)
+{
+	bool *mi = opaque;
+
+	if (!strcmp(value, "dcf")) {
+		*mi = 0;
+		return 0;
+	}
+	if (!strcmp(value, "mdcf")) {
+		*mi = 1;
+		return 0;
+	}
+
+	return -1;
+}
+
+static int
+ice_dcf_cap_selected(struct ice_dcf_adapter *adapter,
+		      struct rte_devargs *devargs)
+{
+	struct rte_kvargs *kvlist;
+	const char *key = "cap";
+	int ret = 0;
+
+	if (devargs == NULL)
+		return 0;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (kvlist == NULL)
+		return 0;
+
+	if (!rte_kvargs_count(kvlist, key))
+		goto exit;
+
+	/* dcf capability selected when there's a key-value pair: cap=dcf */
+	if (rte_kvargs_process(kvlist, key,
+			       ice_dcf_cap_check_handler,
+			       &adapter->real_hw.multi_inst) < 0)
+		goto exit;
+
+	ret = 1;
+
+exit:
+	rte_kvargs_free(kvlist);
+	return ret;
+}
+
 static int
 ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
 {
+	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
 	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
 
+	if (!ice_dcf_cap_selected(adapter, pci_dev->device.devargs))
+		return 1;
+
 	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
 	eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
 	eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
@@ -915,51 +969,9 @@ ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
-static int
-ice_dcf_cap_check_handler(__rte_unused const char *key,
-			  const char *value, __rte_unused void *opaque)
-{
-	if (strcmp(value, "dcf"))
-		return -1;
-
-	return 0;
-}
-
-static int
-ice_dcf_cap_selected(struct rte_devargs *devargs)
-{
-	struct rte_kvargs *kvlist;
-	const char *key = "cap";
-	int ret = 0;
-
-	if (devargs == NULL)
-		return 0;
-
-	kvlist = rte_kvargs_parse(devargs->args, NULL);
-	if (kvlist == NULL)
-		return 0;
-
-	if (!rte_kvargs_count(kvlist, key))
-		goto exit;
-
-	/* dcf capability selected when there's a key-value pair: cap=dcf */
-	if (rte_kvargs_process(kvlist, key,
-			       ice_dcf_cap_check_handler, NULL) < 0)
-		goto exit;
-
-	ret = 1;
-
-exit:
-	rte_kvargs_free(kvlist);
-	return ret;
-}
-
 static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
 			     struct rte_pci_device *pci_dev)
 {
-	if (!ice_dcf_cap_selected(pci_dev->device.devargs))
-		return 1;
-
 	return rte_eth_dev_pci_generic_probe(pci_dev,
 					     sizeof(struct ice_dcf_adapter),
 					     ice_dcf_dev_init);
@@ -985,4 +997,4 @@ static struct rte_pci_driver rte_ice_dcf_pmd = {
 RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
-RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");
+RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf|mdcf");
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 8ad8bea1a..b4e200ccb 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -83,6 +83,9 @@ ice_dcf_vsi_update_service_handler(void *param)
 {
 	struct ice_dcf_hw *hw = param;
 
+	if (hw->multi_inst)
+		return NULL;
+
 	usleep(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL);
 
 	rte_spinlock_lock(&vsi_update_lock);
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 993044f88..4b5fa6125 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -24,6 +24,7 @@
 #include "ice_ethdev.h"
 #include "ice_generic_flow.h"
 #include "ice_dcf_ethdev.h"
+#include "ice_dcf.h"
 
 
 #define MAX_QGRP_NUM_TYPE 7
@@ -358,9 +359,10 @@ ice_switch_destroy(struct ice_adapter *ad,
 		struct rte_flow *flow,
 		struct rte_flow_error *error)
 {
+	struct ice_dcf_hw *dcf_hw = ad->hw.aq_send_cmd_param;
+	struct ice_rule_query_data *filter_ptr;
 	struct ice_hw *hw = &ad->hw;
 	int ret;
-	struct ice_rule_query_data *filter_ptr;
 
 	filter_ptr = (struct ice_rule_query_data *)
 		flow->rule;
@@ -374,7 +376,7 @@ ice_switch_destroy(struct ice_adapter *ad,
 	}
 
 	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
-	if (ret) {
+	if (ret && !(hw->dcf_enabled && dcf_hw->multi_inst)) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 			"fail to destroy switch filter rule");
@@ -382,7 +384,7 @@ ice_switch_destroy(struct ice_adapter *ad,
 	}
 
 	rte_free(filter_ptr);
-	return ret;
+	return 0;
 }
 
 static void
-- 
2.17.1



* [dpdk-dev] [RFC PATCH 3/3] net/ice: enable buildin recipe 10 for custom DDP package
  2020-06-06 13:50 [dpdk-dev] [RFC PATCH 0/3] enable multiple DCF and buildin recipe Ting Xu
  2020-06-06 13:50 ` [dpdk-dev] [RFC PATCH 1/3] drivers: add flow flush for DCF Ting Xu
  2020-06-06 13:50 ` [dpdk-dev] [RFC PATCH 2/3] drivers/net: support multiple DCF instance Ting Xu
@ 2020-06-06 13:50 ` Ting Xu
  2 siblings, 0 replies; 4+ messages in thread
From: Ting Xu @ 2020-06-06 13:50 UTC (permalink / raw)
  To: dev
  Cc: qi.z.zhang, qiming.yang, jingjing.wu, beilei.xing,
	marko.kovacevic, john.mcnamara

From: Qi Zhang <qi.z.zhang@intel.com>

Enable a buildin recipe to accelerate DCF start-up and the creation of
specific flow rules with a custom DDP package. Use the devargs "br=1"
to turn on the buildin recipe.
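
As a usage sketch (PCI address and EAL options are placeholders), the
buildin recipe is requested together with the DCF capability through
the devargs, e.g.:

    # br=1 enables the buildin recipe, br=0 (the default) leaves it off
    testpmd -l 22-25 -n 4 -w 18:01.0,cap=dcf,br=1 --file-prefix=dcf -- -i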

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 doc/guides/nics/ice.rst           |  3 ++
 drivers/net/ice/base/ice_common.c | 25 ++++++++++++++
 drivers/net/ice/base/ice_switch.c | 55 +++++++++++++++++++++++++++++--
 drivers/net/ice/base/ice_type.h   |  2 ++
 drivers/net/ice/ice_dcf_ethdev.c  | 38 ++++++++++++++++++---
 5 files changed, 117 insertions(+), 6 deletions(-)

diff --git a/doc/guides/nics/ice.rst b/doc/guides/nics/ice.rst
index 26f02f6bc..c517ef195 100644
--- a/doc/guides/nics/ice.rst
+++ b/doc/guides/nics/ice.rst
@@ -275,6 +275,9 @@ is required to support multiple instances co-exist. When two DPDK DCF instances
 are probed, the later one will take over the DCF capability from the first one.
 This is hinted by devargs "cap=mdcf".
 
+To accelerate the creation of specific flow, a buildin recipe for custom DDP
+package is added. Enable this by running testpmd with devargs 'br=1'.
+
 Sample Application Notes
 ------------------------
 
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index 17ffdee00..406d4ed36 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -459,6 +459,28 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 }
 
+static int ice_buildin_recipe_init(struct ice_hw *hw)
+{
+	struct ice_switch_info *sw = hw->switch_info;
+	struct ice_sw_recipe *recipe;
+
+	sw->buildin_recipes = ice_malloc(hw,
+			sizeof(sw->buildin_recipes[0]) * ICE_MAX_NUM_RECIPES);
+
+	if (!sw->buildin_recipes)
+		return ICE_ERR_NO_MEMORY;
+
+	recipe = &sw->buildin_recipes[10];
+	recipe->is_root = 1;
+
+	recipe->lkup_exts.n_val_words = 1;
+	recipe->lkup_exts.field_mask[0] = 0x00ff;
+	recipe->lkup_exts.fv_words[0].off = 8;
+	recipe->lkup_exts.fv_words[0].prot_id = 32;
+
+	return ICE_SUCCESS;
+}
+
 /**
  * ice_init_fltr_mgmt_struct - initializes filter management list and locks
  * @hw: pointer to the HW struct
@@ -477,6 +499,8 @@ enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
 
 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
 
+	ice_buildin_recipe_init(hw);
+
 	return ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
 }
 
@@ -538,6 +562,7 @@ void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
 			ice_free(hw, recps[i].root_buf);
 	}
 	ice_rm_all_sw_replay_rule_info(hw);
+	ice_free(hw, sw->buildin_recipes);
 	ice_free(hw, sw->recp_list);
 	ice_free(hw, sw);
 }
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index 5b968b7ce..463edefb9 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -5220,6 +5220,48 @@ static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
 	{ ICE_NAT_T,		ICE_UDP_ILOS_HW },
 };
 
+
+static u16 buildin_recipe_get(struct ice_switch_info *sw,
+			      struct ice_prot_lkup_ext *lkup_exts)
+{
+	int i;
+
+	if (!sw->buildin_recipes)
+		return ICE_MAX_NUM_RECIPES;
+
+	for (i = 10; i < ICE_MAX_NUM_RECIPES; i++) {
+		struct ice_sw_recipe *recp = &sw->buildin_recipes[i];
+		struct ice_fv_word *a = lkup_exts->fv_words;
+		struct ice_fv_word *b = recp->lkup_exts.fv_words;
+		u16 *c = recp->lkup_exts.field_mask;
+		u16 *d = lkup_exts->field_mask;
+		bool found = true;
+		u8 p, q;
+
+		if (!recp->is_root)
+			continue;
+
+		if (recp->lkup_exts.n_val_words != lkup_exts->n_val_words)
+			continue;
+
+		for (p = 0; p < lkup_exts->n_val_words; p++) {
+			for (q = 0; q < recp->lkup_exts.n_val_words; q++) {
+				if (a[p].off == b[q].off &&
+				    a[p].prot_id == b[q].prot_id &&
+				    d[p] == c[q])
+					break;
+			}
+			if (q >= recp->lkup_exts.n_val_words) {
+				found = false;
+				break;
+			}
+		}
+		if (found)
+			return i;
+	}
+	return ICE_MAX_NUM_RECIPES;
+}
+
 /**
  * ice_find_recp - find a recipe
  * @hw: pointer to the hardware structure
@@ -5232,8 +5274,15 @@ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
 {
 	bool refresh_required = true;
 	struct ice_sw_recipe *recp;
+	u16 buildin_rid;
 	u8 i;
 
+	if (hw->use_buildin_recipe) {
+		buildin_rid = buildin_recipe_get(hw->switch_info, lkup_exts);
+		if (buildin_rid < ICE_MAX_NUM_RECIPES)
+			return buildin_rid;
+	}
+
 	/* Walk through existing recipes to find a match */
 	recp = hw->switch_info->recp_list;
 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
@@ -7296,8 +7345,10 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
 	struct ice_switch_info *sw;
 
 	sw = hw->switch_info;
-	if (!sw->recp_list[remove_entry->rid].recp_created)
-		return ICE_ERR_PARAM;
+
+	if (!sw->buildin_recipes[remove_entry->rid].is_root)
+		if (!sw->recp_list[remove_entry->rid].recp_created)
+			return ICE_ERR_PARAM;
 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
 	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
 			    list_entry) {
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index 94ea44265..8a131024a 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -769,6 +769,7 @@ struct ice_switch_info {
 	u16 prof_res_bm_init;
 
 	ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
+	struct ice_sw_recipe *buildin_recipes;
 };
 
 /* Port hardware description */
@@ -914,6 +915,7 @@ struct ice_hw {
 	ice_declare_bitmap(fdir_perfect_fltr, ICE_FLTR_PTYPE_MAX);
 	struct ice_lock rss_locks;	/* protect RSS configuration */
 	struct LIST_HEAD_TYPE rss_list_head;
+	u8 use_buildin_recipe;
 };
 
 /* Statistics collected by each port, VSI, VEB, and S-channel */
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 0ec9bd5c1..89433f39a 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -897,12 +897,34 @@ ice_dcf_cap_check_handler(__rte_unused const char *key,
 	return -1;
 }
 
+static int
+parse_bool(const char *key, const char *value, void *args)
+{
+	int *i = (int *)args;
+	char *end;
+	int num;
+
+	num = strtoul(value, &end, 10);
+
+	if (num != 0 && num != 1) {
+		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
+			    "value must be 0 or 1",
+			    value, key);
+		return -1;
+	}
+
+	*i = num;
+	return 0;
+}
+
 static int
 ice_dcf_cap_selected(struct ice_dcf_adapter *adapter,
 		      struct rte_devargs *devargs)
 {
+	struct ice_adapter *ad = &adapter->parent;
 	struct rte_kvargs *kvlist;
-	const char *key = "cap";
+	const char *key_cap = "cap";
+	const char *key_br = "br";
 	int ret = 0;
 
 	if (devargs == NULL)
@@ -912,15 +934,21 @@ ice_dcf_cap_selected(struct ice_dcf_adapter *adapter,
 	if (kvlist == NULL)
 		return 0;
 
-	if (!rte_kvargs_count(kvlist, key))
+	if (!rte_kvargs_count(kvlist, key_cap))
 		goto exit;
 
 	/* dcf capability selected when there's a key-value pair: cap=dcf */
-	if (rte_kvargs_process(kvlist, key,
+	if (rte_kvargs_process(kvlist, key_cap,
 			       ice_dcf_cap_check_handler,
 			       &adapter->real_hw.multi_inst) < 0)
 		goto exit;
 
+	/* buildin recipe is enabled when there's a key-value pair: br=1 */
+	if (rte_kvargs_process(kvlist, key_br,
+			       &parse_bool,
+			       &ad->hw.use_buildin_recipe) < 0)
+		goto exit;
+
 	ret = 1;
 
 exit:
@@ -997,4 +1025,6 @@ static struct rte_pci_driver rte_ice_dcf_pmd = {
 RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
-RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf|mdcf");
+RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf,
+			      "cap=dcf|mdcf "
+			      "br=<1|0>");
-- 
2.17.1


