From: Ting Xu <ting.xu@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, qiming.yang@intel.com, Ting Xu <ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v2 2/3] net/ice: enable protocol agnostic flow offloading in RSS
Date: Mon,  1 Nov 2021 19:05:08 +0800
Message-ID: <20211101110509.17359-3-ting.xu@intel.com>
In-Reply-To: <20211101110509.17359-1-ting.xu@intel.com>

Enable protocol-agnostic flow offloading to support raw pattern input
for RSS hash flow rule creation. It is based on the Parser Library
feature and uses the existing rte_flow raw API.
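
For reference, below is a minimal application-side sketch of such a rule
(illustrative only: the helper name and the hex strings are examples, not
part of this patch). The raw item's spec and mask carry NUL-terminated
hexadecimal strings of equal length, two characters per packet byte, which
the driver-side parser added here converts back into byte arrays:

#include <stdint.h>
#include <string.h>
#include <rte_flow.h>

/* Hypothetical helper: create one RSS rule from a raw pattern/mask pair. */
static struct rte_flow *
create_raw_rss_rule(uint16_t port_id, const uint8_t *spec_hex,
		    const uint8_t *mask_hex, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_raw raw_spec = {
		.pattern = spec_hex,
		.length = (uint16_t)strlen((const char *)spec_hex),
	};
	struct rte_flow_item_raw raw_mask = {
		.pattern = mask_hex,
		.length = (uint16_t)strlen((const char *)mask_hex),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_RAW,
		  .spec = &raw_spec, .mask = &raw_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* Queues must stay NULL; the driver rejects explicit RSS queues. */
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}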

Signed-off-by: Ting Xu <ting.xu@intel.com>
---
 drivers/net/ice/ice_ethdev.h |   9 ++
 drivers/net/ice/ice_hash.c   | 286 ++++++++++++++++++++++++++++++++---
 2 files changed, 273 insertions(+), 22 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index bbfeb0cc23..76dc3c99e5 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -500,6 +500,14 @@ struct ice_fdir_prof_info {
 	u64 fdir_actived_cnt;
 };
 
+/**
+ * Structure to store rss fv entry.
+ */
+struct ice_rss_prof_info {
+	struct ice_parser_profile prof;
+	bool symm;
+};
+
 /**
  * Structure to store private data for each PF/VF instance.
  */
@@ -524,6 +532,7 @@ struct ice_adapter {
 	bool ptp_ena;
 	uint64_t time_hw;
 	struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
+	struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
 #ifdef RTE_ARCH_X86
 	bool rx_use_avx2;
 	bool rx_use_avx512;
diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
index 35eff8b17d..dd37145e4a 100644
--- a/drivers/net/ice/ice_hash.c
+++ b/drivers/net/ice/ice_hash.c
@@ -32,6 +32,7 @@
 #define ICE_PHINT_GTPU_EH			BIT_ULL(3)
 #define	ICE_PHINT_GTPU_EH_DWN			BIT_ULL(4)
 #define	ICE_PHINT_GTPU_EH_UP			BIT_ULL(5)
+#define ICE_PHINT_RAW				BIT_ULL(6)
 
 #define ICE_GTPU_EH_DWNLINK	0
 #define ICE_GTPU_EH_UPLINK	1
@@ -71,6 +72,7 @@
 struct ice_rss_meta {
 	uint8_t hash_function;
 	struct ice_rss_hash_cfg cfg;
+	struct ice_rss_raw_cfg raw;
 };
 
 struct ice_hash_flow_cfg {
@@ -492,6 +494,7 @@ struct ice_rss_hash_cfg eth_tmplt = {
  */
 static struct ice_pattern_match_item ice_hash_pattern_list[] = {
 	/* IPV4 */
+	{pattern_raw,				ICE_INSET_NONE,				ICE_INSET_NONE,	NULL},
 	{pattern_eth_ipv4,			ICE_RSS_TYPE_ETH_IPV4,		ICE_INSET_NONE,	&ipv4_tmplt},
 	{pattern_eth_ipv4_udp,			ICE_RSS_TYPE_ETH_IPV4_UDP,	ICE_INSET_NONE,	&ipv4_udp_tmplt},
 	{pattern_eth_ipv4_tcp,			ICE_RSS_TYPE_ETH_IPV4_TCP,	ICE_INSET_NONE,	&ipv4_tcp_tmplt},
@@ -612,6 +615,9 @@ ice_hash_parse_pattern(const struct rte_flow_item pattern[], uint64_t *phint,
 		}
 
 		switch (item->type) {
+		case RTE_FLOW_ITEM_TYPE_RAW:
+			*phint |= ICE_PHINT_RAW;
+			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			*phint |= ICE_PHINT_VLAN;
 			break;
@@ -639,6 +645,91 @@ ice_hash_parse_pattern(const struct rte_flow_item pattern[], uint64_t *phint,
 	return 0;
 }
 
+static int
+ice_hash_parse_raw_pattern(struct ice_adapter *ad,
+				const struct rte_flow_item *item,
+				struct ice_rss_meta *meta)
+{
+	const struct rte_flow_item_raw *raw_spec, *raw_mask;
+	struct ice_parser_profile prof;
+	struct ice_parser_result rslt;
+	struct ice_parser *psr;
+	uint8_t *pkt_buf, *msk_buf;
+	uint8_t spec_len, pkt_len;
+	uint8_t tmp_val = 0;
+	uint8_t tmp_c = 0;
+	int i, j;
+
+	raw_spec = item->spec;
+	raw_mask = item->mask;
+
+	spec_len = strlen((char *)(uintptr_t)raw_spec->pattern);
+	if (strlen((char *)(uintptr_t)raw_mask->pattern) !=
+		spec_len)
+		return -rte_errno;
+
+	pkt_len = spec_len / 2;
+
+	pkt_buf = rte_zmalloc(NULL, pkt_len, 0);
+	if (!pkt_buf)
+		return -ENOMEM;
+
+	msk_buf = rte_zmalloc(NULL, pkt_len, 0);
+	if (!msk_buf)
+		return -ENOMEM;
+
+	/* convert string to int array */
+	for (i = 0, j = 0; i < spec_len; i += 2, j++) {
+		tmp_c = raw_spec->pattern[i];
+		if (tmp_c >= 'a' && tmp_c <= 'f')
+			tmp_val = tmp_c - 'a' + 10;
+		if (tmp_c >= 'A' && tmp_c <= 'F')
+			tmp_val = tmp_c - 'A' + 10;
+		if (tmp_c >= '0' && tmp_c <= '9')
+			tmp_val = tmp_c - '0';
+
+		tmp_c = raw_spec->pattern[i + 1];
+		if (tmp_c >= 'a' && tmp_c <= 'f')
+			pkt_buf[j] = tmp_val * 16 + tmp_c - 'a' + 10;
+		if (tmp_c >= 'A' && tmp_c <= 'F')
+			pkt_buf[j] = tmp_val * 16 + tmp_c - 'A' + 10;
+		if (tmp_c >= '0' && tmp_c <= '9')
+			pkt_buf[j] = tmp_val * 16 + tmp_c - '0';
+
+		tmp_c = raw_mask->pattern[i];
+		if (tmp_c >= 'a' && tmp_c <= 'f')
+			tmp_val = tmp_c - 0x57;
+		if (tmp_c >= 'A' && tmp_c <= 'F')
+			tmp_val = tmp_c - 0x37;
+		if (tmp_c >= '0' && tmp_c <= '9')
+			tmp_val = tmp_c - '0';
+
+		tmp_c = raw_mask->pattern[i + 1];
+		if (tmp_c >= 'a' && tmp_c <= 'f')
+			msk_buf[j] = tmp_val * 16 + tmp_c - 'a' + 10;
+		if (tmp_c >= 'A' && tmp_c <= 'F')
+			msk_buf[j] = tmp_val * 16 + tmp_c - 'A' + 10;
+		if (tmp_c >= '0' && tmp_c <= '9')
+			msk_buf[j] = tmp_val * 16 + tmp_c - '0';
+	}
+
+	if (ice_parser_create(&ad->hw, &psr))
+		return -rte_errno;
+	if (ice_parser_run(psr, pkt_buf, pkt_len, &rslt))
+		return -rte_errno;
+	ice_parser_destroy(psr);
+
+	if (ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
+		pkt_len, ICE_BLK_RSS, true, &prof))
+		return -rte_errno;
+
+	rte_memcpy(&meta->raw.prof, &prof, sizeof(prof));
+
+	rte_free(pkt_buf);
+	rte_free(msk_buf);
+	return 0;
+}
+
 static void
 ice_refine_hash_cfg_l234(struct ice_rss_hash_cfg *hash_cfg,
 			 uint64_t rss_type)
@@ -999,7 +1090,10 @@ ice_hash_parse_action(struct ice_pattern_match_item *pattern_match_item,
 				   RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
 				rss_meta->hash_function =
 				RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
-				cfg->symm = true;
+				if (pattern_hint == ICE_PHINT_RAW)
+					rss_meta->raw.symm = true;
+				else
+					cfg->symm = true;
 			}
 
 			if (rss->level)
@@ -1017,6 +1111,10 @@ ice_hash_parse_action(struct ice_pattern_match_item *pattern_match_item,
 					RTE_FLOW_ERROR_TYPE_ACTION, action,
 					"a non-NULL RSS queue is not supported");
 
+			/* If pattern type is raw, no need to refine rss type */
+			if (pattern_hint == ICE_PHINT_RAW)
+				break;
+
 			/**
 			 * Check simultaneous use of SRC_ONLY and DST_ONLY
 			 * of the same level.
@@ -1085,6 +1183,17 @@ ice_hash_parse_pattern_action(__rte_unused struct ice_adapter *ad,
 	if (ret)
 		goto error;
 
+	if (phint == ICE_PHINT_RAW) {
+		rss_meta_ptr->raw.raw_ena = true;
+		ret = ice_hash_parse_raw_pattern(ad, pattern, rss_meta_ptr);
+		if (ret) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					   "Parse raw pattern failed");
+			goto error;
+		}
+	}
+
 	/* Check rss action. */
 	ret = ice_hash_parse_action(pattern_match_item, actions, phint,
 				    (void **)&rss_meta_ptr, error);
@@ -1099,6 +1208,71 @@ ice_hash_parse_pattern_action(__rte_unused struct ice_adapter *ad,
 	return ret;
 }
 
+static int
+ice_hash_add_raw_cfg(struct ice_adapter *ad,
+		struct ice_rss_raw_cfg *cfg, u16 vsi_handle)
+{
+	struct ice_parser_profile *prof = &cfg->prof;
+	struct ice_rss_prof_info *rss_prof;
+	struct ice_hw *hw = &ad->hw;
+	int i, ptg, ret;
+	u64 id;
+
+	id = (u64)ice_find_first_bit(prof->ptypes, UINT16_MAX);
+
+	ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id];
+	rss_prof = &ad->rss_prof_info[ptg];
+	/* check if ptg already has profile */
+	if (rss_prof->prof.fv_num) {
+		for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
+			if (rss_prof->prof.fv[i].proto_id !=
+			    prof->fv[i].proto_id ||
+			    rss_prof->prof.fv[i].offset !=
+			    prof->fv[i].offset)
+				break;
+		}
+
+		/* current profile is matched, check symmetric hash */
+		if (i == ICE_MAX_FV_WORDS) {
+			if (rss_prof->symm == cfg->symm)
+				return ICE_SUCCESS;
+			else
+				goto update_symm;
+		}
+
+		/* current profile is not matched, remove it */
+		ret = ice_rem_prof_id_flow(hw, ICE_BLK_RSS,
+					   ice_get_hw_vsi_num(hw, vsi_handle),
+					   id);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "remove RSS flow failed\n");
+			return ret;
+		}
+
+		ret = ice_rem_prof(hw, ICE_BLK_RSS, id);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "remove RSS profile failed\n");
+			return ret;
+		}
+	}
+
+	/* add new profile */
+	ret = ice_flow_set_hw_prof(hw, vsi_handle, 0, prof, ICE_BLK_RSS);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "HW profile add failed\n");
+		return ret;
+	}
+
+	rss_prof->symm = cfg->symm;
+	ice_memcpy(&rss_prof->prof, prof,
+		   sizeof(struct ice_parser_profile),
+		   ICE_NONDMA_TO_NONDMA);
+
+update_symm:
+	ice_rss_update_raw_symm(hw, cfg, id);
+	return 0;
+}
+
 static int
 ice_hash_create(struct ice_adapter *ad,
 		struct rte_flow *flow,
@@ -1134,15 +1308,30 @@ ice_hash_create(struct ice_adapter *ad,
 
 		goto out;
 	} else {
-		memcpy(&filter_ptr->rss_cfg.hash, &rss_meta->cfg,
-		       sizeof(struct ice_rss_hash_cfg));
-		ret = ice_add_rss_cfg_wrap(pf, vsi->idx,
-					   &filter_ptr->rss_cfg.hash);
-		if (ret) {
-			rte_flow_error_set(error, EINVAL,
-					RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					"rss flow create fail");
-			goto error;
+		if (rss_meta->raw.raw_ena) {
+			memcpy(&filter_ptr->rss_cfg.raw, &rss_meta->raw,
+			       sizeof(struct ice_rss_raw_cfg));
+			ret = ice_hash_add_raw_cfg(ad, &rss_meta->raw,
+						   pf->main_vsi->idx);
+			if (ret) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_HANDLE,
+						   NULL,
+						   "rss flow create fail");
+				goto error;
+			}
+		} else {
+			memcpy(&filter_ptr->rss_cfg.hash, &rss_meta->cfg,
+			       sizeof(struct ice_rss_hash_cfg));
+			ret = ice_add_rss_cfg_wrap(pf, vsi->idx,
+						   &filter_ptr->rss_cfg.hash);
+			if (ret) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_HANDLE,
+						   NULL,
+						   "rss flow create fail");
+				goto error;
+			}
 		}
 	}
 
@@ -1157,6 +1346,45 @@ ice_hash_create(struct ice_adapter *ad,
 	return -rte_errno;
 }
 
+static int
+ice_hash_rem_raw_cfg(struct ice_adapter *ad,
+			struct ice_parser_profile *prof,
+		    u16 vsi_handle)
+{
+	struct ice_hw *hw = &ad->hw;
+	int ptg, ret;
+	u16 vsig;
+	u64 id;
+
+	id = (u64)ice_find_first_bit(prof->ptypes, 0xFFFF);
+
+	ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id];
+
+	memset(&ad->rss_prof_info[ptg], 0,
+		sizeof(struct ice_rss_prof_info));
+
+	/* check if vsig is already removed */
+	ret = ice_vsig_find_vsi(hw, ICE_BLK_RSS,
+		ice_get_hw_vsi_num(hw, vsi_handle), &vsig);
+	if (!ret && vsig) {
+		ret = ice_rem_prof_id_flow(hw, ICE_BLK_RSS,
+					   ice_get_hw_vsi_num(hw, vsi_handle),
+					   id);
+		if (ret)
+			goto err;
+
+		ret = ice_rem_prof(hw, ICE_BLK_RSS, id);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	PMD_DRV_LOG(ERR, "HW profile remove failed\n");
+	return ret;
+}
+
 static int
 ice_hash_destroy(struct ice_adapter *ad,
 		struct rte_flow *flow,
@@ -1178,18 +1406,32 @@ ice_hash_destroy(struct ice_adapter *ad,
 			(1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
 		ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
 	} else {
-		ret = ice_rem_rss_cfg_wrap(pf, vsi->idx,
-					   &filter_ptr->rss_cfg.hash);
-		/* Fixme: Ignore the error if a rule does not exist.
-		 * Currently a rule for inputset change or symm turn on/off
-		 * will overwrite an exist rule, while application still
-		 * have 2 rte_flow handles.
-		 **/
-		if (ret && ret != ICE_ERR_DOES_NOT_EXIST) {
-			rte_flow_error_set(error, EINVAL,
-					RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					"rss flow destroy fail");
-			goto error;
+		if (filter_ptr->rss_cfg.raw.raw_ena) {
+			ret =
+			ice_hash_rem_raw_cfg(ad, &filter_ptr->rss_cfg.raw.prof,
+					     pf->main_vsi->idx);
+			if (ret) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_HANDLE,
+						   NULL,
+						   "rss flow destroy fail");
+				goto error;
+			}
+		} else {
+			ret = ice_rem_rss_cfg_wrap(pf, vsi->idx,
+						   &filter_ptr->rss_cfg.hash);
+			/* Fixme: Ignore the error if a rule does not exist.
+			 * Currently a rule for inputset change or symm turn
+			 * on/off will overwrite an exist rule, while
+			 * application still have 2 rte_flow handles.
+			 **/
+			if (ret && ret != ICE_ERR_DOES_NOT_EXIST) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_HANDLE,
+						   NULL,
+						   "rss flow destroy fail");
+				goto error;
+			}
 		}
 	}
 
-- 
2.17.1



Thread overview: 29+ messages
2021-10-08  7:09 [dpdk-dev] [PATCH v1 0/2] " Ting Xu
2021-10-08  7:09 ` [dpdk-dev] [PATCH v1 1/2] net/ice: " Ting Xu
2021-10-08  7:09 ` [dpdk-dev] [PATCH v1 2/2] doc: add protocol agnostic flow offloading for RSS hash Ting Xu
2021-11-01 11:02 ` [dpdk-dev] [PATCH v2 0/3] enable protocol agnostic flow offloading in RSS Ting Xu
2021-11-01 11:02   ` [dpdk-dev] [PATCH v2 1/3] net/ice: " Ting Xu
2021-11-01 11:02   ` [dpdk-dev] [PATCH v2 2/3] " Ting Xu
2021-11-01 11:02   ` [dpdk-dev] [PATCH v2 3/3] doc: add protocol agnostic flow offloading for RSS Ting Xu
2021-11-01 11:05 ` [dpdk-dev] [PATCH v2 0/3] enable protocol agnostic flow offloading in RSS Ting Xu
2021-11-01 11:05   ` [dpdk-dev] [PATCH v2 1/3] net/ice/base: support add HW profile for RSS raw flow Ting Xu
2021-11-01 11:05   ` Ting Xu [this message]
2021-11-01 11:05   ` [dpdk-dev] [PATCH v2 3/3] doc: add protocol agnostic flow offloading for RSS Ting Xu
2021-11-02  1:49 ` [dpdk-dev] [PATCH v3 0/3] enable protocol agnostic flow offloading in RSS Ting Xu
2021-11-02  1:49   ` [dpdk-dev] [PATCH v3 1/3] net/ice/base: support add HW profile for RSS raw flow Ting Xu
2021-11-02  1:49   ` [dpdk-dev] [PATCH v3 2/3] net/ice: enable protocol agnostic flow offloading in RSS Ting Xu
2021-11-03 14:13     ` Ferruh Yigit
2021-11-04  3:29       ` Xu, Ting
2021-11-02  1:49   ` [dpdk-dev] [PATCH v3 3/3] doc: add protocol agnostic flow offloading for RSS Ting Xu
2021-11-03 12:45   ` [dpdk-dev] [PATCH v3 0/3] enable protocol agnostic flow offloading in RSS Zhang, Qi Z
2021-11-03 14:15     ` Ferruh Yigit
2021-11-04  2:22 ` [dpdk-dev] [PATCH v4 0/2] " Ting Xu
2021-11-04  2:22   ` [dpdk-dev] [PATCH v4 1/2] net/ice/base: support add HW profile for RSS raw flow Ting Xu
2021-11-04  2:22   ` [dpdk-dev] [PATCH v4 2/2] net/ice: enable protocol agnostic flow offloading in RSS Ting Xu
2021-11-04  3:31   ` [dpdk-dev] [PATCH v4 0/2] " Zhang, Qi Z
2021-11-05 13:09   ` Thomas Monjalon
2021-11-05 13:18     ` Xu, Ting
2021-11-05 13:23       ` Thomas Monjalon
2021-11-05 14:53         ` Xu, Ting
2021-11-08  2:44           ` Zhang, Qi Z
2021-11-08 19:47             ` Thomas Monjalon
