From: Wei Zhao <wei.zhao1@intel.com>
To: dev@dpdk.org
Cc: stable@dpdk.org, wenzhuo.lu@intel.com, qi.z.zhang@intel.com,
	yuan.peng@intel.com, Wei Zhao <wei.zhao1@intel.com>
Subject: [dpdk-dev] [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
Date: Tue, 25 Dec 2018 13:44:47 +0800	[thread overview]
Message-ID: <1545716688-34608-2-git-send-email-wei.zhao1@intel.com> (raw)
In-Reply-To: <1545716688-34608-1-git-send-email-wei.zhao1@intel.com>

Users need the flexible byte filter on x550. The x550 supports both
IP mode and tunnel mode flexible byte filters. The tunnel mode
flexible byte filter is used for VXLAN packets and similar tunneled
traffic, and it can be combined with the FDIR tunnel filter.
Until now, the ixgbe PMD flow parser did not support the tunnel mode
flexible byte filter on x550, so it is enabled here in
ixgbe_parse_fdir_filter_tunnel(). Although the IP mode flexible byte
filter is already parsed in ixgbe_parse_fdir_filter_normal(), flows
such as "flow create 0 ingress pattern raw pattern is 0xab / end
actions queue index 3 / end" also need to be supported, so that
parser code needs a small change as well.
This patch enables all of these features.
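
As an illustration only (not part of this patch), below is a minimal
sketch of how such a flex byte rule could be created through the
generic rte_flow API. The port id, byte offset, pattern bytes and
queue index are made-up example values; the mask values mirror the
checks performed by the parser.

#include <rte_flow.h>

/* Illustrative sketch: create a 2-byte flex byte FDIR rule via rte_flow.
 * The offset must be even and within the flex byte range; the mask item
 * must declare all spec fields as significant (all-ones values).
 */
static struct rte_flow *
create_flex_byte_rule(uint16_t port_id)
{
	static const uint8_t spec_bytes[2] = { 0xab, 0xcd };
	static const uint8_t mask_bytes[2] = { 0xff, 0xff };

	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_raw raw_spec = {
		.relative = 0,	/* offset counted from the packet start */
		.search = 0,
		.offset = 12,	/* even offset inside the flex range */
		.limit = 0,
		.length = 2,	/* the hardware matches exactly two bytes */
		.pattern = spec_bytes,
	};
	struct rte_flow_item_raw raw_mask = {
		.relative = 1,	/* every spec field above is significant */
		.search = 1,
		.offset = -1,	/* 0xffffffff */
		.limit = 0xffff,
		.length = 0xffff,
		.pattern = mask_bytes,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_RAW,
		  .spec = &raw_spec, .mask = &raw_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}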

Fixes: 82fb702077f6 ("ixgbe: support new flow director modes for X550")
Fixes: 11777435c727 ("net/ixgbe: parse flow director filter")

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_fdir.c |   9 +-
 drivers/net/ixgbe/ixgbe_flow.c | 274 ++++++++++++++++++++++++++++-------------
 2 files changed, 195 insertions(+), 88 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index e559f0f..deb9a21 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -307,6 +307,8 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
 	/* flex byte mask */
 	if (info->mask.flex_bytes_mask == 0)
 		fdirm |= IXGBE_FDIRM_FLEX;
+	if (info->mask.src_ipv4_mask == 0 && info->mask.dst_ipv4_mask == 0)
+		fdirm |= IXGBE_FDIRM_L3P;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
@@ -356,8 +358,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
 	/* mask VM pool and DIPv6 since there are currently not supported
 	 * mask FLEX byte, it will be set in flex_conf
 	 */
-	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
-			 IXGBE_FDIRM_FLEX;
+	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
 	uint32_t fdiripv6m;
 	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
 	uint16_t mac_mask;
@@ -385,6 +386,10 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
+	/* flex byte mask */
+	if (info->mask.flex_bytes_mask == 0)
+		fdirm |= IXGBE_FDIRM_FLEX;
+
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
 	fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index f0fafeb..dc210c5 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1622,9 +1622,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	const struct rte_flow_item_raw *raw_mask;
 	const struct rte_flow_item_raw *raw_spec;
 	uint8_t j;
-
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+
 	if (!pattern) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
@@ -1651,9 +1651,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	 * value. So, we need not do anything for the not provided fields later.
 	 */
 	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
-	rule->mask.vlan_tci_mask = 0;
-	rule->mask.flex_bytes_mask = 0;
+	memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
 
 	/**
 	 * The first not void item should be
@@ -1665,7 +1663,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
 	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
-	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2201,6 +2200,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 		}
 
 		raw_mask = item->mask;
+		rule->b_mask = TRUE;
 
 		/* check mask */
 		if (raw_mask->relative != 0x1 ||
@@ -2217,6 +2217,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 		}
 
 		raw_spec = item->spec;
+		rule->b_spec = TRUE;
 
 		/* check spec */
 		if (raw_spec->relative != 0 ||
@@ -2323,6 +2324,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_eth *eth_mask;
 	const struct rte_flow_item_vlan *vlan_spec;
 	const struct rte_flow_item_vlan *vlan_mask;
+	const struct rte_flow_item_raw *raw_mask;
+	const struct rte_flow_item_raw *raw_spec;
 	uint32_t j;
 
 	if (!pattern) {
@@ -2351,8 +2354,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	 * value. So, we need not do anything for the not provided fields later.
 	 */
 	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
-	rule->mask.vlan_tci_mask = 0;
+	memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
 
 	/**
 	 * The first not void item should be
@@ -2364,7 +2366,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
 	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
-	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE &&
+	    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2520,6 +2523,18 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 				&rule->ixgbe_fdir.formatted.tni_vni),
 				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
 		}
+		/* check if the next not void item is MAC VLAN RAW or END*/
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END){
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
 	}
 
 	/* Get the NVGRE info */
@@ -2616,16 +2631,19 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
 			nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
 		}
-	}
 
-	/* check if the next not void item is MAC */
-	item = next_no_void_pattern(pattern, item);
-	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
+		/* check if the next not void item is MAC VLAN RAW or END*/
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END){
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
 	}
 
 	/**
@@ -2633,92 +2651,91 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	 * others should be masked.
 	 */
 
-	if (!item->mask) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			item, "Not supported last point for range");
-		return -rte_errno;
-	}
-	rule->b_mask = TRUE;
-	eth_mask = item->mask;
-
-	/* Ether type should be masked. */
-	if (eth_mask->type) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-
-	/* src MAC address should be masked. */
-	for (j = 0; j < ETHER_ADDR_LEN; j++) {
-		if (eth_mask->src.addr_bytes[j]) {
-			memset(rule, 0,
-			       sizeof(struct ixgbe_fdir_rule));
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		if (!item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by fdir filter");
 			return -rte_errno;
 		}
-	}
-	rule->mask.mac_addr_byte_mask = 0;
-	for (j = 0; j < ETHER_ADDR_LEN; j++) {
-		/* It's a per byte mask. */
-		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
-			rule->mask.mac_addr_byte_mask |= 0x1 << j;
-		} else if (eth_mask->dst.addr_bytes[j]) {
-			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		/*Not supported last point for range*/
+		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		rule->b_mask = TRUE;
+		eth_mask = item->mask;
+
+		/* Ether type should be masked. */
+		if (eth_mask->type) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by fdir filter");
 			return -rte_errno;
 		}
-	}
 
-	/* When no vlan, considered as full mask. */
-	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
-
-	if (item->spec) {
-		rule->b_spec = TRUE;
-		eth_spec = item->spec;
-
-		/* Get the dst MAC. */
+		/* src MAC address should be masked. */
 		for (j = 0; j < ETHER_ADDR_LEN; j++) {
-			rule->ixgbe_fdir.formatted.inner_mac[j] =
-				eth_spec->dst.addr_bytes[j];
+			if (eth_mask->src.addr_bytes[j]) {
+				memset(rule, 0,
+			       sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
+		}
+		for (j = 0; j < ETHER_ADDR_LEN; j++) {
+			/* It's a per byte mask. */
+			if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+				rule->mask.mac_addr_byte_mask |= 0x1 << j;
+			} else if (eth_mask->dst.addr_bytes[j]) {
+				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
 		}
-	}
 
-	/**
-	 * Check if the next not void item is vlan or ipv4.
-	 * IPv6 is not supported.
-	 */
-	item = next_no_void_pattern(pattern, item);
-	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
-		(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
+		if (item->spec) {
+			rule->b_spec = TRUE;
+			eth_spec = item->spec;
+
+			/* Get the dst MAC. */
+			for (j = 0; j < ETHER_ADDR_LEN; j++) {
+				rule->ixgbe_fdir.formatted.inner_mac[j] =
+					eth_spec->dst.addr_bytes[j];
+			}
+		}
+		/**
+		 * Check if the next not void item is vlan or ipv4.
+		 * IPv6 is not supported.
+		 */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+			/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 			item, "Not supported last point for range");
-		return -rte_errno;
+			return -rte_errno;
+		}
 	}
 
+
 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
 		if (!(item->spec && item->mask)) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2736,10 +2753,90 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 		rule->mask.vlan_tci_mask = vlan_mask->tci;
 		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
 		/* More than one tags are not supported. */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+	}
+
+	/* Get the flex byte info */
+	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+		/* Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		/* mask should not be null */
+		if (!item->mask || !item->spec) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		raw_mask = item->mask;
+		rule->b_mask = TRUE;
 
+		/* check mask */
+		if (raw_mask->relative != 0x1 ||
+		    raw_mask->search != 0x1 ||
+		    raw_mask->reserved != 0x0 ||
+		    (uint32_t)raw_mask->offset != 0xffffffff ||
+		    raw_mask->limit != 0xffff ||
+		    raw_mask->length != 0xffff) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		raw_spec = item->spec;
+		rule->b_spec = TRUE;
+
+		/* check spec */
+		if (raw_spec->relative != 0 ||
+		    raw_spec->search != 0 ||
+		    raw_spec->reserved != 0 ||
+		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
+		    raw_spec->offset % 2 ||
+		    raw_spec->limit != 0 ||
+		    raw_spec->length != 2 ||
+		    /* pattern can't be 0xffff */
+		    (raw_spec->pattern[0] == 0xff &&
+		     raw_spec->pattern[1] == 0xff)) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		/* check pattern mask */
+		if (raw_mask->pattern[0] != 0xff ||
+		    raw_mask->pattern[1] != 0xff) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		rule->mask.flex_bytes_mask = 0xffff;
+		rule->ixgbe_fdir.formatted.flex_bytes =
+			(((uint16_t)raw_spec->pattern[1]) << 8) |
+			raw_spec->pattern[0];
+		rule->flex_bytes_offset = raw_spec->offset;
 		/* check if the next not void item is END */
 		item = next_no_void_pattern(pattern, item);
-
 		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
@@ -2776,12 +2873,17 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
 		hw->mac.type != ixgbe_mac_X550EM_a)
 		return -ENOTSUP;
 
+	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+		goto tunnel_filter;
+
 	ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
 					actions, rule, error);
 
 	if (!ret)
 		goto step_next;
 
+tunnel_filter:
+
 	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
 					actions, rule, error);
 
-- 
2.7.5
