DPDK patches and discussions
* [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter
@ 2018-12-17  5:53 Zhao Wei
  2018-12-17  6:19 ` Zhao1, Wei
                   ` (2 more replies)
  0 siblings, 3 replies; 10+ messages in thread
From: Zhao Wei @ 2018-12-17  5:53 UTC (permalink / raw)
  To: dev; +Cc: adrien.mazarguil, stable, wenzhuo.lu, qi.z.zhang, Wei Zhao

There is a need for users to use the flexible byte filter on x550.
This patch enables it.
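
For illustration only (not part of the driver change), a rule using the
flexible byte filter could be built through the rte_flow API roughly as in
the sketch below. The offset, pattern bytes and queue index are arbitrary
example values, the field requirements mirror the RAW item checks added in
this patch, and port_id is assumed to be an initialized x550 port.

	#include <rte_flow.h>

	/* Match two bytes at an even offset in the packet and steer
	 * matching packets to queue 1.
	 */
	static const uint8_t flex_pattern[2] = { 0x12, 0x34 };
	static const uint8_t flex_pattern_mask[2] = { 0xff, 0xff };

	struct rte_flow_item_raw raw_spec = {
		.relative = 0,
		.search = 0,
		.offset = 4,		/* even, <= IXGBE_MAX_FLX_SOURCE_OFF */
		.limit = 0,
		.length = 2,		/* the x550 flex filter matches 2 bytes */
		.pattern = flex_pattern,
	};
	struct rte_flow_item_raw raw_mask = {
		.relative = 1,		/* all mask fields fully set */
		.search = 1,
		.offset = (int32_t)0xffffffff,
		.limit = 0xffff,
		.length = 0xffff,
		.pattern = flex_pattern_mask,
	};

	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_RAW,
		  .spec = &raw_spec, .mask = &raw_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_error err;
	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
						actions, &err);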

Fixes: 82fb702077f6 ("ixgbe: support new flow director modes for X550")
Fixes: 11777435c727 ("net/ixgbe: parse flow director filter")

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_fdir.c |   9 +-
 drivers/net/ixgbe/ixgbe_flow.c | 274 ++++++++++++++++++++++++++++-------------
 2 files changed, 195 insertions(+), 88 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index e559f0f..deb9a21 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -307,6 +307,8 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
 	/* flex byte mask */
 	if (info->mask.flex_bytes_mask == 0)
 		fdirm |= IXGBE_FDIRM_FLEX;
+	if (info->mask.src_ipv4_mask == 0 && info->mask.dst_ipv4_mask == 0)
+		fdirm |= IXGBE_FDIRM_L3P;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
@@ -356,8 +358,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
 	/* mask VM pool and DIPv6 since there are currently not supported
 	 * mask FLEX byte, it will be set in flex_conf
 	 */
-	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
-			 IXGBE_FDIRM_FLEX;
+	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
 	uint32_t fdiripv6m;
 	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
 	uint16_t mac_mask;
@@ -385,6 +386,10 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
+	/* flex byte mask */
+	if (info->mask.flex_bytes_mask == 0)
+		fdirm |= IXGBE_FDIRM_FLEX;
+
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
 	fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index f0fafeb..dc210c5 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1622,9 +1622,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	const struct rte_flow_item_raw *raw_mask;
 	const struct rte_flow_item_raw *raw_spec;
 	uint8_t j;
-
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+
 	if (!pattern) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
@@ -1651,9 +1651,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	 * value. So, we need not do anything for the not provided fields later.
 	 */
 	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
-	rule->mask.vlan_tci_mask = 0;
-	rule->mask.flex_bytes_mask = 0;
+	memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
 
 	/**
 	 * The first not void item should be
@@ -1665,7 +1663,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
 	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
-	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2201,6 +2200,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 		}
 
 		raw_mask = item->mask;
+		rule->b_mask = TRUE;
 
 		/* check mask */
 		if (raw_mask->relative != 0x1 ||
@@ -2217,6 +2217,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 		}
 
 		raw_spec = item->spec;
+		rule->b_spec = TRUE;
 
 		/* check spec */
 		if (raw_spec->relative != 0 ||
@@ -2323,6 +2324,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_eth *eth_mask;
 	const struct rte_flow_item_vlan *vlan_spec;
 	const struct rte_flow_item_vlan *vlan_mask;
+	const struct rte_flow_item_raw *raw_mask;
+	const struct rte_flow_item_raw *raw_spec;
 	uint32_t j;
 
 	if (!pattern) {
@@ -2351,8 +2354,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	 * value. So, we need not do anything for the not provided fields later.
 	 */
 	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
-	rule->mask.vlan_tci_mask = 0;
+	memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
 
 	/**
 	 * The first not void item should be
@@ -2364,7 +2366,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
 	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
-	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE &&
+	    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2520,6 +2523,18 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 				&rule->ixgbe_fdir.formatted.tni_vni),
 				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
 		}
+		/* check if the next not void item is MAC VLAN RAW or END*/
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END){
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
 	}
 
 	/* Get the NVGRE info */
@@ -2616,16 +2631,19 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
 			nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
 		}
-	}
 
-	/* check if the next not void item is MAC */
-	item = next_no_void_pattern(pattern, item);
-	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
+		/* check if the next not void item is MAC VLAN RAW or END*/
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END){
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
 	}
 
 	/**
@@ -2633,92 +2651,91 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	 * others should be masked.
 	 */
 
-	if (!item->mask) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			item, "Not supported last point for range");
-		return -rte_errno;
-	}
-	rule->b_mask = TRUE;
-	eth_mask = item->mask;
-
-	/* Ether type should be masked. */
-	if (eth_mask->type) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-
-	/* src MAC address should be masked. */
-	for (j = 0; j < ETHER_ADDR_LEN; j++) {
-		if (eth_mask->src.addr_bytes[j]) {
-			memset(rule, 0,
-			       sizeof(struct ixgbe_fdir_rule));
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		if (!item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by fdir filter");
 			return -rte_errno;
 		}
-	}
-	rule->mask.mac_addr_byte_mask = 0;
-	for (j = 0; j < ETHER_ADDR_LEN; j++) {
-		/* It's a per byte mask. */
-		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
-			rule->mask.mac_addr_byte_mask |= 0x1 << j;
-		} else if (eth_mask->dst.addr_bytes[j]) {
-			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		/*Not supported last point for range*/
+		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		rule->b_mask = TRUE;
+		eth_mask = item->mask;
+
+		/* Ether type should be masked. */
+		if (eth_mask->type) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by fdir filter");
 			return -rte_errno;
 		}
-	}
 
-	/* When no vlan, considered as full mask. */
-	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
-
-	if (item->spec) {
-		rule->b_spec = TRUE;
-		eth_spec = item->spec;
-
-		/* Get the dst MAC. */
+		/* src MAC address should be masked. */
 		for (j = 0; j < ETHER_ADDR_LEN; j++) {
-			rule->ixgbe_fdir.formatted.inner_mac[j] =
-				eth_spec->dst.addr_bytes[j];
+			if (eth_mask->src.addr_bytes[j]) {
+				memset(rule, 0,
+			       sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
+		}
+		for (j = 0; j < ETHER_ADDR_LEN; j++) {
+			/* It's a per byte mask. */
+			if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+				rule->mask.mac_addr_byte_mask |= 0x1 << j;
+			} else if (eth_mask->dst.addr_bytes[j]) {
+				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
 		}
-	}
 
-	/**
-	 * Check if the next not void item is vlan or ipv4.
-	 * IPv6 is not supported.
-	 */
-	item = next_no_void_pattern(pattern, item);
-	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
-		(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
+		if (item->spec) {
+			rule->b_spec = TRUE;
+			eth_spec = item->spec;
+
+			/* Get the dst MAC. */
+			for (j = 0; j < ETHER_ADDR_LEN; j++) {
+				rule->ixgbe_fdir.formatted.inner_mac[j] =
+					eth_spec->dst.addr_bytes[j];
+			}
+		}
+		/**
+		 * Check if the next not void item is vlan or ipv4.
+		 * IPv6 is not supported.
+		 */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+			/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 			item, "Not supported last point for range");
-		return -rte_errno;
+			return -rte_errno;
+		}
 	}
 
+
 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
 		if (!(item->spec && item->mask)) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2736,10 +2753,90 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 		rule->mask.vlan_tci_mask = vlan_mask->tci;
 		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
 		/* More than one tags are not supported. */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+	}
+
+	/* Get the flex byte info */
+	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+		/* Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		/* mask should not be null */
+		if (!item->mask || !item->spec) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		raw_mask = item->mask;
+		rule->b_mask = TRUE;
 
+		/* check mask */
+		if (raw_mask->relative != 0x1 ||
+		    raw_mask->search != 0x1 ||
+		    raw_mask->reserved != 0x0 ||
+		    (uint32_t)raw_mask->offset != 0xffffffff ||
+		    raw_mask->limit != 0xffff ||
+		    raw_mask->length != 0xffff) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		raw_spec = item->spec;
+		rule->b_spec = TRUE;
+
+		/* check spec */
+		if (raw_spec->relative != 0 ||
+		    raw_spec->search != 0 ||
+		    raw_spec->reserved != 0 ||
+		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
+		    raw_spec->offset % 2 ||
+		    raw_spec->limit != 0 ||
+		    raw_spec->length != 2 ||
+		    /* pattern can't be 0xffff */
+		    (raw_spec->pattern[0] == 0xff &&
+		     raw_spec->pattern[1] == 0xff)) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		/* check pattern mask */
+		if (raw_mask->pattern[0] != 0xff ||
+		    raw_mask->pattern[1] != 0xff) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		rule->mask.flex_bytes_mask = 0xffff;
+		rule->ixgbe_fdir.formatted.flex_bytes =
+			(((uint16_t)raw_spec->pattern[1]) << 8) |
+			raw_spec->pattern[0];
+		rule->flex_bytes_offset = raw_spec->offset;
 		/* check if the next not void item is END */
 		item = next_no_void_pattern(pattern, item);
-
 		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
@@ -2776,12 +2873,17 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
 		hw->mac.type != ixgbe_mac_X550EM_a)
 		return -ENOTSUP;
 
+	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+		goto tunnel_filter;
+
 	ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
 					actions, rule, error);
 
 	if (!ret)
 		goto step_next;
 
+tunnel_filter:
+
 	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
 					actions, rule, error);
 
-- 
2.7.5


* Re: [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter
  2018-12-17  5:53 [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter Zhao Wei
@ 2018-12-17  6:19 ` Zhao1, Wei
  2018-12-24 12:13 ` Zhang, Qi Z
  2018-12-25  5:44 ` [dpdk-dev] [PATCH v2 0/2] " Wei Zhao
  2 siblings, 0 replies; 10+ messages in thread
From: Zhao1, Wei @ 2018-12-17  6:19 UTC (permalink / raw)
  To: dev, Peng, Yuan; +Cc: adrien.mazarguil, stable, Lu, Wenzhuo, Zhang, Qi Z

Add yuan.peng@intel.com to the mail loop.

> -----Original Message-----
> From: Zhao1, Wei
> Sent: Monday, December 17, 2018 1:53 PM
> To: dev@dpdk.org
> Cc: adrien.mazarguil@6wind.com; stable@dpdk.org; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Zhao1, Wei
> <wei.zhao1@intel.com>
> Subject: [PATCH] net/ixgbe: enable x550 flexible byte filter


* Re: [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter
  2018-12-17  5:53 [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter Zhao Wei
  2018-12-17  6:19 ` Zhao1, Wei
@ 2018-12-24 12:13 ` Zhang, Qi Z
  2018-12-25  2:04   ` Zhao1, Wei
  2018-12-25  5:44 ` [dpdk-dev] [PATCH v2 0/2] " Wei Zhao
  2 siblings, 1 reply; 10+ messages in thread
From: Zhang, Qi Z @ 2018-12-24 12:13 UTC (permalink / raw)
  To: Zhao1, Wei, dev; +Cc: adrien.mazarguil, stable, Lu, Wenzhuo

Hi Wei:

> -----Original Message-----
> From: Zhao1, Wei
> Sent: Monday, December 17, 2018 1:53 PM
> To: dev@dpdk.org
> Cc: adrien.mazarguil@6wind.com; stable@dpdk.org; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Zhao1, Wei
> <wei.zhao1@intel.com>
> Subject: [PATCH] net/ixgbe: enable x550 flexible byte filter
> 
> There is a need for users to use the flexible byte filter on x550.
> This patch enables it.

It's difficult for me to review a large patch without understanding the purpose clearly.
For the description here, my understanding is that you are just trying to enable an existing feature for a specific device ID.
But why are there so many changes in your patch? Would you explain in more detail what the gap is here?
BTW, it's better to separate the patch into two patches: one for the fdir layer and one for the rte_flow layer.

Thanks
Qi


* Re: [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter
  2018-12-24 12:13 ` Zhang, Qi Z
@ 2018-12-25  2:04   ` Zhao1, Wei
  0 siblings, 0 replies; 10+ messages in thread
From: Zhao1, Wei @ 2018-12-25  2:04 UTC (permalink / raw)
  To: Zhang, Qi Z, dev; +Cc: adrien.mazarguil, stable, Lu, Wenzhuo

Hi Qi,

> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Monday, December 24, 2018 8:13 PM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: adrien.mazarguil@6wind.com; stable@dpdk.org; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>
> Subject: RE: [PATCH] net/ixgbe: enable x550 flexible byte filter
> 
> Hi Wei:
> 
> > -----Original Message-----
> > From: Zhao1, Wei
> > Sent: Monday, December 17, 2018 1:53 PM
> > To: dev@dpdk.org
> > Cc: adrien.mazarguil@6wind.com; stable@dpdk.org; Lu, Wenzhuo
> > <wenzhuo.lu@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Zhao1, Wei
> > <wei.zhao1@intel.com>
> > Subject: [PATCH] net/ixgbe: enable x550 flexible byte filter
> >
> > There is need for users to use flexible byte filter on x550.
> > This patch enable it.
> 
> It's difficult for me to review a large patch without understanding the
> purpose clearly.
> For the description here, my understanding is that you are just trying to
> enable an existing feature for a specific device ID.
> But why are there so many changes in your patch? Would you explain in more
> detail what the gap is here?

It is because the ixgbe flow parser code does not support the tunnel-mode
flexible byte filter for x550, so I have to enable it in
ixgbe_parse_fdir_filter_tunnel().
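
As an illustration only, the item order that the reworked
ixgbe_parse_fdir_filter_tunnel() is meant to accept looks roughly like the
sketch below; the spec/mask variables are assumed to be filled in the same
way as for the non-tunnel case, and the ETH, VLAN and RAW items are each
optional:

	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,	/* or NVGRE */
		  .spec = &vxlan_spec, .mask = &vxlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,	/* inner dst MAC */
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_RAW,	/* flex bytes */
		  .spec = &raw_spec, .mask = &raw_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
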
> BTW, it's better to separate the patch into two patches: one for the fdir
> layer and one for the rte_flow layer.

Yes, I will split it into two patches in v2. I will also add some code to the flow CLI to parse HEX numbers, so that the raw pattern bytes can be given as hex rather than as an ASCII string.
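
(Illustration of the difference only, not the planned CLI code: when the
pattern argument is taken as a character string, typing "AB" produces the
bytes 0x41 0x42, whereas a hex parser would turn the input "0xAB12" into
the bytes 0xAB 0x12.)

	const uint8_t as_ascii[2] = { 'A', 'B' };	/* 0x41, 0x42 */
	const uint8_t as_hex[2]   = { 0xAB, 0x12 };	/* parsed from "0xAB12" */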

> > +				memset(rule, 0,
> > +			       sizeof(struct ixgbe_fdir_rule));
> > +				rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ITEM,
> > +				item, "Not supported by fdir filter");
> > +				return -rte_errno;
> > +			}
> > +		}
> > +		for (j = 0; j < ETHER_ADDR_LEN; j++) {
> > +			/* It's a per byte mask. */
> > +			if (eth_mask->dst.addr_bytes[j] == 0xFF) {
> > +				rule->mask.mac_addr_byte_mask |= 0x1 << j;
> > +			} else if (eth_mask->dst.addr_bytes[j]) {
> > +				memset(rule, 0, sizeof(struct
> ixgbe_fdir_rule));
> > +				rte_flow_error_set(error, EINVAL,
> > +					RTE_FLOW_ERROR_TYPE_ITEM,
> > +					item, "Not supported by fdir filter");
> > +				return -rte_errno;
> > +			}
> >  		}
> > -	}
> >
> > -	/**
> > -	 * Check if the next not void item is vlan or ipv4.
> > -	 * IPv6 is not supported.
> > -	 */
> > -	item = next_no_void_pattern(pattern, item);
> > -	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
> > -		(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
> > -		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> > -		rte_flow_error_set(error, EINVAL,
> > -			RTE_FLOW_ERROR_TYPE_ITEM,
> > -			item, "Not supported by fdir filter");
> > -		return -rte_errno;
> > -	}
> > -	/*Not supported last point for range*/
> > -	if (item->last) {
> > -		rte_flow_error_set(error, EINVAL,
> > +		if (item->spec) {
> > +			rule->b_spec = TRUE;
> > +			eth_spec = item->spec;
> > +
> > +			/* Get the dst MAC. */
> > +			for (j = 0; j < ETHER_ADDR_LEN; j++) {
> > +				rule->ixgbe_fdir.formatted.inner_mac[j] =
> > +					eth_spec->dst.addr_bytes[j];
> > +			}
> > +		}
> > +		/**
> > +		 * Check if the next not void item is vlan or ipv4.
> > +		 * IPv6 is not supported.
> > +		 */
> > +		item = next_no_void_pattern(pattern, item);
> > +		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
> > +			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
> > +			item->type != RTE_FLOW_ITEM_TYPE_END) {
> > +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ITEM,
> > +				item, "Not supported by fdir filter");
> > +			return -rte_errno;
> > +		}
> > +			/*Not supported last point for range*/
> > +		if (item->last) {
> > +			rte_flow_error_set(error, EINVAL,
> >  			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> >  			item, "Not supported last point for range");
> > -		return -rte_errno;
> > +			return -rte_errno;
> > +		}
> >  	}
> >
> > +
> >  	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
> >  		if (!(item->spec && item->mask)) {
> >  			memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -
> 2736,10
> > +2753,90 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr
> > +*attr,
> >  		rule->mask.vlan_tci_mask = vlan_mask->tci;
> >  		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
> >  		/* More than one tags are not supported. */
> > +		item = next_no_void_pattern(pattern, item);
> > +		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
> > +			item->type != RTE_FLOW_ITEM_TYPE_END) {
> > +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ITEM,
> > +				item, "Not supported by fdir filter");
> > +			return -rte_errno;
> > +		}
> > +	}
> > +
> > +	/* Get the flex byte info */
> > +	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
> > +		/* Not supported last point for range*/
> > +		if (item->last) {
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +				item, "Not supported last point for range");
> > +			return -rte_errno;
> > +		}
> > +		/* mask should not be null */
> > +		if (!item->mask || !item->spec) {
> > +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ITEM,
> > +				item, "Not supported by fdir filter");
> > +			return -rte_errno;
> > +		}
> > +
> > +		raw_mask = item->mask;
> > +		rule->b_mask = TRUE;
> >
> > +		/* check mask */
> > +		if (raw_mask->relative != 0x1 ||
> > +		    raw_mask->search != 0x1 ||
> > +		    raw_mask->reserved != 0x0 ||
> > +		    (uint32_t)raw_mask->offset != 0xffffffff ||
> > +		    raw_mask->limit != 0xffff ||
> > +		    raw_mask->length != 0xffff) {
> > +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ITEM,
> > +				item, "Not supported by fdir filter");
> > +			return -rte_errno;
> > +		}
> > +
> > +		raw_spec = item->spec;
> > +		rule->b_spec = TRUE;
> > +
> > +		/* check spec */
> > +		if (raw_spec->relative != 0 ||
> > +		    raw_spec->search != 0 ||
> > +		    raw_spec->reserved != 0 ||
> > +		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
> > +		    raw_spec->offset % 2 ||
> > +		    raw_spec->limit != 0 ||
> > +		    raw_spec->length != 2 ||
> > +		    /* pattern can't be 0xffff */
> > +		    (raw_spec->pattern[0] == 0xff &&
> > +		     raw_spec->pattern[1] == 0xff)) {
> > +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ITEM,
> > +				item, "Not supported by fdir filter");
> > +			return -rte_errno;
> > +		}
> > +
> > +		/* check pattern mask */
> > +		if (raw_mask->pattern[0] != 0xff ||
> > +		    raw_mask->pattern[1] != 0xff) {
> > +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ITEM,
> > +				item, "Not supported by fdir filter");
> > +			return -rte_errno;
> > +		}
> > +
> > +		rule->mask.flex_bytes_mask = 0xffff;
> > +		rule->ixgbe_fdir.formatted.flex_bytes =
> > +			(((uint16_t)raw_spec->pattern[1]) << 8) |
> > +			raw_spec->pattern[0];
> > +		rule->flex_bytes_offset = raw_spec->offset;
> >  		/* check if the next not void item is END */
> >  		item = next_no_void_pattern(pattern, item);
> > -
> >  		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
> >  			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> >  			rte_flow_error_set(error, EINVAL,
> > @@ -2776,12 +2873,17 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev
> *dev,
> >  		hw->mac.type != ixgbe_mac_X550EM_a)
> >  		return -ENOTSUP;
> >
> > +	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
> > +		goto tunnel_filter;
> > +
> >  	ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
> >  					actions, rule, error);
> >
> >  	if (!ret)
> >  		goto step_next;
> >
> > +tunnel_filter:
> > +
> >  	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
> >  					actions, rule, error);
> >
> > --
> > 2.7.5

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-dev] [PATCH v2 0/2] enable x550 flexible byte filter
  2018-12-17  5:53 [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter Zhao Wei
  2018-12-17  6:19 ` Zhao1, Wei
  2018-12-24 12:13 ` Zhang, Qi Z
@ 2018-12-25  5:44 ` Wei Zhao
  2018-12-25  5:44   ` [dpdk-dev] [PATCH v2 1/2] net/ixgbe: " Wei Zhao
  2018-12-25  5:44   ` [dpdk-dev] [PATCH v2 2/2] app/testpmd: add support of hex string parser for flow API Wei Zhao
  2 siblings, 2 replies; 10+ messages in thread
From: Wei Zhao @ 2018-12-25  5:44 UTC (permalink / raw)
  To: dev; +Cc: stable, wenzhuo.lu, qi.z.zhang, yuan.peng

There is a need for users to use the flexible byte filter on x550.
The command line also needs to parse hex numbers.
This patch set enables all of these features.

---
v2:
add parser code for the flow CLI and split it out in v2

Zhao Wei (2):
  net/ixgbe: enable x550 flexible byte filter
  app/testpmd: add support of hex string parser for flow API

 app/test-pmd/cmdline_flow.c    | 141 ++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_fdir.c |   9 +-
 drivers/net/ixgbe/ixgbe_flow.c | 274 ++++++++++++++++++++++++++++-------------
 3 files changed, 335 insertions(+), 89 deletions(-)

-- 
2.7.5

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-dev] [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
  2018-12-25  5:44 ` [dpdk-dev] [PATCH v2 0/2] " Wei Zhao
@ 2018-12-25  5:44   ` Wei Zhao
  2019-01-02  1:19     ` Zhang, Qi Z
  2018-12-25  5:44   ` [dpdk-dev] [PATCH v2 2/2] app/testpmd: add support of hex string parser for flow API Wei Zhao
  1 sibling, 1 reply; 10+ messages in thread
From: Wei Zhao @ 2018-12-25  5:44 UTC (permalink / raw)
  To: dev; +Cc: stable, wenzhuo.lu, qi.z.zhang, yuan.peng, Wei Zhao

There is a need for users to use the flexible byte filter on x550.
x550 supports the flexible byte filter in both IP mode and tunnel
mode. The tunnel mode flexible byte filter is used for VXLAN packets
and similar encapsulations, and it can be combined with the FDIR
tunnel filter. Until now, the ixgbe PMD flow parser code has not
supported the tunnel mode flexible byte filter for x550, so this
patch enables it in ixgbe_parse_fdir_filter_tunnel(). Although the
IP mode flexible byte filter is already parsed in
ixgbe_parse_fdir_filter_normal(), flows such as "flow create 0
ingress pattern raw pattern is 0xab / end actions queue index 3 /
end" also need to be supported, so that parser code needs a small
change as well.
This patch enables all of these features.
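
For illustration only, here is a minimal sketch (not part of this
patch; the function name, port id, offset and queue index are made-up
values, and the port is assumed to have flow director configured) of a
RAW item shaped to pass the new parser checks: the mask fully covers
relative, search, offset, limit, length and the two pattern bytes,
while the spec uses relative/search/limit of 0, a two-byte pattern and
an even offset.

#include <stdint.h>
#include <rte_flow.h>

/* Hypothetical example: two flex bytes 0xab 0xcd matched at packet
 * offset 12, sent to queue 3. Shaped to satisfy the mask/spec checks
 * added to ixgbe_parse_fdir_filter_normal()/_tunnel(). */
static struct rte_flow *
create_flex_byte_rule(uint16_t port_id, struct rte_flow_error *err)
{
	static const uint8_t spec_pattern[2] = { 0xab, 0xcd };
	static const uint8_t mask_pattern[2] = { 0xff, 0xff };
	struct rte_flow_item_raw raw_spec = {
		.relative = 0,	/* spec: relative, search and limit are 0 */
		.search = 0,
		.offset = 12,	/* even and <= IXGBE_MAX_FLX_SOURCE_OFF */
		.limit = 0,
		.length = 2,	/* exactly two flex bytes */
		.pattern = spec_pattern,
	};
	struct rte_flow_item_raw raw_mask = {
		.relative = 1,	/* mask: every checked field fully set */
		.search = 1,
		.offset = (int32_t)0xffffffff, /* reads back as 0xffffffff */
		.limit = 0xffff,
		.length = 0xffff,
		.pattern = mask_pattern, /* pattern mask must be 0xff 0xff */
	};
	struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_RAW,
			.spec = &raw_spec,
			.mask = &raw_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_attr attr = { .ingress = 1 };

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

Whether such a flow is finally accepted still depends on the checks in
ixgbe_parse_fdir_filter() (device type and configured fdir mode).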

Fixes: 82fb702077f6 ("ixgbe: support new flow director modes for X550")
Fixes: 11777435c727 ("net/ixgbe: parse flow director filter")

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_fdir.c |   9 +-
 drivers/net/ixgbe/ixgbe_flow.c | 274 ++++++++++++++++++++++++++++-------------
 2 files changed, 195 insertions(+), 88 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index e559f0f..deb9a21 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -307,6 +307,8 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
 	/* flex byte mask */
 	if (info->mask.flex_bytes_mask == 0)
 		fdirm |= IXGBE_FDIRM_FLEX;
+	if (info->mask.src_ipv4_mask == 0 && info->mask.dst_ipv4_mask == 0)
+		fdirm |= IXGBE_FDIRM_L3P;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
@@ -356,8 +358,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
 	/* mask VM pool and DIPv6 since there are currently not supported
 	 * mask FLEX byte, it will be set in flex_conf
 	 */
-	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
-			 IXGBE_FDIRM_FLEX;
+	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
 	uint32_t fdiripv6m;
 	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
 	uint16_t mac_mask;
@@ -385,6 +386,10 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
+	/* flex byte mask */
+	if (info->mask.flex_bytes_mask == 0)
+		fdirm |= IXGBE_FDIRM_FLEX;
+
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
 	fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index f0fafeb..dc210c5 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1622,9 +1622,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	const struct rte_flow_item_raw *raw_mask;
 	const struct rte_flow_item_raw *raw_spec;
 	uint8_t j;
-
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+
 	if (!pattern) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
@@ -1651,9 +1651,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	 * value. So, we need not do anything for the not provided fields later.
 	 */
 	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
-	rule->mask.vlan_tci_mask = 0;
-	rule->mask.flex_bytes_mask = 0;
+	memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
 
 	/**
 	 * The first not void item should be
@@ -1665,7 +1663,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
 	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
-	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2201,6 +2200,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 		}
 
 		raw_mask = item->mask;
+		rule->b_mask = TRUE;
 
 		/* check mask */
 		if (raw_mask->relative != 0x1 ||
@@ -2217,6 +2217,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 		}
 
 		raw_spec = item->spec;
+		rule->b_spec = TRUE;
 
 		/* check spec */
 		if (raw_spec->relative != 0 ||
@@ -2323,6 +2324,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_eth *eth_mask;
 	const struct rte_flow_item_vlan *vlan_spec;
 	const struct rte_flow_item_vlan *vlan_mask;
+	const struct rte_flow_item_raw *raw_mask;
+	const struct rte_flow_item_raw *raw_spec;
 	uint32_t j;
 
 	if (!pattern) {
@@ -2351,8 +2354,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	 * value. So, we need not do anything for the not provided fields later.
 	 */
 	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
-	rule->mask.vlan_tci_mask = 0;
+	memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
 
 	/**
 	 * The first not void item should be
@@ -2364,7 +2366,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
 	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
-	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE &&
+	    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2520,6 +2523,18 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 				&rule->ixgbe_fdir.formatted.tni_vni),
 				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
 		}
+		/* check if the next not void item is MAC VLAN RAW or END*/
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END){
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
 	}
 
 	/* Get the NVGRE info */
@@ -2616,16 +2631,19 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
 			nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
 		}
-	}
 
-	/* check if the next not void item is MAC */
-	item = next_no_void_pattern(pattern, item);
-	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
+		/* check if the next not void item is MAC VLAN RAW or END*/
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END){
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
 	}
 
 	/**
@@ -2633,92 +2651,91 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	 * others should be masked.
 	 */
 
-	if (!item->mask) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			item, "Not supported last point for range");
-		return -rte_errno;
-	}
-	rule->b_mask = TRUE;
-	eth_mask = item->mask;
-
-	/* Ether type should be masked. */
-	if (eth_mask->type) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-
-	/* src MAC address should be masked. */
-	for (j = 0; j < ETHER_ADDR_LEN; j++) {
-		if (eth_mask->src.addr_bytes[j]) {
-			memset(rule, 0,
-			       sizeof(struct ixgbe_fdir_rule));
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		if (!item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by fdir filter");
 			return -rte_errno;
 		}
-	}
-	rule->mask.mac_addr_byte_mask = 0;
-	for (j = 0; j < ETHER_ADDR_LEN; j++) {
-		/* It's a per byte mask. */
-		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
-			rule->mask.mac_addr_byte_mask |= 0x1 << j;
-		} else if (eth_mask->dst.addr_bytes[j]) {
-			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		/*Not supported last point for range*/
+		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		rule->b_mask = TRUE;
+		eth_mask = item->mask;
+
+		/* Ether type should be masked. */
+		if (eth_mask->type) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by fdir filter");
 			return -rte_errno;
 		}
-	}
 
-	/* When no vlan, considered as full mask. */
-	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
-
-	if (item->spec) {
-		rule->b_spec = TRUE;
-		eth_spec = item->spec;
-
-		/* Get the dst MAC. */
+		/* src MAC address should be masked. */
 		for (j = 0; j < ETHER_ADDR_LEN; j++) {
-			rule->ixgbe_fdir.formatted.inner_mac[j] =
-				eth_spec->dst.addr_bytes[j];
+			if (eth_mask->src.addr_bytes[j]) {
+				memset(rule, 0,
+			       sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
+		}
+		for (j = 0; j < ETHER_ADDR_LEN; j++) {
+			/* It's a per byte mask. */
+			if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+				rule->mask.mac_addr_byte_mask |= 0x1 << j;
+			} else if (eth_mask->dst.addr_bytes[j]) {
+				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
 		}
-	}
 
-	/**
-	 * Check if the next not void item is vlan or ipv4.
-	 * IPv6 is not supported.
-	 */
-	item = next_no_void_pattern(pattern, item);
-	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
-		(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
+		if (item->spec) {
+			rule->b_spec = TRUE;
+			eth_spec = item->spec;
+
+			/* Get the dst MAC. */
+			for (j = 0; j < ETHER_ADDR_LEN; j++) {
+				rule->ixgbe_fdir.formatted.inner_mac[j] =
+					eth_spec->dst.addr_bytes[j];
+			}
+		}
+		/**
+		 * Check if the next not void item is vlan or ipv4.
+		 * IPv6 is not supported.
+		 */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+			/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 			item, "Not supported last point for range");
-		return -rte_errno;
+			return -rte_errno;
+		}
 	}
 
+
 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
 		if (!(item->spec && item->mask)) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2736,10 +2753,90 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 		rule->mask.vlan_tci_mask = vlan_mask->tci;
 		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
 		/* More than one tags are not supported. */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+	}
+
+	/* Get the flex byte info */
+	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+		/* Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		/* mask should not be null */
+		if (!item->mask || !item->spec) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		raw_mask = item->mask;
+		rule->b_mask = TRUE;
 
+		/* check mask */
+		if (raw_mask->relative != 0x1 ||
+		    raw_mask->search != 0x1 ||
+		    raw_mask->reserved != 0x0 ||
+		    (uint32_t)raw_mask->offset != 0xffffffff ||
+		    raw_mask->limit != 0xffff ||
+		    raw_mask->length != 0xffff) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		raw_spec = item->spec;
+		rule->b_spec = TRUE;
+
+		/* check spec */
+		if (raw_spec->relative != 0 ||
+		    raw_spec->search != 0 ||
+		    raw_spec->reserved != 0 ||
+		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
+		    raw_spec->offset % 2 ||
+		    raw_spec->limit != 0 ||
+		    raw_spec->length != 2 ||
+		    /* pattern can't be 0xffff */
+		    (raw_spec->pattern[0] == 0xff &&
+		     raw_spec->pattern[1] == 0xff)) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		/* check pattern mask */
+		if (raw_mask->pattern[0] != 0xff ||
+		    raw_mask->pattern[1] != 0xff) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		rule->mask.flex_bytes_mask = 0xffff;
+		rule->ixgbe_fdir.formatted.flex_bytes =
+			(((uint16_t)raw_spec->pattern[1]) << 8) |
+			raw_spec->pattern[0];
+		rule->flex_bytes_offset = raw_spec->offset;
 		/* check if the next not void item is END */
 		item = next_no_void_pattern(pattern, item);
-
 		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
@@ -2776,12 +2873,17 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
 		hw->mac.type != ixgbe_mac_X550EM_a)
 		return -ENOTSUP;
 
+	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+		goto tunnel_filter;
+
 	ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
 					actions, rule, error);
 
 	if (!ret)
 		goto step_next;
 
+tunnel_filter:
+
 	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
 					actions, rule, error);
 
-- 
2.7.5

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-dev] [PATCH v2 2/2] app/testpmd: add support of hex string parser for flow API
  2018-12-25  5:44 ` [dpdk-dev] [PATCH v2 0/2] " Wei Zhao
  2018-12-25  5:44   ` [dpdk-dev] [PATCH v2 1/2] net/ixgbe: " Wei Zhao
@ 2018-12-25  5:44   ` Wei Zhao
  1 sibling, 0 replies; 10+ messages in thread
From: Wei Zhao @ 2018-12-25  5:44 UTC (permalink / raw)
  To: dev; +Cc: stable, wenzhuo.lu, qi.z.zhang, yuan.peng, Wei Zhao

There is a need for users to use the flexible byte filter on ixgbe.
The flexible bytes should be passed down as hex numbers rather than
as a character string. This patch enables users to configure the
flexible byte filter with hex numbers: the cmdline flow parser now
accepts a HEX token so that the pattern is no longer passed as an
ASCII string.
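
As a rough, standalone sketch of the conversion this parser performs
(the helper name and test string below are made up; the real
implementation lives in parse_hex()/parse_hex_string() in
cmdline_flow.c): an even-length hex string, optionally prefixed with
"0x", is turned into raw bytes, so "0xabcd" becomes the flex bytes
0xab 0xcd.

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical standalone helper mirroring the behaviour added to
 * cmdline_flow.c: convert an even-length hex string (optional "0x"
 * prefix) into raw bytes. Returns 0 on success, -1 on bad input. */
static int
hexstr_to_bytes(const char *src, uint8_t *dst, size_t *size)
{
	size_t len, i;

	if (src == NULL || dst == NULL || size == NULL)
		return -1;
	if (src[0] == '0' && (src[1] == 'x' || src[1] == 'X'))
		src += 2;
	len = strlen(src);
	if (len == 0 || (len & 1) != 0 || len / 2 > *size)
		return -1;
	for (i = 0; i < len; i++)
		if (!isxdigit((unsigned char)src[i]))
			return -1;
	for (i = 0; i < len / 2; i++) {
		unsigned int byte;

		sscanf(&src[2 * i], "%2x", &byte);
		dst[i] = (uint8_t)byte;
	}
	*size = len / 2;
	return 0;
}

int
main(void)
{
	uint8_t buf[16];
	size_t n = sizeof(buf);

	if (hexstr_to_bytes("0xabcd", buf, &n) == 0)
		printf("%zu bytes: 0x%02x 0x%02x\n", n, buf[0], buf[1]);
	return 0;
}

The patch itself wires this conversion into parse_hex(), which then
reuses parse_int() to fill in the length field before copying the
converted bytes into the item.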

Fixes: abc3d81aca1b ("app/testpmd: add item raw to flow command")

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 app/test-pmd/cmdline_flow.c | 141 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 140 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 8b7a5c0..ba9b692 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -35,6 +35,7 @@ enum index {
 	PREFIX,
 	BOOLEAN,
 	STRING,
+	HEX,
 	MAC_ADDR,
 	IPV4_ADDR,
 	IPV6_ADDR,
@@ -1122,6 +1123,9 @@ static int parse_boolean(struct context *, const struct token *,
 static int parse_string(struct context *, const struct token *,
 			const char *, unsigned int,
 			void *, unsigned int);
+static int parse_hex(struct context *ctx, const struct token *token,
+			const char *str, unsigned int len,
+			void *buf, unsigned int size);
 static int parse_mac_addr(struct context *, const struct token *,
 			  const char *, unsigned int,
 			  void *, unsigned int);
@@ -1198,6 +1202,13 @@ static const struct token token_list[] = {
 		.call = parse_string,
 		.comp = comp_none,
 	},
+	[HEX] = {
+		.name = "{hex}",
+		.type = "HEX",
+		.help = "fixed string",
+		.call = parse_hex,
+		.comp = comp_none,
+	},
 	[MAC_ADDR] = {
 		.name = "{MAC address}",
 		.type = "MAC-48",
@@ -1544,7 +1555,7 @@ static const struct token token_list[] = {
 		.name = "pattern",
 		.help = "byte string to look for",
 		.next = NEXT(item_raw,
-			     NEXT_ENTRY(STRING),
+			     NEXT_ENTRY(HEX),
 			     NEXT_ENTRY(ITEM_PARAM_IS,
 					ITEM_PARAM_SPEC,
 					ITEM_PARAM_MASK)),
@@ -4441,6 +4452,134 @@ parse_string(struct context *ctx, const struct token *token,
 	return -1;
 }
 
+static uint32_t
+get_hex_val(char c)
+{
+	switch (c) {
+	case '0': case '1': case '2': case '3': case '4': case '5':
+	case '6': case '7': case '8': case '9':
+		return c - '0';
+	case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+		return c - 'A' + 10;
+	case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+		return c - 'a' + 10;
+	default:
+		return 0;
+	}
+}
+
+static int
+parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
+{
+	const char *c;
+	uint32_t i;
+
+	/* Check input parameters */
+	if ((src == NULL) ||
+		(dst == NULL) ||
+		(size == NULL) ||
+		(*size == 0))
+		return -1;
+	if ((*size & 1) != 0)
+		return -1;
+
+	for (c = src, i = 0; i < *size; c++, i++) {
+		if ((((*c) >= '0') && ((*c) <= '9')) ||
+			(((*c) >= 'A') && ((*c) <= 'F')) ||
+			(((*c) >= 'a') && ((*c) <= 'f')))
+			continue;
+
+		return -1;
+	}
+	*size = *size / 2;
+
+	/* Convert chars to bytes */
+	for (i = 0; i < *size; i++)
+		dst[i] = get_hex_val(src[2 * i]) * 16 +
+			get_hex_val(src[2 * i + 1]);
+
+	return 0;
+}
+
+static int
+parse_hex(struct context *ctx, const struct token *token,
+		const char *str, unsigned int len,
+		void *buf, unsigned int size)
+{
+	const struct arg *arg_data = pop_args(ctx);
+	const struct arg *arg_len = pop_args(ctx);
+	const struct arg *arg_addr = pop_args(ctx);
+	char tmp[16]; /* Ought to be enough. */
+	int ret;
+	unsigned int hexlen = len;
+	uint8_t hex_tmp[16];
+
+	/* Arguments are expected. */
+	if (!arg_data)
+		return -1;
+	if (!arg_len) {
+		push_args(ctx, arg_data);
+		return -1;
+	}
+	if (!arg_addr) {
+		push_args(ctx, arg_len);
+		push_args(ctx, arg_data);
+		return -1;
+	}
+	size = arg_data->size;
+	/* Bit-mask fill is not supported. */
+	if (arg_data->mask || size < len)
+		goto error;
+	if (!ctx->object)
+		return len;
+
+	/* translate bytes string to array. */
+	if (str[0] == '0' && ((str[1] == 'x') ||
+			(str[1] == 'X'))) {
+		str += 2;
+		hexlen -= 2;
+	}
+	parse_hex_string(str, hex_tmp, &hexlen);
+	/* Let parse_int() fill length information first. */
+	ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
+	if (ret < 0)
+		goto error;
+	push_args(ctx, arg_len);
+	ret = parse_int(ctx, token, tmp, ret, NULL, 0);
+	if (ret < 0) {
+		pop_args(ctx);
+		goto error;
+	}
+	buf = (uint8_t *)ctx->object + arg_data->offset;
+	/* Output buffer is not necessarily NUL-terminated. */
+	memcpy(buf, hex_tmp, hexlen);
+	memset((uint8_t *)buf + len, 0x00, size - hexlen);
+	if (ctx->objmask)
+		memset((uint8_t *)ctx->objmask + arg_data->offset,
+					0xff, hexlen);
+	/* Save address if requested. */
+	if (arg_addr->size) {
+		memcpy((uint8_t *)ctx->object + arg_addr->offset,
+		       (void *[]){
+			(uint8_t *)ctx->object + arg_data->offset
+		       },
+		       arg_addr->size);
+		if (ctx->objmask)
+			memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
+			       (void *[]){
+				(uint8_t *)ctx->objmask + arg_data->offset
+			       },
+			       arg_addr->size);
+	}
+	return len;
+error:
+	push_args(ctx, arg_addr);
+	push_args(ctx, arg_len);
+	push_args(ctx, arg_data);
+	return -1;
+
+}
+
 /**
  * Parse a MAC address.
  *
-- 
2.7.5

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
  2018-12-25  5:44   ` [dpdk-dev] [PATCH v2 1/2] net/ixgbe: " Wei Zhao
@ 2019-01-02  1:19     ` Zhang, Qi Z
  2019-01-02  1:53       ` Zhao1, Wei
  0 siblings, 1 reply; 10+ messages in thread
From: Zhang, Qi Z @ 2019-01-02  1:19 UTC (permalink / raw)
  To: Zhao1, Wei, dev; +Cc: stable, Lu, Wenzhuo, Peng, Yuan

Hi Wei:

> -----Original Message-----
> From: Zhao1, Wei
> Sent: Tuesday, December 25, 2018 1:45 PM
> To: dev@dpdk.org
> Cc: stable@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Peng, Yuan <yuan.peng@intel.com>; Zhao1, Wei
> <wei.zhao1@intel.com>
> Subject: [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
> 
> There is need for users to use flexible byte filter on x550.
> x550 support IP mode and tunnel mode flexible byte filter.
> tunnel mode flexible byte filter is used for vxlan packets and so on. It can be
> used combined with FDIR tunnel filter.
> By now, ixgbe PMD flow parer code do not support tunnel mode flexible byte
> filter for x550, So I have to enable it in function ixgbe_parse_fdir_filter_tunnel().
> Although IP mode flexible byte filter parser is support in function
> ixgbe_parse_fdir_filter_normal(), but some flow like  "flow create 0 ingress
> pattern raw pattern is 0xab / end actions queue index 3 / end" need to be
> support, so parser code also need change a little.
> This patch enable all of these feature.

I would suggest dividing this patch into 3 patches:
1. more accurate input set mask setup (including all changes in ixgbe_fdir.c)
2. support flexbyte without an IP layer (including the changes in ixgbe_parse_fdir_filter_normal)
3. support flexbyte in tunnel mode (including the changes in ixgbe_parse_fdir_filter_tunnel)

Btw, please make sure to sync the acceptable flow pattern/action descriptions above the ixgbe_parse_fdir_filter_xxxxx functions with their new behaviors.

Thanks
Qi

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
  2019-01-02  1:19     ` Zhang, Qi Z
@ 2019-01-02  1:53       ` Zhao1, Wei
  2019-01-02  2:25         ` Zhang, Qi Z
  0 siblings, 1 reply; 10+ messages in thread
From: Zhao1, Wei @ 2019-01-02  1:53 UTC (permalink / raw)
  To: Zhang, Qi Z, dev; +Cc: stable, Lu, Wenzhuo, Peng, Yuan

Hi, qi  

> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Wednesday, January 2, 2019 9:19 AM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: stable@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Peng, Yuan
> <yuan.peng@intel.com>
> Subject: RE: [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
> 
> HI Wei:
> 
> > -----Original Message-----
> > From: Zhao1, Wei
> > Sent: Tuesday, December 25, 2018 1:45 PM
> > To: dev@dpdk.org
> > Cc: stable@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Zhang, Qi Z
> > <qi.z.zhang@intel.com>; Peng, Yuan <yuan.peng@intel.com>; Zhao1, Wei
> > <wei.zhao1@intel.com>
> > Subject: [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
> >
> > There is need for users to use flexible byte filter on x550.
> > x550 support IP mode and tunnel mode flexible byte filter.
> > tunnel mode flexible byte filter is used for vxlan packets and so on.
> > It can be used combined with FDIR tunnel filter.
> > By now, ixgbe PMD flow parer code do not support tunnel mode flexible
> > byte filter for x550, So I have to enable it in function
> ixgbe_parse_fdir_filter_tunnel().
> > Although IP mode flexible byte filter parser is support in function
> > ixgbe_parse_fdir_filter_normal(), but some flow like  "flow create 0
> > ingress pattern raw pattern is 0xab / end actions queue index 3 / end"
> > need to be support, so parser code also need change a little.
> > This patch enable all of these feature.
> 
> I would suggest to divide this patch into 3 patches 1. more accurate input set
> mask setup (include all changes in ixgbe_fdir.c) 2. support flexbyte without IP
> layer (include changes in ixgbe_parse_fdir_filter_normal) 3. support flexbyte
> in tunnel mode (include changes in ixgbe_parse_fdir_flilter_tunnel)
> 
> Btw, please make sure to sync the acceptable flow pattern/action description
> above function ixgbe_parse_fdir_filter_xxxxx to their new behaviors

There will be 4 patches in this set, because there is another one for the flow_cmdline parser. Is that ok?

> 
> Thanks
> Qi

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
  2019-01-02  1:53       ` Zhao1, Wei
@ 2019-01-02  2:25         ` Zhang, Qi Z
  0 siblings, 0 replies; 10+ messages in thread
From: Zhang, Qi Z @ 2019-01-02  2:25 UTC (permalink / raw)
  To: Zhao1, Wei, dev; +Cc: stable, Lu, Wenzhuo, Peng, Yuan



> -----Original Message-----
> From: Zhao1, Wei
> Sent: Wednesday, January 2, 2019 9:54 AM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; dev@dpdk.org
> Cc: stable@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Peng, Yuan
> <yuan.peng@intel.com>
> Subject: RE: [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
> 
> Hi, qi
> 
> > -----Original Message-----
> > From: Zhang, Qi Z
> > Sent: Wednesday, January 2, 2019 9:19 AM
> > To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> > Cc: stable@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Peng, Yuan
> > <yuan.peng@intel.com>
> > Subject: RE: [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte
> > filter
> >
> > HI Wei:
> >
> > > -----Original Message-----
> > > From: Zhao1, Wei
> > > Sent: Tuesday, December 25, 2018 1:45 PM
> > > To: dev@dpdk.org
> > > Cc: stable@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Zhang, Qi Z
> > > <qi.z.zhang@intel.com>; Peng, Yuan <yuan.peng@intel.com>; Zhao1, Wei
> > > <wei.zhao1@intel.com>
> > > Subject: [PATCH v2 1/2] net/ixgbe: enable x550 flexible byte filter
> > >
> > > There is need for users to use flexible byte filter on x550.
> > > x550 support IP mode and tunnel mode flexible byte filter.
> > > tunnel mode flexible byte filter is used for vxlan packets and so on.
> > > It can be used combined with FDIR tunnel filter.
> > > By now, ixgbe PMD flow parer code do not support tunnel mode
> > > flexible byte filter for x550, So I have to enable it in function
> > ixgbe_parse_fdir_filter_tunnel().
> > > Although IP mode flexible byte filter parser is support in function
> > > ixgbe_parse_fdir_filter_normal(), but some flow like  "flow create 0
> > > ingress pattern raw pattern is 0xab / end actions queue index 3 / end"
> > > need to be support, so parser code also need change a little.
> > > This patch enable all of these feature.
> >
> > I would suggest to divide this patch into 3 patches 1. more accurate
> > input set mask setup (include all changes in ixgbe_fdir.c) 2. support
> > flexbyte without IP layer (include changes in
> > ixgbe_parse_fdir_filter_normal) 3. support flexbyte in tunnel mode
> > (include changes in ixgbe_parse_fdir_flilter_tunnel)
> >
> > Btw, please make sure to sync the acceptable flow pattern/action
> > description above function ixgbe_parse_fdir_filter_xxxxx to their new
> > behaviors
> 
> There will be 4 patch in this set, because there is other for flow_cmdline parser,
> is that ok?

It's better to separate patch 4: it's a cmdline enhancement, not driver related, so it can be reviewed and merged independently.
> 
> >
> > Thanks
> > Qi

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2019-01-02  2:26 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-12-17  5:53 [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter Zhao Wei
2018-12-17  6:19 ` Zhao1, Wei
2018-12-24 12:13 ` Zhang, Qi Z
2018-12-25  2:04   ` Zhao1, Wei
2018-12-25  5:44 ` [dpdk-dev] [PATCH v2 0/2] " Wei Zhao
2018-12-25  5:44   ` [dpdk-dev] [PATCH v2 1/2] net/ixgbe: " Wei Zhao
2019-01-02  1:19     ` Zhang, Qi Z
2019-01-02  1:53       ` Zhao1, Wei
2019-01-02  2:25         ` Zhang, Qi Z
2018-12-25  5:44   ` [dpdk-dev] [PATCH v2 2/2] app/testpmd: add support of hex string parser for flow API Wei Zhao
