DPDK patches and discussions
* [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter
@ 2019-01-02  6:31 Wei Zhao
  2019-01-02  7:02 ` Zhao1, Wei
  0 siblings, 1 reply; 6+ messages in thread
From: Wei Zhao @ 2019-01-02  6:31 UTC (permalink / raw)
  To: dev; +Cc: stable, jingjing.wu, Wei Zhao

Users need to use the flexible byte filter on x550.
This patch enables it.

Fixes: 82fb702077f6 ("ixgbe: support new flow director modes for X550")
Fixes: 11777435c727 ("net/ixgbe: parse flow director filter")

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_fdir.c |   9 +-
 drivers/net/ixgbe/ixgbe_flow.c | 274 ++++++++++++++++++++++++++++-------------
 2 files changed, 195 insertions(+), 88 deletions(-)
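
As an illustrative usage sketch (not part of this patch), an application could
program the 2-byte flexible match through rte_flow roughly as follows. The
values mirror the constraints the parser below enforces: relative, search and
limit must be zero in the spec, the length must be 2, the offset must be even,
and the mask must fully cover offset, limit, length and both pattern bytes.
The port and queue numbers are arbitrary placeholders.

#include <rte_flow.h>

static struct rte_flow *
create_flex_byte_rule(uint16_t port_id, struct rte_flow_error *err)
{
	/* Match the two bytes 0x12 0x34 at packet offset 12. */
	static const uint8_t spec_bytes[2] = { 0x12, 0x34 };
	static const uint8_t mask_bytes[2] = { 0xff, 0xff };

	struct rte_flow_item_raw raw_spec = {
		.relative = 0,
		.search = 0,
		.offset = 12,     /* must be even and within the flex range */
		.limit = 0,
		.length = 2,      /* the x550 flex filter matches exactly 2 bytes */
		.pattern = spec_bytes,
	};
	struct rte_flow_item_raw raw_mask = {
		.relative = 1,    /* every checked field must be fully masked */
		.search = 1,
		.offset = -1,     /* i.e. 0xffffffff, offset fully masked */
		.limit = 0xffff,
		.length = 0xffff,
		.pattern = mask_bytes,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_RAW,
		  .spec = &raw_spec, .mask = &raw_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_attr attr = { .ingress = 1 };

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}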

diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index e559f0f..deb9a21 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -307,6 +307,8 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
 	/* flex byte mask */
 	if (info->mask.flex_bytes_mask == 0)
 		fdirm |= IXGBE_FDIRM_FLEX;
+	if (info->mask.src_ipv4_mask == 0 && info->mask.dst_ipv4_mask == 0)
+		fdirm |= IXGBE_FDIRM_L3P;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
@@ -356,8 +358,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
 	/* mask VM pool and DIPv6 since there are currently not supported
 	 * mask FLEX byte, it will be set in flex_conf
 	 */
-	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
-			 IXGBE_FDIRM_FLEX;
+	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
 	uint32_t fdiripv6m;
 	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
 	uint16_t mac_mask;
@@ -385,6 +386,10 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
+	/* flex byte mask */
+	if (info->mask.flex_bytes_mask == 0)
+		fdirm |= IXGBE_FDIRM_FLEX;
+
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
 	fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index f0fafeb..dc210c5 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1622,9 +1622,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	const struct rte_flow_item_raw *raw_mask;
 	const struct rte_flow_item_raw *raw_spec;
 	uint8_t j;
-
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+
 	if (!pattern) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
@@ -1651,9 +1651,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	 * value. So, we need not do anything for the not provided fields later.
 	 */
 	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
-	rule->mask.vlan_tci_mask = 0;
-	rule->mask.flex_bytes_mask = 0;
+	memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
 
 	/**
 	 * The first not void item should be
@@ -1665,7 +1663,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
 	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
-	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2201,6 +2200,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 		}
 
 		raw_mask = item->mask;
+		rule->b_mask = TRUE;
 
 		/* check mask */
 		if (raw_mask->relative != 0x1 ||
@@ -2217,6 +2217,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 		}
 
 		raw_spec = item->spec;
+		rule->b_spec = TRUE;
 
 		/* check spec */
 		if (raw_spec->relative != 0 ||
@@ -2323,6 +2324,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_eth *eth_mask;
 	const struct rte_flow_item_vlan *vlan_spec;
 	const struct rte_flow_item_vlan *vlan_mask;
+	const struct rte_flow_item_raw *raw_mask;
+	const struct rte_flow_item_raw *raw_spec;
 	uint32_t j;
 
 	if (!pattern) {
@@ -2351,8 +2354,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	 * value. So, we need not do anything for the not provided fields later.
 	 */
 	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
-	rule->mask.vlan_tci_mask = 0;
+	memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
 
 	/**
 	 * The first not void item should be
@@ -2364,7 +2366,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
 	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
-	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE &&
+	    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2520,6 +2523,18 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 				&rule->ixgbe_fdir.formatted.tni_vni),
 				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
 		}
+		/* check if the next not void item is MAC VLAN RAW or END*/
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END){
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
 	}
 
 	/* Get the NVGRE info */
@@ -2616,16 +2631,19 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
 			nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
 		}
-	}
 
-	/* check if the next not void item is MAC */
-	item = next_no_void_pattern(pattern, item);
-	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
+		/* check if the next not void item is MAC VLAN RAW or END*/
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END){
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
 	}
 
 	/**
@@ -2633,92 +2651,91 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	 * others should be masked.
 	 */
 
-	if (!item->mask) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			item, "Not supported last point for range");
-		return -rte_errno;
-	}
-	rule->b_mask = TRUE;
-	eth_mask = item->mask;
-
-	/* Ether type should be masked. */
-	if (eth_mask->type) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-
-	/* src MAC address should be masked. */
-	for (j = 0; j < ETHER_ADDR_LEN; j++) {
-		if (eth_mask->src.addr_bytes[j]) {
-			memset(rule, 0,
-			       sizeof(struct ixgbe_fdir_rule));
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		if (!item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by fdir filter");
 			return -rte_errno;
 		}
-	}
-	rule->mask.mac_addr_byte_mask = 0;
-	for (j = 0; j < ETHER_ADDR_LEN; j++) {
-		/* It's a per byte mask. */
-		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
-			rule->mask.mac_addr_byte_mask |= 0x1 << j;
-		} else if (eth_mask->dst.addr_bytes[j]) {
-			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		/*Not supported last point for range*/
+		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		rule->b_mask = TRUE;
+		eth_mask = item->mask;
+
+		/* Ether type should be masked. */
+		if (eth_mask->type) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by fdir filter");
 			return -rte_errno;
 		}
-	}
 
-	/* When no vlan, considered as full mask. */
-	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
-
-	if (item->spec) {
-		rule->b_spec = TRUE;
-		eth_spec = item->spec;
-
-		/* Get the dst MAC. */
+		/* src MAC address should be masked. */
 		for (j = 0; j < ETHER_ADDR_LEN; j++) {
-			rule->ixgbe_fdir.formatted.inner_mac[j] =
-				eth_spec->dst.addr_bytes[j];
+			if (eth_mask->src.addr_bytes[j]) {
+				memset(rule, 0,
+			       sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
+		}
+		for (j = 0; j < ETHER_ADDR_LEN; j++) {
+			/* It's a per byte mask. */
+			if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+				rule->mask.mac_addr_byte_mask |= 0x1 << j;
+			} else if (eth_mask->dst.addr_bytes[j]) {
+				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
 		}
-	}
 
-	/**
-	 * Check if the next not void item is vlan or ipv4.
-	 * IPv6 is not supported.
-	 */
-	item = next_no_void_pattern(pattern, item);
-	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
-		(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
+		if (item->spec) {
+			rule->b_spec = TRUE;
+			eth_spec = item->spec;
+
+			/* Get the dst MAC. */
+			for (j = 0; j < ETHER_ADDR_LEN; j++) {
+				rule->ixgbe_fdir.formatted.inner_mac[j] =
+					eth_spec->dst.addr_bytes[j];
+			}
+		}
+		/**
+		 * Check if the next not void item is vlan or ipv4.
+		 * IPv6 is not supported.
+		 */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+			/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 			item, "Not supported last point for range");
-		return -rte_errno;
+			return -rte_errno;
+		}
 	}
 
+
 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
 		if (!(item->spec && item->mask)) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2736,10 +2753,90 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 		rule->mask.vlan_tci_mask = vlan_mask->tci;
 		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
 		/* More than one tags are not supported. */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+	}
+
+	/* Get the flex byte info */
+	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+		/* Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		/* mask should not be null */
+		if (!item->mask || !item->spec) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		raw_mask = item->mask;
+		rule->b_mask = TRUE;
 
+		/* check mask */
+		if (raw_mask->relative != 0x1 ||
+		    raw_mask->search != 0x1 ||
+		    raw_mask->reserved != 0x0 ||
+		    (uint32_t)raw_mask->offset != 0xffffffff ||
+		    raw_mask->limit != 0xffff ||
+		    raw_mask->length != 0xffff) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		raw_spec = item->spec;
+		rule->b_spec = TRUE;
+
+		/* check spec */
+		if (raw_spec->relative != 0 ||
+		    raw_spec->search != 0 ||
+		    raw_spec->reserved != 0 ||
+		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
+		    raw_spec->offset % 2 ||
+		    raw_spec->limit != 0 ||
+		    raw_spec->length != 2 ||
+		    /* pattern can't be 0xffff */
+		    (raw_spec->pattern[0] == 0xff &&
+		     raw_spec->pattern[1] == 0xff)) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		/* check pattern mask */
+		if (raw_mask->pattern[0] != 0xff ||
+		    raw_mask->pattern[1] != 0xff) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		rule->mask.flex_bytes_mask = 0xffff;
+		rule->ixgbe_fdir.formatted.flex_bytes =
+			(((uint16_t)raw_spec->pattern[1]) << 8) |
+			raw_spec->pattern[0];
+		rule->flex_bytes_offset = raw_spec->offset;
 		/* check if the next not void item is END */
 		item = next_no_void_pattern(pattern, item);
-
 		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
@@ -2776,12 +2873,17 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
 		hw->mac.type != ixgbe_mac_X550EM_a)
 		return -ENOTSUP;
 
+	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+		goto tunnel_filter;
+
 	ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
 					actions, rule, error);
 
 	if (!ret)
 		goto step_next;
 
+tunnel_filter:
+
 	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
 					actions, rule, error);
 
-- 
2.7.5

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter
  2019-01-02  6:31 [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter Wei Zhao
@ 2019-01-02  7:02 ` Zhao1, Wei
  0 siblings, 0 replies; 6+ messages in thread
From: Zhao1, Wei @ 2019-01-02  7:02 UTC (permalink / raw)
  To: dev; +Cc: stable, Wu, Jingjing

An error occurred while sending this patch, please ignore it.

> -----Original Message-----
> From: Zhao1, Wei
> Sent: Wednesday, January 2, 2019 2:31 PM
> To: dev@dpdk.org
> Cc: stable@dpdk.org; Wu, Jingjing <jingjing.wu@intel.com>; Zhao1, Wei
> <wei.zhao1@intel.com>
> Subject: [PATCH] net/ixgbe: enable x550 flexible byte filter

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter
  2018-12-24 12:13 ` Zhang, Qi Z
@ 2018-12-25  2:04   ` Zhao1, Wei
  0 siblings, 0 replies; 6+ messages in thread
From: Zhao1, Wei @ 2018-12-25  2:04 UTC (permalink / raw)
  To: Zhang, Qi Z, dev; +Cc: adrien.mazarguil, stable, Lu, Wenzhuo

Hi, Qi

> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Monday, December 24, 2018 8:13 PM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: adrien.mazarguil@6wind.com; stable@dpdk.org; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>
> Subject: RE: [PATCH] net/ixgbe: enable x550 flexible byte filter
> 
> Hi Wei:
> 
> > -----Original Message-----
> > From: Zhao1, Wei
> > Sent: Monday, December 17, 2018 1:53 PM
> > To: dev@dpdk.org
> > Cc: adrien.mazarguil@6wind.com; stable@dpdk.org; Lu, Wenzhuo
> > <wenzhuo.lu@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Zhao1, Wei
> > <wei.zhao1@intel.com>
> > Subject: [PATCH] net/ixgbe: enable x550 flexible byte filter
> >
> > Users need to use the flexible byte filter on x550.
> > This patch enables it.
> 
> It's difficult for me to review a large patch without understanding its
> purpose clearly.
> From the description here, my understanding is that you are just trying to
> enable an existing feature for a specific device ID.
> But then why are there so many changes in your patch? Would you explain in
> more detail what the gap is here?

It is because the ixgbe flow parser code does not support the tunnel mode flexible byte filter for x550,
so I have to enable it in ixgbe_parse_fdir_filter_tunnel().
> BTW, it's better to separate this into two patches: one for the fdir layer
> and one for the rte_flow layer.

Yes, I will split it in v2. I will also add some code to the flow CLI to parse HEX numbers, so that the value does not have to be passed as a string.

> 
> Thanks
> Qi
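
As an illustrative sketch of the tunnel-mode gap being closed (again not part
of the patch itself), the reworked ixgbe_parse_fdir_filter_tunnel() is meant
to accept an item sequence like the following: unspecified outer headers, a
VXLAN VNI, the inner destination MAC with a per-byte mask, and a 2-byte
flexible match. All values are arbitrary placeholders; the RAW spec/mask
constraints are the same as in the non-tunnel path.

#include <rte_flow.h>

/* Placeholder spec/mask values; the RAW constraints (length 2, even
 * offset, fully set mask) match those checked in the non-tunnel path. */
static const uint8_t flex_spec[2] = { 0x86, 0xdd };
static const uint8_t flex_mask[2] = { 0xff, 0xff };

static const struct rte_flow_item_vxlan vxlan_spec = {
	.vni = { 0x00, 0x00, 0x01 },
};
static const struct rte_flow_item_vxlan vxlan_mask = {
	.vni = { 0xff, 0xff, 0xff },
};
static const struct rte_flow_item_eth inner_eth_spec = {
	.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
};
static const struct rte_flow_item_eth inner_eth_mask = {
	.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
};
static const struct rte_flow_item_raw raw_spec = {
	.offset = 4, .length = 2, .pattern = flex_spec,
};
static const struct rte_flow_item_raw raw_mask = {
	.relative = 1, .search = 1, .offset = -1,
	.limit = 0xffff, .length = 0xffff, .pattern = flex_mask,
};

static const struct rte_flow_item tunnel_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },   /* outer headers, unspecified */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
	  .spec = &vxlan_spec, .mask = &vxlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,     /* inner dst MAC, per-byte mask */
	  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_RAW,     /* two flex bytes */
	  .spec = &raw_spec, .mask = &raw_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};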

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter
  2018-12-17  5:53 Zhao Wei
  2018-12-17  6:19 ` Zhao1, Wei
@ 2018-12-24 12:13 ` Zhang, Qi Z
  2018-12-25  2:04   ` Zhao1, Wei
  1 sibling, 1 reply; 6+ messages in thread
From: Zhang, Qi Z @ 2018-12-24 12:13 UTC (permalink / raw)
  To: Zhao1, Wei, dev; +Cc: adrien.mazarguil, stable, Lu, Wenzhuo

Hi Wei:

> -----Original Message-----
> From: Zhao1, Wei
> Sent: Monday, December 17, 2018 1:53 PM
> To: dev@dpdk.org
> Cc: adrien.mazarguil@6wind.com; stable@dpdk.org; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Zhao1, Wei
> <wei.zhao1@intel.com>
> Subject: [PATCH] net/ixgbe: enable x550 flexible byte filter
> 
> There is need for users to use flexible byte filter on x550.
> This patch enable it.

It's difficult for me to review a large patch without understanding its purpose clearly.
From the description here, my understanding is that you are just trying to enable an existing feature for a specific device ID.
But then why are there so many changes in your patch? Would you explain in more detail what the gap is here?
BTW, it's better to separate this into two patches: one for the fdir layer and one for the rte_flow layer.

Thanks
Qi
> 
> Fixes: 82fb702077f6 ("ixgbe: support new flow director modes for X550")
> Fixes: 11777435c727 ("net/ixgbe: parse flow director filter")
> 
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> ---
>  drivers/net/ixgbe/ixgbe_fdir.c |   9 +-
>  drivers/net/ixgbe/ixgbe_flow.c | 274
> ++++++++++++++++++++++++++++-------------
>  2 files changed, 195 insertions(+), 88 deletions(-)
> 
> diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c index
> e559f0f..deb9a21 100644
> --- a/drivers/net/ixgbe/ixgbe_fdir.c
> +++ b/drivers/net/ixgbe/ixgbe_fdir.c
> @@ -307,6 +307,8 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
>  	/* flex byte mask */
>  	if (info->mask.flex_bytes_mask == 0)
>  		fdirm |= IXGBE_FDIRM_FLEX;
> +	if (info->mask.src_ipv4_mask == 0 && info->mask.dst_ipv4_mask == 0)
> +		fdirm |= IXGBE_FDIRM_L3P;
> 
>  	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
> 
> @@ -356,8 +358,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
>  	/* mask VM pool and DIPv6 since there are currently not supported
>  	 * mask FLEX byte, it will be set in flex_conf
>  	 */
> -	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
> -			 IXGBE_FDIRM_FLEX;
> +	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
>  	uint32_t fdiripv6m;
>  	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
>  	uint16_t mac_mask;
> @@ -385,6 +386,10 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
>  		return -EINVAL;
>  	}
> 
> +	/* flex byte mask */
> +	if (info->mask.flex_bytes_mask == 0)
> +		fdirm |= IXGBE_FDIRM_FLEX;
> +
>  	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
> 
>  	fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT); diff --git
> a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c index
> f0fafeb..dc210c5 100644
> --- a/drivers/net/ixgbe/ixgbe_flow.c
> +++ b/drivers/net/ixgbe/ixgbe_flow.c
> @@ -1622,9 +1622,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev
> *dev,
>  	const struct rte_flow_item_raw *raw_mask;
>  	const struct rte_flow_item_raw *raw_spec;
>  	uint8_t j;
> -
>  	struct ixgbe_hw *hw =
> IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> 
> +
>  	if (!pattern) {
>  		rte_flow_error_set(error, EINVAL,
>  			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> @@ -1651,9 +1651,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev
> *dev,
>  	 * value. So, we need not do anything for the not provided fields later.
>  	 */
>  	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> -	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
> -	rule->mask.vlan_tci_mask = 0;
> -	rule->mask.flex_bytes_mask = 0;
> +	memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
> 
>  	/**
>  	 * The first not void item should be
> @@ -1665,7 +1663,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev
> *dev,
>  	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
>  	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
>  	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
> -	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
> +	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
> +	    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
>  		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
>  		rte_flow_error_set(error, EINVAL,
>  			RTE_FLOW_ERROR_TYPE_ITEM,
> @@ -2201,6 +2200,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev
> *dev,
>  		}
> 
>  		raw_mask = item->mask;
> +		rule->b_mask = TRUE;
> 
>  		/* check mask */
>  		if (raw_mask->relative != 0x1 ||
> @@ -2217,6 +2217,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev
> *dev,
>  		}
> 
>  		raw_spec = item->spec;
> +		rule->b_spec = TRUE;
> 
>  		/* check spec */
>  		if (raw_spec->relative != 0 ||
> @@ -2323,6 +2324,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct
> rte_flow_attr *attr,
>  	const struct rte_flow_item_eth *eth_mask;
>  	const struct rte_flow_item_vlan *vlan_spec;
>  	const struct rte_flow_item_vlan *vlan_mask;
> +	const struct rte_flow_item_raw *raw_mask;
> +	const struct rte_flow_item_raw *raw_spec;
>  	uint32_t j;
> 
>  	if (!pattern) {
> @@ -2351,8 +2354,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct
> rte_flow_attr *attr,
>  	 * value. So, we need not do anything for the not provided fields later.
>  	 */
>  	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> -	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
> -	rule->mask.vlan_tci_mask = 0;
> +	memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
> 
>  	/**
>  	 * The first not void item should be
> @@ -2364,7 +2366,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct
> rte_flow_attr *attr,
>  	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
>  	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
>  	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
> -	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
> +	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE &&
> +	    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
>  		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
>  		rte_flow_error_set(error, EINVAL,
>  			RTE_FLOW_ERROR_TYPE_ITEM,
> @@ -2520,6 +2523,18 @@ ixgbe_parse_fdir_filter_tunnel(const struct
> rte_flow_attr *attr,
>  				&rule->ixgbe_fdir.formatted.tni_vni),
>  				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
>  		}
> +		/* check if the next not void item is MAC VLAN RAW or END*/
> +		item = next_no_void_pattern(pattern, item);
> +		if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
> +			item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
> +			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
> +			item->type != RTE_FLOW_ITEM_TYPE_END){
> +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM,
> +				item, "Not supported by fdir filter");
> +			return -rte_errno;
> +		}
>  	}
> 
>  	/* Get the NVGRE info */
> @@ -2616,16 +2631,19 @@ ixgbe_parse_fdir_filter_tunnel(const struct
> rte_flow_attr *attr,
>  			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
>  			nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
>  		}
> -	}
> 
> -	/* check if the next not void item is MAC */
> -	item = next_no_void_pattern(pattern, item);
> -	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
> -		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> -		rte_flow_error_set(error, EINVAL,
> -			RTE_FLOW_ERROR_TYPE_ITEM,
> -			item, "Not supported by fdir filter");
> -		return -rte_errno;
> +		/* check if the next not void item is MAC VLAN RAW or END*/
> +		item = next_no_void_pattern(pattern, item);
> +		if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
> +			item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
> +			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
> +			item->type != RTE_FLOW_ITEM_TYPE_END){
> +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM,
> +				item, "Not supported by fdir filter");
> +			return -rte_errno;
> +		}
>  	}
> 
>  	/**
> @@ -2633,92 +2651,91 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
>  	 * others should be masked.
>  	 */
> 
> -	if (!item->mask) {
> -		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> -		rte_flow_error_set(error, EINVAL,
> -			RTE_FLOW_ERROR_TYPE_ITEM,
> -			item, "Not supported by fdir filter");
> -		return -rte_errno;
> -	}
> -	/*Not supported last point for range*/
> -	if (item->last) {
> -		rte_flow_error_set(error, EINVAL,
> -			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> -			item, "Not supported last point for range");
> -		return -rte_errno;
> -	}
> -	rule->b_mask = TRUE;
> -	eth_mask = item->mask;
> -
> -	/* Ether type should be masked. */
> -	if (eth_mask->type) {
> -		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> -		rte_flow_error_set(error, EINVAL,
> -			RTE_FLOW_ERROR_TYPE_ITEM,
> -			item, "Not supported by fdir filter");
> -		return -rte_errno;
> -	}
> -
> -	/* src MAC address should be masked. */
> -	for (j = 0; j < ETHER_ADDR_LEN; j++) {
> -		if (eth_mask->src.addr_bytes[j]) {
> -			memset(rule, 0,
> -			       sizeof(struct ixgbe_fdir_rule));
> +	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
> +		if (!item->mask) {
> +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
>  			rte_flow_error_set(error, EINVAL,
>  				RTE_FLOW_ERROR_TYPE_ITEM,
>  				item, "Not supported by fdir filter");
>  			return -rte_errno;
>  		}
> -	}
> -	rule->mask.mac_addr_byte_mask = 0;
> -	for (j = 0; j < ETHER_ADDR_LEN; j++) {
> -		/* It's a per byte mask. */
> -		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
> -			rule->mask.mac_addr_byte_mask |= 0x1 << j;
> -		} else if (eth_mask->dst.addr_bytes[j]) {
> -			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> +		/*Not supported last point for range*/
> +		if (item->last) {
>  			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +				item, "Not supported last point for range");
> +			return -rte_errno;
> +		}
> +		rule->b_mask = TRUE;
> +		eth_mask = item->mask;
> +
> +		/* Ether type should be masked. */
> +		if (eth_mask->type) {
> +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> +				rte_flow_error_set(error, EINVAL,
>  				RTE_FLOW_ERROR_TYPE_ITEM,
>  				item, "Not supported by fdir filter");
>  			return -rte_errno;
>  		}
> -	}
> 
> -	/* When no vlan, considered as full mask. */
> -	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
> -
> -	if (item->spec) {
> -		rule->b_spec = TRUE;
> -		eth_spec = item->spec;
> -
> -		/* Get the dst MAC. */
> +		/* src MAC address should be masked. */
>  		for (j = 0; j < ETHER_ADDR_LEN; j++) {
> -			rule->ixgbe_fdir.formatted.inner_mac[j] =
> -				eth_spec->dst.addr_bytes[j];
> +			if (eth_mask->src.addr_bytes[j]) {
> +				memset(rule, 0,
> +			       sizeof(struct ixgbe_fdir_rule));
> +				rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM,
> +				item, "Not supported by fdir filter");
> +				return -rte_errno;
> +			}
> +		}
> +		for (j = 0; j < ETHER_ADDR_LEN; j++) {
> +			/* It's a per byte mask. */
> +			if (eth_mask->dst.addr_bytes[j] == 0xFF) {
> +				rule->mask.mac_addr_byte_mask |= 0x1 << j;
> +			} else if (eth_mask->dst.addr_bytes[j]) {
> +				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> +				rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ITEM,
> +					item, "Not supported by fdir filter");
> +				return -rte_errno;
> +			}
>  		}
> -	}
> 
> -	/**
> -	 * Check if the next not void item is vlan or ipv4.
> -	 * IPv6 is not supported.
> -	 */
> -	item = next_no_void_pattern(pattern, item);
> -	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
> -		(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
> -		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> -		rte_flow_error_set(error, EINVAL,
> -			RTE_FLOW_ERROR_TYPE_ITEM,
> -			item, "Not supported by fdir filter");
> -		return -rte_errno;
> -	}
> -	/*Not supported last point for range*/
> -	if (item->last) {
> -		rte_flow_error_set(error, EINVAL,
> +		if (item->spec) {
> +			rule->b_spec = TRUE;
> +			eth_spec = item->spec;
> +
> +			/* Get the dst MAC. */
> +			for (j = 0; j < ETHER_ADDR_LEN; j++) {
> +				rule->ixgbe_fdir.formatted.inner_mac[j] =
> +					eth_spec->dst.addr_bytes[j];
> +			}
> +		}
> +		/**
> +		 * Check if the next not void item is vlan or ipv4.
> +		 * IPv6 is not supported.
> +		 */
> +		item = next_no_void_pattern(pattern, item);
> +		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
> +			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
> +			item->type != RTE_FLOW_ITEM_TYPE_END) {
> +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM,
> +				item, "Not supported by fdir filter");
> +			return -rte_errno;
> +		}
> +			/*Not supported last point for range*/
> +		if (item->last) {
> +			rte_flow_error_set(error, EINVAL,
>  			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
>  			item, "Not supported last point for range");
> -		return -rte_errno;
> +			return -rte_errno;
> +		}
>  	}
> 
> +
>  	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
>  		if (!(item->spec && item->mask)) {
>  			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> @@ -2736,10 +2753,90 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
>  		rule->mask.vlan_tci_mask = vlan_mask->tci;
>  		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
>  		/* More than one tags are not supported. */
> +		item = next_no_void_pattern(pattern, item);
> +		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
> +			item->type != RTE_FLOW_ITEM_TYPE_END) {
> +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM,
> +				item, "Not supported by fdir filter");
> +			return -rte_errno;
> +		}
> +	}
> +
> +	/* Get the flex byte info */
> +	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
> +		/* Not supported last point for range*/
> +		if (item->last) {
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +				item, "Not supported last point for range");
> +			return -rte_errno;
> +		}
> +		/* mask should not be null */
> +		if (!item->mask || !item->spec) {
> +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM,
> +				item, "Not supported by fdir filter");
> +			return -rte_errno;
> +		}
> +
> +		raw_mask = item->mask;
> +		rule->b_mask = TRUE;
> 
> +		/* check mask */
> +		if (raw_mask->relative != 0x1 ||
> +		    raw_mask->search != 0x1 ||
> +		    raw_mask->reserved != 0x0 ||
> +		    (uint32_t)raw_mask->offset != 0xffffffff ||
> +		    raw_mask->limit != 0xffff ||
> +		    raw_mask->length != 0xffff) {
> +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM,
> +				item, "Not supported by fdir filter");
> +			return -rte_errno;
> +		}
> +
> +		raw_spec = item->spec;
> +		rule->b_spec = TRUE;
> +
> +		/* check spec */
> +		if (raw_spec->relative != 0 ||
> +		    raw_spec->search != 0 ||
> +		    raw_spec->reserved != 0 ||
> +		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
> +		    raw_spec->offset % 2 ||
> +		    raw_spec->limit != 0 ||
> +		    raw_spec->length != 2 ||
> +		    /* pattern can't be 0xffff */
> +		    (raw_spec->pattern[0] == 0xff &&
> +		     raw_spec->pattern[1] == 0xff)) {
> +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM,
> +				item, "Not supported by fdir filter");
> +			return -rte_errno;
> +		}
> +
> +		/* check pattern mask */
> +		if (raw_mask->pattern[0] != 0xff ||
> +		    raw_mask->pattern[1] != 0xff) {
> +			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM,
> +				item, "Not supported by fdir filter");
> +			return -rte_errno;
> +		}
> +
> +		rule->mask.flex_bytes_mask = 0xffff;
> +		rule->ixgbe_fdir.formatted.flex_bytes =
> +			(((uint16_t)raw_spec->pattern[1]) << 8) |
> +			raw_spec->pattern[0];
> +		rule->flex_bytes_offset = raw_spec->offset;
>  		/* check if the next not void item is END */
>  		item = next_no_void_pattern(pattern, item);
> -
>  		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
>  			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
>  			rte_flow_error_set(error, EINVAL,
> @@ -2776,12 +2873,17 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
>  		hw->mac.type != ixgbe_mac_X550EM_a)
>  		return -ENOTSUP;
> 
> +	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
> +		goto tunnel_filter;
> +
>  	ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
>  					actions, rule, error);
> 
>  	if (!ret)
>  		goto step_next;
> 
> +tunnel_filter:
> +
>  	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
>  					actions, rule, error);
> 
> --
> 2.7.5

^ permalink raw reply	[flat|nested] 6+ messages in thread
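
For the tunnel path reworked above, the RAW item may now follow the
VXLAN/NVGRE, inner MAC or VLAN items directly. Below is a minimal sketch
of one pattern shape the new checks appear to accept, assuming a port in
perfect-tunnel fdir mode; the VNI, offset and byte values are placeholder
assumptions, and only the item ordering and the checked RAW field
constraints come from the patch itself:

#include <stdint.h>
#include <rte_flow.h>

/* Illustrative pattern: IPV4 / UDP / VXLAN / RAW / END, i.e. two
 * flex bytes matched on a VXLAN flow.
 */
static const struct rte_flow_item_vxlan vxlan_spec = {
	.vni = { 0x00, 0x00, 0x01 },	/* placeholder VNI */
};
static const struct rte_flow_item_vxlan vxlan_mask = {
	.vni = { 0xff, 0xff, 0xff },	/* match on the VNI only */
};
static const uint8_t flex_spec_bytes[2] = { 0x08, 0x00 }; /* placeholder */
static const uint8_t flex_mask_bytes[2] = { 0xff, 0xff };
static const struct rte_flow_item_raw tun_raw_spec = {
	.relative = 0, .search = 0, .offset = 4, .limit = 0,
	.length = 2, .pattern = flex_spec_bytes,
};
static const struct rte_flow_item_raw tun_raw_mask = {
	.relative = 1, .search = 1, .offset = -1, .limit = 0xffff,
	.length = 0xffff, .pattern = flex_mask_bytes,
};
static const struct rte_flow_item tunnel_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
	  .spec = &vxlan_spec, .mask = &vxlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_RAW,
	  .spec = &tun_raw_spec, .mask = &tun_raw_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

The RAW spec must keep relative/search/limit at zero, use an even offset
no larger than IXGBE_MAX_FLX_SOURCE_OFF and a length of exactly two,
while the mask must fully set every checked field, as enforced by the
hunks quoted above.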

* Re: [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter
  2018-12-17  5:53 Zhao Wei
@ 2018-12-17  6:19 ` Zhao1, Wei
  2018-12-24 12:13 ` Zhang, Qi Z
  1 sibling, 0 replies; 6+ messages in thread
From: Zhao1, Wei @ 2018-12-17  6:19 UTC (permalink / raw)
  To: dev, Peng, Yuan; +Cc: adrien.mazarguil, stable, Lu, Wenzhuo, Zhang, Qi Z

Adding yuan.peng@intel.com to the mail loop.

> -----Original Message-----
> From: Zhao1, Wei
> Sent: Monday, December 17, 2018 1:53 PM
> To: dev@dpdk.org
> Cc: adrien.mazarguil@6wind.com; stable@dpdk.org; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Zhao1, Wei
> <wei.zhao1@intel.com>
> Subject: [PATCH] net/ixgbe: enable x550 flexible byte filter
> 
> There is a need for users to use the flexible byte filter on x550.
> This patch enables it.
> 
> Fixes: 82fb702077f6 ("ixgbe: support new flow director modes for X550")
> Fixes: 11777435c727 ("net/ixgbe: parse flow director filter")
> 
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> ---

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter
@ 2018-12-17  5:53 Zhao Wei
  2018-12-17  6:19 ` Zhao1, Wei
  2018-12-24 12:13 ` Zhang, Qi Z
  0 siblings, 2 replies; 6+ messages in thread
From: Zhao Wei @ 2018-12-17  5:53 UTC (permalink / raw)
  To: dev; +Cc: adrien.mazarguil, stable, wenzhuo.lu, qi.z.zhang, Wei Zhao

There is a need for users to use the flexible byte filter on x550.
This patch enables it.
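
To make the usage concrete, a two-byte flex match can then be requested
through rte_flow with a RAW item. The following is a minimal sketch,
assuming an x550 port already configured for perfect flow-director mode;
the port id, queue index, offset and matched bytes are placeholders,
while the field constraints mirror the checks added to the parser:

#include <stdint.h>
#include <rte_errno.h>
#include <rte_flow.h>

/* Steer packets whose two bytes at a fixed, even offset equal
 * 0x12 0x34 to a given Rx queue.  Error handling is reduced to
 * the return code.
 */
static int
add_flex_byte_rule(uint16_t port_id, uint16_t rx_queue)
{
	static const uint8_t spec_bytes[2] = { 0x12, 0x34 }; /* placeholder */
	static const uint8_t mask_bytes[2] = { 0xff, 0xff }; /* must be full */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_raw raw_spec = {
		.relative = 0,		/* absolute offset */
		.search = 0,
		.offset = 12,		/* even, <= IXGBE_MAX_FLX_SOURCE_OFF */
		.limit = 0,
		.length = 2,		/* exactly two flex bytes */
		.pattern = spec_bytes,
	};
	struct rte_flow_item_raw raw_mask = {
		.relative = 1,		/* every checked field fully masked */
		.search = 1,
		.offset = -1,		/* (uint32_t)0xffffffff */
		.limit = 0xffff,
		.length = 0xffff,
		.pattern = mask_bytes,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_RAW,
		  .spec = &raw_spec, .mask = &raw_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err)
		? 0 : -rte_errno;
}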

Fixes: 82fb702077f6 ("ixgbe: support new flow director modes for X550")
Fixes: 11777435c727 ("net/ixgbe: parse flow director filter")

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_fdir.c |   9 +-
 drivers/net/ixgbe/ixgbe_flow.c | 274 ++++++++++++++++++++++++++++-------------
 2 files changed, 195 insertions(+), 88 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index e559f0f..deb9a21 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -307,6 +307,8 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
 	/* flex byte mask */
 	if (info->mask.flex_bytes_mask == 0)
 		fdirm |= IXGBE_FDIRM_FLEX;
+	if (info->mask.src_ipv4_mask == 0 && info->mask.dst_ipv4_mask == 0)
+		fdirm |= IXGBE_FDIRM_L3P;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
@@ -356,8 +358,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
 	/* mask VM pool and DIPv6 since there are currently not supported
 	 * mask FLEX byte, it will be set in flex_conf
 	 */
-	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
-			 IXGBE_FDIRM_FLEX;
+	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
 	uint32_t fdiripv6m;
 	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
 	uint16_t mac_mask;
@@ -385,6 +386,10 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
+	/* flex byte mask */
+	if (info->mask.flex_bytes_mask == 0)
+		fdirm |= IXGBE_FDIRM_FLEX;
+
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
 	fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index f0fafeb..dc210c5 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1622,9 +1622,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	const struct rte_flow_item_raw *raw_mask;
 	const struct rte_flow_item_raw *raw_spec;
 	uint8_t j;
-
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+
 	if (!pattern) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
@@ -1651,9 +1651,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	 * value. So, we need not do anything for the not provided fields later.
 	 */
 	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
-	rule->mask.vlan_tci_mask = 0;
-	rule->mask.flex_bytes_mask = 0;
+	memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
 
 	/**
 	 * The first not void item should be
@@ -1665,7 +1663,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
 	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
-	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2201,6 +2200,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 		}
 
 		raw_mask = item->mask;
+		rule->b_mask = TRUE;
 
 		/* check mask */
 		if (raw_mask->relative != 0x1 ||
@@ -2217,6 +2217,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 		}
 
 		raw_spec = item->spec;
+		rule->b_spec = TRUE;
 
 		/* check spec */
 		if (raw_spec->relative != 0 ||
@@ -2323,6 +2324,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_eth *eth_mask;
 	const struct rte_flow_item_vlan *vlan_spec;
 	const struct rte_flow_item_vlan *vlan_mask;
+	const struct rte_flow_item_raw *raw_mask;
+	const struct rte_flow_item_raw *raw_spec;
 	uint32_t j;
 
 	if (!pattern) {
@@ -2351,8 +2354,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	 * value. So, we need not do anything for the not provided fields later.
 	 */
 	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
-	rule->mask.vlan_tci_mask = 0;
+	memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
 
 	/**
 	 * The first not void item should be
@@ -2364,7 +2366,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
 	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
-	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE &&
+	    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2520,6 +2523,18 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 				&rule->ixgbe_fdir.formatted.tni_vni),
 				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
 		}
+		/* check if the next not void item is MAC VLAN RAW or END*/
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END){
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
 	}
 
 	/* Get the NVGRE info */
@@ -2616,16 +2631,19 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
 			nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
 		}
-	}
 
-	/* check if the next not void item is MAC */
-	item = next_no_void_pattern(pattern, item);
-	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
+		/* check if the next not void item is MAC VLAN RAW or END*/
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END){
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
 	}
 
 	/**
@@ -2633,92 +2651,91 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	 * others should be masked.
 	 */
 
-	if (!item->mask) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			item, "Not supported last point for range");
-		return -rte_errno;
-	}
-	rule->b_mask = TRUE;
-	eth_mask = item->mask;
-
-	/* Ether type should be masked. */
-	if (eth_mask->type) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-
-	/* src MAC address should be masked. */
-	for (j = 0; j < ETHER_ADDR_LEN; j++) {
-		if (eth_mask->src.addr_bytes[j]) {
-			memset(rule, 0,
-			       sizeof(struct ixgbe_fdir_rule));
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		if (!item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by fdir filter");
 			return -rte_errno;
 		}
-	}
-	rule->mask.mac_addr_byte_mask = 0;
-	for (j = 0; j < ETHER_ADDR_LEN; j++) {
-		/* It's a per byte mask. */
-		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
-			rule->mask.mac_addr_byte_mask |= 0x1 << j;
-		} else if (eth_mask->dst.addr_bytes[j]) {
-			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		/*Not supported last point for range*/
+		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		rule->b_mask = TRUE;
+		eth_mask = item->mask;
+
+		/* Ether type should be masked. */
+		if (eth_mask->type) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by fdir filter");
 			return -rte_errno;
 		}
-	}
 
-	/* When no vlan, considered as full mask. */
-	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
-
-	if (item->spec) {
-		rule->b_spec = TRUE;
-		eth_spec = item->spec;
-
-		/* Get the dst MAC. */
+		/* src MAC address should be masked. */
 		for (j = 0; j < ETHER_ADDR_LEN; j++) {
-			rule->ixgbe_fdir.formatted.inner_mac[j] =
-				eth_spec->dst.addr_bytes[j];
+			if (eth_mask->src.addr_bytes[j]) {
+				memset(rule, 0,
+			       sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
+		}
+		for (j = 0; j < ETHER_ADDR_LEN; j++) {
+			/* It's a per byte mask. */
+			if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+				rule->mask.mac_addr_byte_mask |= 0x1 << j;
+			} else if (eth_mask->dst.addr_bytes[j]) {
+				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
 		}
-	}
 
-	/**
-	 * Check if the next not void item is vlan or ipv4.
-	 * IPv6 is not supported.
-	 */
-	item = next_no_void_pattern(pattern, item);
-	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
-		(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
-		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by fdir filter");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
+		if (item->spec) {
+			rule->b_spec = TRUE;
+			eth_spec = item->spec;
+
+			/* Get the dst MAC. */
+			for (j = 0; j < ETHER_ADDR_LEN; j++) {
+				rule->ixgbe_fdir.formatted.inner_mac[j] =
+					eth_spec->dst.addr_bytes[j];
+			}
+		}
+		/**
+		 * Check if the next not void item is vlan or ipv4.
+		 * IPv6 is not supported.
+		 */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+			/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 			item, "Not supported last point for range");
-		return -rte_errno;
+			return -rte_errno;
+		}
 	}
 
+
 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
 		if (!(item->spec && item->mask)) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2736,10 +2753,90 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 		rule->mask.vlan_tci_mask = vlan_mask->tci;
 		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
 		/* More than one tags are not supported. */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+	}
+
+	/* Get the flex byte info */
+	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+		/* Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		/* mask should not be null */
+		if (!item->mask || !item->spec) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		raw_mask = item->mask;
+		rule->b_mask = TRUE;
 
+		/* check mask */
+		if (raw_mask->relative != 0x1 ||
+		    raw_mask->search != 0x1 ||
+		    raw_mask->reserved != 0x0 ||
+		    (uint32_t)raw_mask->offset != 0xffffffff ||
+		    raw_mask->limit != 0xffff ||
+		    raw_mask->length != 0xffff) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		raw_spec = item->spec;
+		rule->b_spec = TRUE;
+
+		/* check spec */
+		if (raw_spec->relative != 0 ||
+		    raw_spec->search != 0 ||
+		    raw_spec->reserved != 0 ||
+		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
+		    raw_spec->offset % 2 ||
+		    raw_spec->limit != 0 ||
+		    raw_spec->length != 2 ||
+		    /* pattern can't be 0xffff */
+		    (raw_spec->pattern[0] == 0xff &&
+		     raw_spec->pattern[1] == 0xff)) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		/* check pattern mask */
+		if (raw_mask->pattern[0] != 0xff ||
+		    raw_mask->pattern[1] != 0xff) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		rule->mask.flex_bytes_mask = 0xffff;
+		rule->ixgbe_fdir.formatted.flex_bytes =
+			(((uint16_t)raw_spec->pattern[1]) << 8) |
+			raw_spec->pattern[0];
+		rule->flex_bytes_offset = raw_spec->offset;
 		/* check if the next not void item is END */
 		item = next_no_void_pattern(pattern, item);
-
 		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
@@ -2776,12 +2873,17 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
 		hw->mac.type != ixgbe_mac_X550EM_a)
 		return -ENOTSUP;
 
+	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+		goto tunnel_filter;
+
 	ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
 					actions, rule, error);
 
 	if (!ret)
 		goto step_next;
 
+tunnel_filter:
+
 	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
 					actions, rule, error);
 
-- 
2.7.5

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2019-01-02  7:02 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-01-02  6:31 [dpdk-dev] [PATCH] net/ixgbe: enable x550 flexible byte filter Wei Zhao
2019-01-02  7:02 ` Zhao1, Wei
  -- strict thread matches above, loose matches on Subject: below --
2018-12-17  5:53 Zhao Wei
2018-12-17  6:19 ` Zhao1, Wei
2018-12-24 12:13 ` Zhang, Qi Z
2018-12-25  2:04   ` Zhao1, Wei
