DPDK patches and discussions
* [dpdk-dev] [PATCH] net/mlx5: fix RSS expand for IP-in-IP
@ 2019-07-24 11:31 Xiaoyu Min
  2019-08-06  9:20 ` [dpdk-dev] [Suspected-Phishing][PATCH] " Slava Ovsiienko
  2019-08-06 10:38 ` [dpdk-dev] [PATCH] " Raslan Darawsheh
  0 siblings, 2 replies; 3+ messages in thread
From: Xiaoyu Min @ 2019-07-24 11:31 UTC (permalink / raw)
  To: Shahaf Shuler, Yongseok Koh, Viacheslav Ovsiienko; +Cc: dev, jackmin

The RSS expansion for the IP-in-IP tunnel type is missing,
which causes the creation of the following flow to fail:

   flow create 0 ingress pattern eth / ipv4 proto is 4 /
        ipv4 / udp / end actions rss queues 0 1 end level 2
	types ip ipv4-other udp ipv4 ipv4-frag end /
	mark id 221 / count / end

In order to make the RSS expansion work correctly, an IP tunnel is
now detected by checking whether a second IPv4/IPv6 item is present
and whether the first IPv4/IPv6 item's next protocol is
IPPROTO_IPIP/IPPROTO_IPV6.
For example:
  ... pattern eth / ipv4 proto is 4 / ipv4 / ...
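
For reference, the idea behind the check can be reduced to a tiny
standalone helper (illustrative only; the helper name is made up for
this example and is not part of the driver code):

  #include <stdint.h>
  #include <netinet/in.h> /* IPPROTO_IPIP (4), IPPROTO_IPV6 (41) */

  /*
   * Sketch of the detection: the next-protocol field of the outer
   * IPv4/IPv6 item, masked by the item mask, tells whether the
   * following IPv4/IPv6 item should be treated as an inner header.
   */
  int
  outer_ip_carries_ip_tunnel(uint8_t spec_next_proto,
                             uint8_t mask_next_proto)
  {
          uint8_t next_protocol = spec_next_proto & mask_next_proto;

          return next_protocol == IPPROTO_IPIP ||
                 next_protocol == IPPROTO_IPV6;
  }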

Fixes: 5e33bebdd8d3 ("net/mlx5: support IP-in-IP tunnel")
Cc: jackmin@mellanox.com

Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c    |  8 +++-
 drivers/net/mlx5/mlx5_flow_dv.c | 79 ++++++++++++++++++++-------------
 2 files changed, 53 insertions(+), 34 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 3d2d5fc53a..f117369fa9 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -127,7 +127,9 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
 		.next = RTE_FLOW_EXPAND_RSS_NEXT
 			(MLX5_EXPANSION_OUTER_IPV4_UDP,
 			 MLX5_EXPANSION_OUTER_IPV4_TCP,
-			 MLX5_EXPANSION_GRE),
+			 MLX5_EXPANSION_GRE,
+			 MLX5_EXPANSION_IPV4,
+			 MLX5_EXPANSION_IPV6),
 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
 		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
 			ETH_RSS_NONFRAG_IPV4_OTHER,
@@ -145,7 +147,9 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
 	[MLX5_EXPANSION_OUTER_IPV6] = {
 		.next = RTE_FLOW_EXPAND_RSS_NEXT
 			(MLX5_EXPANSION_OUTER_IPV6_UDP,
-			 MLX5_EXPANSION_OUTER_IPV6_TCP),
+			 MLX5_EXPANSION_OUTER_IPV6_TCP,
+			 MLX5_EXPANSION_IPV4,
+			 MLX5_EXPANSION_IPV6),
 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
 		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
 			ETH_RSS_NONFRAG_IPV6_OTHER,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f1d32bdff0..2ff8c673b0 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -146,36 +146,20 @@ struct field_modify_info modify_tcp[] = {
 };
 
 static void
-mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item, uint64_t *flags)
+mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
+			  uint8_t next_protocol, uint64_t *item_flags,
+			  int *tunnel)
 {
-	uint8_t next_protocol = 0xFF;
-
-	if (item->mask != NULL) {
-		switch (item->type) {
-		case RTE_FLOW_ITEM_TYPE_IPV4:
-			next_protocol =
-				((const struct rte_flow_item_ipv4 *)
-				 (item->spec))->hdr.next_proto_id;
-			next_protocol &=
-				((const struct rte_flow_item_ipv4 *)
-				 (item->mask))->hdr.next_proto_id;
-			break;
-		case RTE_FLOW_ITEM_TYPE_IPV6:
-			next_protocol =
-				((const struct rte_flow_item_ipv6 *)
-				 (item->spec))->hdr.proto;
-			next_protocol &=
-				((const struct rte_flow_item_ipv6 *)
-				 (item->mask))->hdr.proto;
-			break;
-		default:
-			break;
-		}
+	assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+	       item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+	if (next_protocol == IPPROTO_IPIP) {
+		*item_flags |= MLX5_FLOW_LAYER_IPIP;
+		*tunnel = 1;
+	}
+	if (next_protocol == IPPROTO_IPV6) {
+		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
+		*tunnel = 1;
 	}
-	if (next_protocol == IPPROTO_IPIP)
-		*flags |= MLX5_FLOW_LAYER_IPIP;
-	if (next_protocol == IPPROTO_IPV6)
-		*flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
 }
 
 /**
@@ -2902,6 +2886,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 					     MLX5_FLOW_LAYER_OUTER_VLAN;
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
+			mlx5_flow_tunnel_ip_check(items, next_protocol,
+						  &item_flags, &tunnel);
 			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
 							   NULL, error);
 			if (ret < 0)
@@ -2921,9 +2907,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				/* Reset for inner layer. */
 				next_protocol = 0xff;
 			}
-			mlx5_flow_tunnel_ip_check(items, &last_item);
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
+			mlx5_flow_tunnel_ip_check(items, next_protocol,
+						  &item_flags, &tunnel);
 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
 							   NULL, error);
 			if (ret < 0)
@@ -2943,7 +2930,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				/* Reset for inner layer. */
 				next_protocol = 0xff;
 			}
-			mlx5_flow_tunnel_ip_check(items, &last_item);
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			ret = mlx5_flow_validate_item_tcp
@@ -4686,6 +4672,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	uint32_t modify_action_position = UINT32_MAX;
 	void *match_mask = matcher.mask.buf;
 	void *match_value = dev_flow->dv.value.buf;
+	uint8_t next_protocol = 0xff;
 
 	flow->group = attr->group;
 	if (attr->transfer)
@@ -5007,6 +4994,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
 					      MLX5_FLOW_LAYER_OUTER_VLAN);
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
+			mlx5_flow_tunnel_ip_check(items, next_protocol,
+						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv4(match_mask, match_value,
 						    items, tunnel, attr->group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
@@ -5017,9 +5006,23 @@ flow_dv_translate(struct rte_eth_dev *dev,
 					 MLX5_IPV4_IBV_RX_HASH);
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
-			mlx5_flow_tunnel_ip_check(items, &last_item);
+			if (items->mask != NULL &&
+			    ((const struct rte_flow_item_ipv4 *)
+			     items->mask)->hdr.next_proto_id) {
+				next_protocol =
+					((const struct rte_flow_item_ipv4 *)
+					 (items->spec))->hdr.next_proto_id;
+				next_protocol &=
+					((const struct rte_flow_item_ipv4 *)
+					 (items->mask))->hdr.next_proto_id;
+			} else {
+				/* Reset for inner layer. */
+				next_protocol = 0xff;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
+			mlx5_flow_tunnel_ip_check(items, next_protocol,
+						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv6(match_mask, match_value,
 						    items, tunnel, attr->group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
@@ -5030,7 +5033,19 @@ flow_dv_translate(struct rte_eth_dev *dev,
 					 MLX5_IPV6_IBV_RX_HASH);
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
-			mlx5_flow_tunnel_ip_check(items, &last_item);
+			if (items->mask != NULL &&
+			    ((const struct rte_flow_item_ipv6 *)
+			     items->mask)->hdr.proto) {
+				next_protocol =
+					((const struct rte_flow_item_ipv6 *)
+					 items->spec)->hdr.proto;
+				next_protocol &=
+					((const struct rte_flow_item_ipv6 *)
+					 items->mask)->hdr.proto;
+			} else {
+				/* Reset for inner layer. */
+				next_protocol = 0xff;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			flow_dv_translate_item_tcp(match_mask, match_value,
-- 
2.21.0



* Re: [dpdk-dev] [Suspected-Phishing][PATCH] net/mlx5: fix RSS expand for IP-in-IP
  2019-07-24 11:31 [dpdk-dev] [PATCH] net/mlx5: fix RSS expand for IP-in-IP Xiaoyu Min
@ 2019-08-06  9:20 ` Slava Ovsiienko
  2019-08-06 10:38 ` [dpdk-dev] [PATCH] " Raslan Darawsheh
  1 sibling, 0 replies; 3+ messages in thread
From: Slava Ovsiienko @ 2019-08-06  9:20 UTC (permalink / raw)
  To: Jack Min, Shahaf Shuler, Yongseok Koh; +Cc: dev, Jack Min

> -----Original Message-----
> From: Xiaoyu Min <jackmin@mellanox.com>
> Sent: Wednesday, July 24, 2019 14:32
> To: Shahaf Shuler <shahafs@mellanox.com>; Yongseok Koh
> <yskoh@mellanox.com>; Slava Ovsiienko <viacheslavo@mellanox.com>
> Cc: dev@dpdk.org; Jack Min <jackmin@mellanox.com>
> Subject: [Suspected-Phishing][PATCH] net/mlx5: fix RSS expand for IP-in-IP
> 
> The RSS expansion for the IP-in-IP tunnel type is missing, which causes
> the creation of the following flow to fail:
> 
>    flow create 0 ingress pattern eth / ipv4 proto is 4 /
>         ipv4 / udp / end actions rss queues 0 1 end level 2
> 	types ip ipv4-other udp ipv4 ipv4-frag end /
> 	mark id 221 / count / end
> 
> In order to make the RSS expansion work correctly, an IP tunnel is now
> detected by checking whether a second IPv4/IPv6 item is present and
> whether the first IPv4/IPv6 item's next protocol is
> IPPROTO_IPIP/IPPROTO_IPV6.
> For example:
>   ... pattern eth / ipv4 proto is 4 / ipv4 / ...
> 
> Fixes: 5e33bebdd8d3 ("net/mlx5: support IP-in-IP tunnel")
> Cc: jackmin@mellanox.com
> 
> Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>


* Re: [dpdk-dev] [PATCH] net/mlx5: fix RSS expand for IP-in-IP
  2019-07-24 11:31 [dpdk-dev] [PATCH] net/mlx5: fix RSS expand for IP-in-IP Xiaoyu Min
  2019-08-06  9:20 ` [dpdk-dev] [Suspected-Phishing][PATCH] " Slava Ovsiienko
@ 2019-08-06 10:38 ` Raslan Darawsheh
  1 sibling, 0 replies; 3+ messages in thread
From: Raslan Darawsheh @ 2019-08-06 10:38 UTC (permalink / raw)
  To: Jack Min, Shahaf Shuler, Yongseok Koh, Slava Ovsiienko; +Cc: dev, Jack Min

Hi,
> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Xiaoyu Min
> Sent: Wednesday, July 24, 2019 2:32 PM
> To: Shahaf Shuler <shahafs@mellanox.com>; Yongseok Koh
> <yskoh@mellanox.com>; Slava Ovsiienko <viacheslavo@mellanox.com>
> Cc: dev@dpdk.org; Jack Min <jackmin@mellanox.com>
> Subject: [dpdk-dev] [PATCH] net/mlx5: fix RSS expand for IP-in-IP
> 
> The RSS expansion for the IP-in-IP tunnel type is missing,
> which causes the creation of the following flow to fail:
> 
>    flow create 0 ingress pattern eth / ipv4 proto is 4 /
>         ipv4 / udp / end actions rss queues 0 1 end level 2
> 	types ip ipv4-other udp ipv4 ipv4-frag end /
> 	mark id 221 / count / end
> 
> In order to make the RSS expansion work correctly, an IP tunnel is
> now detected by checking whether a second IPv4/IPv6 item is present
> and whether the first IPv4/IPv6 item's next protocol is
> IPPROTO_IPIP/IPPROTO_IPV6.
> For example:
>   ... pattern eth / ipv4 proto is 4 / ipv4 / ...
> 
> Fixes: 5e33bebdd8d3 ("net/mlx5: support IP-in-IP tunnel")
> Cc: jackmin@mellanox.com
> 
> Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
> ---
>  drivers/net/mlx5/mlx5_flow.c    |  8 +++-
>  drivers/net/mlx5/mlx5_flow_dv.c | 79 ++++++++++++++++++++-------------
>  2 files changed, 53 insertions(+), 34 deletions(-)
> 

Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh


