DPDK patches and discussions
From: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
To: dev@dpdk.org, Yongseok Koh <yskoh@mellanox.com>
Cc: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Subject: [dpdk-dev] [PATCH v4 16/21] net/mlx5: support inner RSS computation
Date: Thu, 12 Jul 2018 11:31:02 +0200
Message-ID: <a3590f4c1a43bcfc46c1a1512e9e60b4264a66af.1531387413.git.nelio.laranjeiro@6wind.com>
In-Reply-To: <cover.1531387413.git.nelio.laranjeiro@6wind.com>

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c | 245 ++++++++++++++++++++++++++---------
 1 file changed, 185 insertions(+), 60 deletions(-)
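
Not part of the patch itself: a minimal application-side sketch of what this change enables, kept here for context. It assumes a VXLAN pattern (tunnel items are only added by later patches of this series), two Rx queues, an arbitrary 40-byte Toeplitz key, and an invented helper name. Setting the RSS action level to 2 asks the PMD to hash on the inner headers, while level 0 or 1 keeps hashing on the outer ones.

#include <stdint.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Illustrative only: spread VXLAN traffic over two queues by hashing on
 * the inner IPv4/UDP headers (rte_flow_action_rss.level = 2). */
static struct rte_flow *
create_inner_rss_flow(uint16_t port_id, struct rte_flow_error *error)
{
	static const uint16_t queues[] = { 0, 1 };
	/* 40-byte Toeplitz key, value chosen for illustration only. */
	static const uint8_t rss_key[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
	};
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss rss = {
		.level = 2, /* 2 = hash on the inner encapsulation level */
		.types = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
		.key_len = sizeof(rss_key),
		.key = rss_key,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

Internally the driver records rss->level, picks MLX5_EXPANSION_ROOT_OUTER as the RSS expansion root when the level is 2 or more, and tags the Verbs specifications of items located after a tunnel header with IBV_FLOW_SPEC_INNER.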

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 730360b22..84bd99b3e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -35,18 +35,42 @@
 extern const struct eth_dev_ops mlx5_dev_ops;
 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 
-/* Pattern Layer bits. */
+/* Pattern outer Layer bits. */
 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
-/* Masks. */
+
+/* Pattern inner Layer bits. */
+#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
+#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
+#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
+#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
+
+/* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
 #define MLX5_FLOW_LAYER_OUTER_L4 \
 	(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
+#define MLX5_FLOW_LAYER_OUTER \
+	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
+	 MLX5_FLOW_LAYER_OUTER_L4)
+
+/* Tunnel Masks. */
+#define MLX5_FLOW_LAYER_TUNNEL 0
+
+/* Inner Masks. */
+#define MLX5_FLOW_LAYER_INNER_L3 \
+	(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
+#define MLX5_FLOW_LAYER_INNER_L4 \
+	(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
+#define MLX5_FLOW_LAYER_INNER \
+	(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
+	 MLX5_FLOW_LAYER_INNER_L4)
 
 /* Actions that modify the fate of matching traffic. */
 #define MLX5_FLOW_FATE_DROP (1u << 0)
@@ -66,6 +90,14 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 
 enum mlx5_expansion {
 	MLX5_EXPANSION_ROOT,
+	MLX5_EXPANSION_ROOT_OUTER,
+	MLX5_EXPANSION_OUTER_ETH,
+	MLX5_EXPANSION_OUTER_IPV4,
+	MLX5_EXPANSION_OUTER_IPV4_UDP,
+	MLX5_EXPANSION_OUTER_IPV4_TCP,
+	MLX5_EXPANSION_OUTER_IPV6,
+	MLX5_EXPANSION_OUTER_IPV6_UDP,
+	MLX5_EXPANSION_OUTER_IPV6_TCP,
 	MLX5_EXPANSION_ETH,
 	MLX5_EXPANSION_IPV4,
 	MLX5_EXPANSION_IPV4_UDP,
@@ -83,6 +115,50 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
 						 MLX5_EXPANSION_IPV6),
 		.type = RTE_FLOW_ITEM_TYPE_END,
 	},
+	[MLX5_EXPANSION_ROOT_OUTER] = {
+		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
+						 MLX5_EXPANSION_OUTER_IPV4,
+						 MLX5_EXPANSION_OUTER_IPV6),
+		.type = RTE_FLOW_ITEM_TYPE_END,
+	},
+	[MLX5_EXPANSION_OUTER_ETH] = {
+		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+						 MLX5_EXPANSION_OUTER_IPV6),
+		.type = RTE_FLOW_ITEM_TYPE_ETH,
+		.rss_types = 0,
+	},
+	[MLX5_EXPANSION_OUTER_IPV4] = {
+		.next = RTE_FLOW_EXPAND_RSS_NEXT
+			(MLX5_EXPANSION_OUTER_IPV4_UDP,
+			 MLX5_EXPANSION_OUTER_IPV4_TCP),
+		.type = RTE_FLOW_ITEM_TYPE_IPV4,
+		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+			ETH_RSS_NONFRAG_IPV4_OTHER,
+	},
+	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
+		.type = RTE_FLOW_ITEM_TYPE_UDP,
+		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+	},
+	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
+		.type = RTE_FLOW_ITEM_TYPE_TCP,
+		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+	},
+	[MLX5_EXPANSION_OUTER_IPV6] = {
+		.next = RTE_FLOW_EXPAND_RSS_NEXT
+			(MLX5_EXPANSION_OUTER_IPV6_UDP,
+			 MLX5_EXPANSION_OUTER_IPV6_TCP),
+		.type = RTE_FLOW_ITEM_TYPE_IPV6,
+		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+			ETH_RSS_NONFRAG_IPV6_OTHER,
+	},
+	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
+		.type = RTE_FLOW_ITEM_TYPE_UDP,
+		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+	},
+	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
+		.type = RTE_FLOW_ITEM_TYPE_TCP,
+		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+	},
 	[MLX5_EXPANSION_ETH] = {
 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
 						 MLX5_EXPANSION_IPV6),
@@ -453,6 +529,34 @@ mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
 	verbs->size += size;
 }
 
+/**
+ * Adjust verbs hash fields according to the @p flow information.
+ *
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ * @param[in] tunnel
+ *   1 when the hash field is for a tunnel item.
+ * @param[in] layer_types
+ *   ETH_RSS_* types.
+ * @param[in] hash_fields
+ *   Item hash fields.
+ */
+static void
+mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow, int tunnel __rte_unused,
+				  uint32_t layer_types, uint64_t hash_fields)
+{
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+	hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
+	if (flow->rss.level == 2 && !tunnel)
+		hash_fields = 0;
+	else if (flow->rss.level < 2 && tunnel)
+		hash_fields = 0;
+#endif
+	if (!(flow->rss.types & layer_types))
+		hash_fields = 0;
+	flow->cur_verbs->hash_fields |= hash_fields;
+}
+
 /**
  * Convert the @p item into a Verbs specification after ensuring the NIC
  * will understand and process it correctly.
@@ -486,14 +590,16 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
 		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 		.type = RTE_BE16(0xffff),
 	};
+	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
 	struct ibv_flow_spec_eth eth = {
-		.type = IBV_FLOW_SPEC_ETH,
+		.type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
 		.size = size,
 	};
 	int ret;
 
-	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L2)
+	if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+			    MLX5_FLOW_LAYER_OUTER_L2))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
@@ -506,7 +612,8 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
 					error);
 	if (ret)
 		return ret;
-	flow->layers |= MLX5_FLOW_LAYER_OUTER_L2;
+	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+		MLX5_FLOW_LAYER_OUTER_L2;
 	if (size > flow_size)
 		return size;
 	if (spec) {
@@ -543,7 +650,7 @@ mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
 			   struct ibv_flow_spec_eth *eth)
 {
 	unsigned int i;
-	enum ibv_flow_spec_type search = IBV_FLOW_SPEC_ETH;
+	const enum ibv_flow_spec_type search = eth->type;
 	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
 		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
 
@@ -596,16 +703,19 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
 		.inner_type = RTE_BE16(0xffff),
 	};
 	unsigned int size = sizeof(struct ibv_flow_spec_eth);
-	struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	struct ibv_flow_spec_eth eth = {
-		.type = IBV_FLOW_SPEC_ETH,
+		.type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
 		.size = size,
 	};
 	int ret;
-	const uint32_t l34m = MLX5_FLOW_LAYER_OUTER_L3 |
-			MLX5_FLOW_LAYER_OUTER_L4;
-	const uint32_t vlanm = MLX5_FLOW_LAYER_OUTER_VLAN;
-	const uint32_t l2m = MLX5_FLOW_LAYER_OUTER_L2;
+	const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+					MLX5_FLOW_LAYER_INNER_L4) :
+		(MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);
+	const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+		MLX5_FLOW_LAYER_OUTER_VLAN;
+	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+		MLX5_FLOW_LAYER_OUTER_L2;
 
 	if (flow->layers & vlanm)
 		return rte_flow_error_set(error, ENOTSUP,
@@ -648,11 +758,14 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
 			mlx5_flow_spec_verbs_add(flow, &eth, size);
 		}
 	} else {
-		if (verbs->attr)
-			mlx5_flow_item_vlan_update(verbs->attr, &eth);
+		if (flow->cur_verbs)
+			mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
+						   &eth);
 		size = 0; /* Only an update is done in eth specification. */
 	}
-	flow->layers |= MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN;
+	flow->layers |= tunnel ?
+		(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
+		(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
 	return size;
 }
 
@@ -692,19 +805,23 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
 			.next_proto_id = 0xff,
 		},
 	};
+	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
 	struct ibv_flow_spec_ipv4_ext ipv4 = {
-		.type = IBV_FLOW_SPEC_IPV4_EXT,
+		.type = IBV_FLOW_SPEC_IPV4_EXT |
+			(tunnel ? IBV_FLOW_SPEC_INNER : 0),
 		.size = size,
 	};
 	int ret;
 
-	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
+	if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+			    MLX5_FLOW_LAYER_OUTER_L3))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
 					  "multiple L3 layers not supported");
-	else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
+	else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+				 MLX5_FLOW_LAYER_OUTER_L4))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
@@ -717,7 +834,8 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
 		 sizeof(struct rte_flow_item_ipv4), error);
 	if (ret < 0)
 		return ret;
-	flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+		MLX5_FLOW_LAYER_OUTER_L3_IPV4;
 	if (spec) {
 		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
 			.src_ip = spec->hdr.src_addr,
@@ -740,14 +858,11 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
 	flow->l3_protocol_en = !!ipv4.mask.proto;
 	flow->l3_protocol = ipv4.val.proto;
 	if (size <= flow_size) {
-		uint64_t hash_fields = IBV_RX_HASH_SRC_IPV4 |
-			IBV_RX_HASH_DST_IPV4;
-
-		if (!(flow->rss.types &
-		      (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-		       ETH_RSS_NONFRAG_IPV4_OTHER)))
-			hash_fields = 0;
-		flow->cur_verbs->hash_fields |= hash_fields;
+		mlx5_flow_verbs_hashfields_adjust
+			(flow, tunnel,
+			 (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+			  ETH_RSS_NONFRAG_IPV4_OTHER),
+			 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));
 		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
 		mlx5_flow_spec_verbs_add(flow, &ipv4, size);
 	}
@@ -795,19 +910,22 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
 			.hop_limits = 0xff,
 		},
 	};
+	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
 	struct ibv_flow_spec_ipv6 ipv6 = {
-		.type = IBV_FLOW_SPEC_IPV6,
+		.type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
 		.size = size,
 	};
 	int ret;
 
-	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
+	if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+			    MLX5_FLOW_LAYER_OUTER_L3))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
 					  "multiple L3 layers not supported");
-	else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
+	else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+				 MLX5_FLOW_LAYER_OUTER_L4))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
@@ -820,7 +938,8 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
 		 sizeof(struct rte_flow_item_ipv6), error);
 	if (ret < 0)
 		return ret;
-	flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+		MLX5_FLOW_LAYER_OUTER_L3_IPV6;
 	if (spec) {
 		unsigned int i;
 		uint32_t vtc_flow_val;
@@ -863,13 +982,10 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
 	flow->l3_protocol_en = !!ipv6.mask.next_hdr;
 	flow->l3_protocol = ipv6.val.next_hdr;
 	if (size <= flow_size) {
-		uint64_t hash_fields = IBV_RX_HASH_SRC_IPV6 |
-			IBV_RX_HASH_DST_IPV6;
-
-		if (!(flow->rss.types &
-		      (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER)))
-			hash_fields = 0;
-		flow->cur_verbs->hash_fields |= hash_fields;
+		mlx5_flow_verbs_hashfields_adjust
+			(flow, tunnel,
+			 (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER),
+			 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));
 		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
 		mlx5_flow_spec_verbs_add(flow, &ipv6, size);
 	}
@@ -904,9 +1020,10 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
 {
 	const struct rte_flow_item_udp *spec = item->spec;
 	const struct rte_flow_item_udp *mask = item->mask;
+	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
 	struct ibv_flow_spec_tcp_udp udp = {
-		.type = IBV_FLOW_SPEC_UDP,
+		.type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
 		.size = size,
 	};
 	int ret;
@@ -917,13 +1034,15 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
 					  item,
 					  "protocol filtering not compatible"
 					  " with UDP layer");
-	if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
+	if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+			      MLX5_FLOW_LAYER_OUTER_L3)))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
 					  "L3 is mandatory to filter"
 					  " on L4");
-	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
+	if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+			    MLX5_FLOW_LAYER_OUTER_L4))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
@@ -937,7 +1056,8 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
 		 sizeof(struct rte_flow_item_udp), error);
 	if (ret < 0)
 		return ret;
-	flow->layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+		MLX5_FLOW_LAYER_OUTER_L4_UDP;
 	if (spec) {
 		udp.val.dst_port = spec->hdr.dst_port;
 		udp.val.src_port = spec->hdr.src_port;
@@ -948,12 +1068,9 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
 		udp.val.dst_port &= udp.mask.dst_port;
 	}
 	if (size <= flow_size) {
-		uint64_t hash_fields = IBV_RX_HASH_SRC_PORT_UDP |
-			IBV_RX_HASH_DST_PORT_UDP;
-
-		if (!(flow->rss.types & ETH_RSS_UDP))
-			hash_fields = 0;
-		flow->cur_verbs->hash_fields |= hash_fields;
+		mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,
+						  (IBV_RX_HASH_SRC_PORT_UDP |
+						   IBV_RX_HASH_DST_PORT_UDP));
 		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
 		mlx5_flow_spec_verbs_add(flow, &udp, size);
 	}
@@ -988,9 +1105,10 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
 {
 	const struct rte_flow_item_tcp *spec = item->spec;
 	const struct rte_flow_item_tcp *mask = item->mask;
+	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
 	struct ibv_flow_spec_tcp_udp tcp = {
-		.type = IBV_FLOW_SPEC_TCP,
+		.type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
 		.size = size,
 	};
 	int ret;
@@ -1001,12 +1119,14 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
 					  item,
 					  "protocol filtering not compatible"
 					  " with TCP layer");
-	if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
+	if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+			      MLX5_FLOW_LAYER_OUTER_L3)))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
 					  "L3 is mandatory to filter on L4");
-	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
+	if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+			    MLX5_FLOW_LAYER_OUTER_L4))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
@@ -1019,7 +1139,8 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
 		 sizeof(struct rte_flow_item_tcp), error);
 	if (ret < 0)
 		return ret;
-	flow->layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
+	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+		MLX5_FLOW_LAYER_OUTER_L4_TCP;
 	if (spec) {
 		tcp.val.dst_port = spec->hdr.dst_port;
 		tcp.val.src_port = spec->hdr.src_port;
@@ -1030,12 +1151,9 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
 		tcp.val.dst_port &= tcp.mask.dst_port;
 	}
 	if (size <= flow_size) {
-		uint64_t hash_fields = IBV_RX_HASH_SRC_PORT_TCP |
-			IBV_RX_HASH_DST_PORT_TCP;
-
-		if (!(flow->rss.types & ETH_RSS_TCP))
-			hash_fields = 0;
-		flow->cur_verbs->hash_fields |= hash_fields;
+		mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,
+						  (IBV_RX_HASH_SRC_PORT_TCP |
+						   IBV_RX_HASH_DST_PORT_TCP));
 		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
 		mlx5_flow_spec_verbs_add(flow, &tcp, size);
 	}
@@ -1261,7 +1379,11 @@ mlx5_flow_action_rss(struct rte_eth_dev *dev,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &rss->func,
 					  "RSS hash function not supported");
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+	if (rss->level > 2)
+#else
 	if (rss->level > 1)
+#endif
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &rss->level,
@@ -1301,6 +1423,7 @@ mlx5_flow_action_rss(struct rte_eth_dev *dev,
 	flow->rss.queue_num = rss->queue_num;
 	memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
 	flow->rss.types = rss->types;
+	flow->rss.level = rss->level;
 	flow->fate |= MLX5_FLOW_FATE_RSS;
 	return 0;
 }
@@ -1608,7 +1731,9 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
 		ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
 					  pattern, local_flow.rss.types,
 					  mlx5_support_expansion,
-					  MLX5_EXPANSION_ROOT);
+					  local_flow.rss.level < 2 ?
+					  MLX5_EXPANSION_ROOT :
+					  MLX5_EXPANSION_ROOT_OUTER);
 		assert(ret > 0 &&
 		       (unsigned int)ret < sizeof(expand_buffer.buffer));
 	} else {
@@ -1979,8 +2104,8 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
 			return NULL;
 		}
 	}
-	mlx5_flow_rxq_mark_set(dev, flow);
 	TAILQ_INSERT_TAIL(list, flow, next);
+	mlx5_flow_rxq_mark_set(dev, flow);
 	return flow;
 }
 
-- 
2.18.0
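
As a reading aid for the hunk above that introduces mlx5_flow_verbs_hashfields_adjust(), its gating rule can be restated as the sketch below. This is a simplified restatement, not code from the patch; IBV_RX_HASH_INNER and the level checks only take effect in the driver when HAVE_IBV_DEVICE_TUNNEL_SUPPORT is defined, and the function and parameter names here are invented.

#include <stdint.h>
#include <infiniband/verbs.h>

/*
 * Simplified restatement (not code from the patch) of how a layer's
 * candidate IBV_RX_HASH_* bits are kept or dropped before being OR-ed
 * into the Verbs hash_fields of the flow being built.  Requires a
 * tunnel-aware rdma-core for IBV_RX_HASH_INNER.
 */
static uint64_t
keep_hash_fields(uint64_t requested_rss_types, uint32_t rss_level,
		 int item_is_inner, uint64_t layer_rss_types,
		 uint64_t candidate_hash_fields)
{
	/* Items located after a tunnel header hash on the inner packet. */
	if (item_is_inner)
		candidate_hash_fields |= IBV_RX_HASH_INNER;
	/* Level 2 requests inner RSS, so outer items contribute nothing;
	 * level 0 or 1 requests outer RSS, so inner items contribute
	 * nothing. */
	if ((rss_level == 2 && !item_is_inner) ||
	    (rss_level < 2 && item_is_inner))
		return 0;
	/* Only hash on layers the application listed in rss->types. */
	if (!(requested_rss_types & layer_rss_types))
		return 0;
	return candidate_hash_fields;
}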


Thread overview: 118+ messages
2018-05-28 11:21 [dpdk-dev] [DPDK 18.08 v1 00/12] net/mlx5: flow rework Nelio Laranjeiro
2018-05-28 11:21 ` [dpdk-dev] [DPDK 18.08 v1 01/12] net/mlx5: remove flow support Nelio Laranjeiro
2018-05-28 11:21 ` [dpdk-dev] [DPDK 18.08 v1 02/12] net/mlx5: handle drop queues are regular queues Nelio Laranjeiro
2018-05-28 11:21 ` [dpdk-dev] [DPDK 18.08 v1 03/12] net/mlx5: support flow Ethernet item among with drop action Nelio Laranjeiro
2018-05-28 11:21 ` [dpdk-dev] [DPDK 18.08 v1 04/12] net/mlx5: add flow queue action Nelio Laranjeiro
2018-05-28 11:21 ` [dpdk-dev] [DPDK 18.08 v1 05/12] net/mlx5: add flow stop/start Nelio Laranjeiro
2018-05-28 11:21 ` [dpdk-dev] [DPDK 18.08 v1 06/12] net/mlx5: add flow VLAN item Nelio Laranjeiro
2018-05-28 11:21 ` [dpdk-dev] [DPDK 18.08 v1 07/12] net/mlx5: add flow IPv4 item Nelio Laranjeiro
2018-05-28 11:21 ` [dpdk-dev] [DPDK 18.08 v1 08/12] net/mlx5: add flow IPv6 item Nelio Laranjeiro
2018-05-28 11:21 ` [dpdk-dev] [DPDK 18.08 v1 09/12] net/mlx5: add flow UDP item Nelio Laranjeiro
2018-05-28 11:21 ` [dpdk-dev] [DPDK 18.08 v1 10/12] net/mlx5: add flow TCP item Nelio Laranjeiro
2018-05-28 11:21 ` [dpdk-dev] [DPDK 18.08 v1 11/12] net/mlx5: add mark/flag flow action Nelio Laranjeiro
2018-05-28 11:21 ` [dpdk-dev] [DPDK 18.08 v1 12/12] net/mlx5: add RSS " Nelio Laranjeiro
2018-05-28 13:32 ` [dpdk-dev] [DPDK 18.08 v1 00/12] net/mlx5: flow rework Wiles, Keith
2018-05-28 13:47   ` Ferruh Yigit
2018-05-28 13:50   ` Nélio Laranjeiro
2018-06-27 15:07 ` [dpdk-dev] [PATCH v2 00/20] " Nelio Laranjeiro
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 01/20] net/mlx5: remove flow support Nelio Laranjeiro
2018-07-02 21:53     ` Yongseok Koh
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 02/20] net/mlx5: handle drop queues are regular queues Nelio Laranjeiro
2018-07-03  1:07     ` Yongseok Koh
2018-07-03  7:17       ` Nélio Laranjeiro
2018-07-03 17:05         ` Yongseok Koh
2018-07-04  6:44           ` Nélio Laranjeiro
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 03/20] net/mlx5: replace verbs priorities by flow Nelio Laranjeiro
2018-07-03  1:40     ` Yongseok Koh
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 04/20] net/mlx5: support flow Ethernet item among with drop action Nelio Laranjeiro
2018-07-03 22:27     ` Yongseok Koh
2018-07-04  9:24       ` Nélio Laranjeiro
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 05/20] net/mlx5: add flow queue action Nelio Laranjeiro
2018-07-03 23:00     ` Yongseok Koh
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 06/20] net/mlx5: add flow stop/start Nelio Laranjeiro
2018-07-03 23:08     ` Yongseok Koh
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 07/20] net/mlx5: add flow VLAN item Nelio Laranjeiro
2018-07-03 23:56     ` Yongseok Koh
2018-07-04 12:03       ` Nélio Laranjeiro
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 08/20] net/mlx5: add flow IPv4 item Nelio Laranjeiro
2018-07-04  0:12     ` Yongseok Koh
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 09/20] net/mlx5: add flow IPv6 item Nelio Laranjeiro
2018-07-04  0:16     ` Yongseok Koh
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 10/20] net/mlx5: add flow UDP item Nelio Laranjeiro
2018-07-04  0:17     ` Yongseok Koh
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 11/20] net/mlx5: add flow TCP item Nelio Laranjeiro
2018-07-04  0:18     ` Yongseok Koh
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 12/20] net/mlx5: add mark/flag flow action Nelio Laranjeiro
2018-07-04  8:34     ` Yongseok Koh
2018-07-05  8:47       ` Nélio Laranjeiro
2018-07-05 19:56         ` Yongseok Koh
2018-07-06  8:23           ` Nélio Laranjeiro
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 13/20] net/mlx5: add RSS " Nelio Laranjeiro
2018-07-06  2:16     ` Yongseok Koh
2018-07-06 15:59       ` Nélio Laranjeiro
2018-07-06 17:35         ` Yongseok Koh
2018-07-09 13:09           ` Nélio Laranjeiro
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 14/20] net/mlx5: remove useless arguments in hrxq API Nelio Laranjeiro
2018-07-06  2:18     ` Yongseok Koh
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 15/20] net/mlx5: support inner RSS computation Nelio Laranjeiro
2018-07-06  8:16     ` Yongseok Koh
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 16/20] net/mlx5: add flow VXLAN item Nelio Laranjeiro
2018-07-06 23:14     ` Yongseok Koh
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 17/20] net/mlx5: add flow VXLAN-GPE item Nelio Laranjeiro
2018-07-06 23:23     ` Yongseok Koh
2018-07-09 14:53       ` Nélio Laranjeiro
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 18/20] net/mlx5: add flow GRE item Nelio Laranjeiro
2018-07-06 23:46     ` Yongseok Koh
2018-07-09 13:58       ` Nélio Laranjeiro
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 19/20] net/mlx5: add flow MPLS item Nelio Laranjeiro
2018-07-07  0:11     ` Yongseok Koh
2018-07-09 15:00       ` Nélio Laranjeiro
2018-06-27 15:07   ` [dpdk-dev] [PATCH v2 20/20] net/mlx5: add count flow action Nelio Laranjeiro
2018-07-07  1:08     ` Yongseok Koh
2018-07-11  7:22   ` [dpdk-dev] [PATCH v3 00/21] net/mlx5: flow rework Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 01/21] net/mlx5: remove flow support Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 02/21] net/mlx5: handle drop queues as regular queues Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 03/21] net/mlx5: replace verbs priorities by flow Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 04/21] net/mlx5: support flow Ethernet item along with drop action Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 05/21] net/mlx5: add flow queue action Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 06/21] net/mlx5: add flow stop/start Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 07/21] net/mlx5: add flow VLAN item Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 08/21] net/mlx5: add flow IPv4 item Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 09/21] net/mlx5: add flow IPv6 item Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 10/21] net/mlx5: add flow UDP item Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 11/21] net/mlx5: add flow TCP item Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 12/21] net/mlx5: add mark/flag flow action Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 13/21] net/mlx5: use a macro for the RSS key size Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 14/21] net/mlx5: add RSS flow action Nelio Laranjeiro
2018-07-11 19:57       ` Yongseok Koh
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 15/21] net/mlx5: remove useless arguments in hrxq API Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 16/21] net/mlx5: support inner RSS computation Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 17/21] net/mlx5: add flow VXLAN item Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 18/21] net/mlx5: add flow VXLAN-GPE item Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 19/21] net/mlx5: add flow GRE item Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 20/21] net/mlx5: add flow MPLS item Nelio Laranjeiro
2018-07-11  7:22     ` [dpdk-dev] [PATCH v3 21/21] net/mlx5: add count flow action Nelio Laranjeiro
2018-07-11 20:00     ` [dpdk-dev] [PATCH v3 00/21] net/mlx5: flow rework Yongseok Koh
2018-07-12  9:30     ` [dpdk-dev] [PATCH v4 " Nelio Laranjeiro
2018-07-12  9:30       ` [dpdk-dev] [PATCH v4 01/21] net/mlx5: remove flow support Nelio Laranjeiro
2018-07-12  9:30       ` [dpdk-dev] [PATCH v4 02/21] net/mlx5: handle drop queues as regular queues Nelio Laranjeiro
2018-07-12  9:30       ` [dpdk-dev] [PATCH v4 03/21] net/mlx5: replace verbs priorities by flow Nelio Laranjeiro
2018-07-12  9:30       ` [dpdk-dev] [PATCH v4 04/21] net/mlx5: support flow Ethernet item along with drop action Nelio Laranjeiro
2018-07-12  9:30       ` [dpdk-dev] [PATCH v4 05/21] net/mlx5: add flow queue action Nelio Laranjeiro
2018-07-12  9:30       ` [dpdk-dev] [PATCH v4 06/21] net/mlx5: add flow stop/start Nelio Laranjeiro
2018-07-12  9:30       ` [dpdk-dev] [PATCH v4 07/21] net/mlx5: add flow VLAN item Nelio Laranjeiro
2018-07-12  9:30       ` [dpdk-dev] [PATCH v4 08/21] net/mlx5: add flow IPv4 item Nelio Laranjeiro
2018-07-12  9:30       ` [dpdk-dev] [PATCH v4 09/21] net/mlx5: add flow IPv6 item Nelio Laranjeiro
2018-07-12  9:30       ` [dpdk-dev] [PATCH v4 10/21] net/mlx5: add flow UDP item Nelio Laranjeiro
2018-07-12  9:30       ` [dpdk-dev] [PATCH v4 11/21] net/mlx5: add flow TCP item Nelio Laranjeiro
2018-07-12  9:30       ` [dpdk-dev] [PATCH v4 12/21] net/mlx5: add mark/flag flow action Nelio Laranjeiro
2018-07-12  9:30       ` [dpdk-dev] [PATCH v4 13/21] net/mlx5: use a macro for the RSS key size Nelio Laranjeiro
2018-07-12  9:31       ` [dpdk-dev] [PATCH v4 14/21] net/mlx5: add RSS flow action Nelio Laranjeiro
2018-07-12  9:31       ` [dpdk-dev] [PATCH v4 15/21] net/mlx5: remove useless arguments in hrxq API Nelio Laranjeiro
2018-07-12  9:31       ` Nelio Laranjeiro [this message]
2018-07-12  9:31       ` [dpdk-dev] [PATCH v4 17/21] net/mlx5: add flow VXLAN item Nelio Laranjeiro
2018-07-12  9:31       ` [dpdk-dev] [PATCH v4 18/21] net/mlx5: add flow VXLAN-GPE item Nelio Laranjeiro
2018-07-12  9:31       ` [dpdk-dev] [PATCH v4 19/21] net/mlx5: add flow GRE item Nelio Laranjeiro
2018-07-12  9:31       ` [dpdk-dev] [PATCH v4 20/21] net/mlx5: add flow MPLS item Nelio Laranjeiro
2018-07-12  9:31       ` [dpdk-dev] [PATCH v4 21/21] net/mlx5: add count flow action Nelio Laranjeiro
2018-07-12 10:44       ` [dpdk-dev] [PATCH v4 00/21] net/mlx5: flow rework Shahaf Shuler
