DPDK patches and discussions
From: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
To: dev@dpdk.org
Cc: adrien.mazarguil@6wind.com, yskoh@mellanox.com, ferruh.yigit@intel.com
Subject: [dpdk-dev] [PATCH v2 28/30] net/mlx5: handle RSS hash configuration in RSS flow
Date: Thu,  5 Oct 2017 14:50:00 +0200
Message-ID: <8f61b0656eab8f398c065b72f2496dc785fde0f8.1507207731.git.nelio.laranjeiro@6wind.com>
In-Reply-To: <cover.1507205686.git.nelio.laranjeiro@6wind.com>
In-Reply-To: <cover.1507207731.git.nelio.laranjeiro@6wind.com>

Add RSS support according to the RSS configuration.

A special case is handled when the pattern does not cover the requested
RSS hash configuration, for instance:

 flow create 0 ingress pattern eth / end actions rss queues 0 1 end / end

In such a situation, with testpmd's default RSS configuration (i.e. IP),
the rule must be converted into three Verbs flows to handle the request
correctly, as illustrated below:

 1. An IPv4 flow: an extra IPv4 wildcard specification is added during
    the conversion.
 2. An IPv6 flow: same as for IPv4.
 3. An Ethernet flow matching any other protocol, on which no RSS can be
    performed; this traffic is redirected to the first queue of the user
    request.
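
With testpmd's default IP RSS, the rule above therefore behaves roughly
as if the following three rules had been requested (an illustrative
expansion, not driver output):

 flow create 0 ingress pattern eth / ipv4 / end actions rss queues 0 1 end / end
 flow create 0 ingress pattern eth / ipv6 / end actions rss queues 0 1 end / end
 flow create 0 ingress pattern eth / end actions queue index 0 / end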

The same kind of issue is handled when RSS is requested only on UDPv4,
UDPv6, TCPv4 or TCPv6.

This does not handle priority conflicts, which can occur if the user adds
several colliding flow rules.  In the example above, the request already
consumes two priority levels: one for the IPv4/IPv6 matching rules and one
more (priority + 1) for the Ethernet matching rule.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 1424 ++++++++++++++++++++++++++++++------------
 1 file changed, 1022 insertions(+), 402 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8ada144..d821c79 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -55,6 +55,10 @@
 /* Define minimal priority for control plane flows. */
 #define MLX5_CTRL_FLOW_PRIORITY 4
 
+/* Internet Protocol versions. */
+#define MLX5_IPV4 4
+#define MLX5_IPV6 6
+
 static int
 mlx5_flow_create_eth(const struct rte_flow_item *item,
 		     const void *default_mask,
@@ -90,6 +94,98 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
 		       const void *default_mask,
 		       void *data);
 
+struct mlx5_flow_parse;
+
+static void
+mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
+		      unsigned int size);
+
+static int
+mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id);
+
+/* Hash RX queue types. */
+enum hash_rxq_type {
+	HASH_RXQ_TCPV4,
+	HASH_RXQ_UDPV4,
+	HASH_RXQ_IPV4,
+	HASH_RXQ_TCPV6,
+	HASH_RXQ_UDPV6,
+	HASH_RXQ_IPV6,
+	HASH_RXQ_ETH,
+};
+
+/* Initialization data for hash RX queue. */
+struct hash_rxq_init {
+	uint64_t hash_fields; /* Fields that participate in the hash. */
+	uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
+	unsigned int flow_priority; /* Flow priority to use. */
+	unsigned int ip_version; /* Internet protocol. */
+};
+
+/* Initialization data for hash RX queues. */
+const struct hash_rxq_init hash_rxq_init[] = {
+	[HASH_RXQ_TCPV4] = {
+		.hash_fields = (IBV_RX_HASH_SRC_IPV4 |
+				IBV_RX_HASH_DST_IPV4 |
+				IBV_RX_HASH_SRC_PORT_TCP |
+				IBV_RX_HASH_DST_PORT_TCP),
+		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
+		.flow_priority = 0,
+		.ip_version = MLX5_IPV4,
+	},
+	[HASH_RXQ_UDPV4] = {
+		.hash_fields = (IBV_RX_HASH_SRC_IPV4 |
+				IBV_RX_HASH_DST_IPV4 |
+				IBV_RX_HASH_SRC_PORT_UDP |
+				IBV_RX_HASH_DST_PORT_UDP),
+		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
+		.flow_priority = 0,
+		.ip_version = MLX5_IPV4,
+	},
+	[HASH_RXQ_IPV4] = {
+		.hash_fields = (IBV_RX_HASH_SRC_IPV4 |
+				IBV_RX_HASH_DST_IPV4),
+		.dpdk_rss_hf = (ETH_RSS_IPV4 |
+				ETH_RSS_FRAG_IPV4),
+		.flow_priority = 1,
+		.ip_version = MLX5_IPV4,
+	},
+	[HASH_RXQ_TCPV6] = {
+		.hash_fields = (IBV_RX_HASH_SRC_IPV6 |
+				IBV_RX_HASH_DST_IPV6 |
+				IBV_RX_HASH_SRC_PORT_TCP |
+				IBV_RX_HASH_DST_PORT_TCP),
+		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
+		.flow_priority = 0,
+		.ip_version = MLX5_IPV6,
+	},
+	[HASH_RXQ_UDPV6] = {
+		.hash_fields = (IBV_RX_HASH_SRC_IPV6 |
+				IBV_RX_HASH_DST_IPV6 |
+				IBV_RX_HASH_SRC_PORT_UDP |
+				IBV_RX_HASH_DST_PORT_UDP),
+		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
+		.flow_priority = 0,
+		.ip_version = MLX5_IPV6,
+	},
+	[HASH_RXQ_IPV6] = {
+		.hash_fields = (IBV_RX_HASH_SRC_IPV6 |
+				IBV_RX_HASH_DST_IPV6),
+		.dpdk_rss_hf = (ETH_RSS_IPV6 |
+				ETH_RSS_FRAG_IPV6),
+		.flow_priority = 1,
+		.ip_version = MLX5_IPV6,
+	},
+	[HASH_RXQ_ETH] = {
+		.hash_fields = 0,
+		.dpdk_rss_hf = 0,
+		.flow_priority = 2,
+	},
+};
+
+/* Number of entries in hash_rxq_init[]. */
+const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
+
 /** Structure for Drop queue. */
 struct mlx5_hrxq_drop {
 	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
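
An entry of hash_rxq_init[] yields a Verbs flow attribute only when its
dpdk_rss_hf bits intersect the RSS hash fields requested by the user;
HASH_RXQ_ETH (no hash fields) is the always-available catch-all.  A
minimal sketch of that selection, reusing the names above:

	/* Sketch: is a Verbs attribute needed for this hash Rx queue type? */
	static int
	hash_rxq_needed(enum hash_rxq_type type, uint64_t rss_hf)
	{
		if (type == HASH_RXQ_ETH)
			return 1; /* Catch-all, always present. */
		return !!(hash_rxq_init[type].dpdk_rss_hf & rss_hf);
	}
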
@@ -110,7 +206,6 @@ struct mlx5_flow {
 struct mlx5_flow_drop {
 	struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
 	struct ibv_flow *ibv_flow; /**< Verbs flow. */
-	struct mlx5_hrxq_drop hrxq; /**< Drop hash Rx queue. */
 };
 
 struct rte_flow {
@@ -119,8 +214,11 @@ struct rte_flow {
 	uint32_t drop:1; /**< Drop queue. */
 	uint16_t queues_n; /**< Number of entries in queue[]. */
 	uint16_t (*queues)[]; /**< Queues indexes to use. */
+	struct rte_eth_rss_conf rss_conf; /**< RSS configuration. */
+	uint8_t rss_key[40]; /**< Copy of the RSS key. */
 	union {
-		struct mlx5_flow frxq; /**< Flow with Rx queue. */
+		struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
+		/**< Flow with Rx queue. */
 		struct mlx5_flow_drop drxq; /**< Flow with drop Rx queue. */
 	};
 };
@@ -224,7 +322,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 		.default_mask = &rte_flow_item_ipv4_mask,
 		.mask_sz = sizeof(struct rte_flow_item_ipv4),
 		.convert = mlx5_flow_create_ipv4,
-		.dst_sz = sizeof(struct ibv_flow_spec_ipv4),
+		.dst_sz = sizeof(struct ibv_flow_spec_ipv4_ext),
 	},
 	[RTE_FLOW_ITEM_TYPE_IPV6] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
@@ -296,17 +394,31 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 
 /** Structure to pass to the conversion function. */
 struct mlx5_flow_parse {
-	struct ibv_flow_attr *ibv_attr; /**< Verbs attribute. */
-	unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
 	uint32_t inner; /**< Set once VXLAN is encountered. */
-	uint32_t create:1; /**< Leave allocated resources on exit. */
-	uint32_t queue:1; /**< Target is a receive queue. */
+	uint32_t create:1;
+	/**< Whether resources should remain after validation. */
 	uint32_t drop:1; /**< Target is a drop queue. */
 	uint32_t mark:1; /**< Mark is present in the flow. */
 	uint32_t mark_id; /**< Mark identifier. */
-	uint64_t hash_fields; /**< Fields that participate in the hash. */
 	uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
 	uint16_t queues_n; /**< Number of entries in queue[]. */
+	struct rte_eth_rss_conf rss_conf; /**< RSS configuration. */
+	uint8_t rss_key[40]; /**< Copy of the RSS key. */
+	enum hash_rxq_type layer; /**< Last pattern layer detected. */
+	union {
+		struct {
+			struct ibv_flow_attr *ibv_attr;
+			/**< Pointer to Verbs attributes. */
+			unsigned int offset;
+			/**< Current position or total size of the attribute. */
+		} queue[RTE_DIM(hash_rxq_init)];
+		struct {
+			struct ibv_flow_attr *ibv_attr;
+			/**< Pointer to Verbs attributes. */
+			unsigned int offset;
+			/**< Current position or total size of the attribute. */
+		} drop_q;
+	};
 };
 
 static const struct rte_flow_ops mlx5_flow_ops = {
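
The anonymous union makes the two conversion targets explicit: a flow is
parsed either into one Verbs attribute per hash Rx queue type (queue[])
or into a single drop attribute (drop_q), never both; parser->drop
selects the active member.  For some enum hash_rxq_type t:

	/* Sketch: the attribute buffer currently being filled. */
	struct ibv_flow_attr *attr = parser->drop ?
		parser->drop_q.ibv_attr : parser->queue[t].ibv_attr;
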
@@ -416,16 +528,42 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
 }
 
 /**
- * Validate and convert a flow supported by the NIC.
+ * Copy the user RSS configuration into the parser.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param parser
+ *   Internal parser structure.
+ * @param rss_conf
+ *   User RSS configuration to save.
+ *
+ * @return
+ *   0 on success, errno value on failure.
+ */
+static int
+priv_flow_convert_rss_conf(struct priv *priv,
+			   struct mlx5_flow_parse *parser,
+			   const struct rte_eth_rss_conf *rss_conf)
+{
+	const struct rte_eth_rss_conf *rss =
+		rss_conf ? rss_conf : &priv->rss_conf;
+
+	if (rss->rss_key_len > 40)
+		return EINVAL;
+	parser->rss_conf.rss_key_len = rss->rss_key_len;
+	parser->rss_conf.rss_hf = rss->rss_hf;
+	memcpy(parser->rss_key, rss->rss_key, rss->rss_key_len);
+	parser->rss_conf.rss_key = parser->rss_key;
+	return 0;
+}
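
priv_flow_convert_rss_conf() falls back to the port-wide priv->rss_conf
when the action carries no configuration, and rejects keys longer than
the 40 bytes reserved in the parser.  A hedged usage sketch (assuming a
populated priv):

	/* A NULL rss_conf selects priv->rss_conf as the default. */
	if (priv_flow_convert_rss_conf(priv, &parser, NULL))
		return EINVAL; /* e.g. rss_key_len > 40 */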
+
+/**
+ * Extract and validate the flow rule attributes.
  *
  * @param priv
  *   Pointer to private structure.
  * @param[in] attr
  *   Flow rule attributes.
- * @param[in] pattern
- *   Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- *   Associated actions (list terminated by the END action).
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  * @param[in, out] parser
@@ -435,22 +573,13 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-priv_flow_convert(struct priv *priv,
-		  const struct rte_flow_attr *attr,
-		  const struct rte_flow_item items[],
-		  const struct rte_flow_action actions[],
-		  struct rte_flow_error *error,
-		  struct mlx5_flow_parse *parser)
+priv_flow_convert_attributes(struct priv *priv,
+			     const struct rte_flow_attr *attr,
+			     struct rte_flow_error *error,
+			     struct mlx5_flow_parse *parser)
 {
-	const struct mlx5_flow_items *cur_item = mlx5_flow_items;
-
 	(void)priv;
-	*parser = (struct mlx5_flow_parse){
-		.ibv_attr = parser->ibv_attr,
-		.create = parser->create,
-		.offset = sizeof(struct ibv_flow_attr),
-		.mark_id = MLX5_FLOW_MARK_DEFAULT,
-	};
+	(void)parser;
 	if (attr->group) {
 		rte_flow_error_set(error, ENOTSUP,
 				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
@@ -479,6 +608,37 @@ priv_flow_convert(struct priv *priv,
 				   "only ingress is supported");
 		return -rte_errno;
 	}
+	return 0;
+}
+
+/**
+ * Extract the actions of the flow rule into the parser.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * @param[in, out] parser
+ *   Internal parser structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_convert_actions(struct priv *priv,
+			  const struct rte_flow_action actions[],
+			  struct rte_flow_error *error,
+			  struct mlx5_flow_parse *parser)
+{
+	/*
+	 * Add the default RSS configuration: Verbs needs one to create a
+	 * QP even when no RSS is requested.
+	 */
+	priv_flow_convert_rss_conf(priv, parser,
+				   (const struct rte_eth_rss_conf *)
+				   &priv->rss_conf);
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
 		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
 			continue;
@@ -507,7 +667,6 @@ priv_flow_convert(struct priv *priv,
 				return -rte_errno;
 			}
 			if (!found) {
-				parser->queue = 1;
 				parser->queues_n = 1;
 				parser->queues[0] = queue->index;
 			}
@@ -554,10 +713,17 @@ priv_flow_convert(struct priv *priv,
 					return -rte_errno;
 				}
 			}
-			parser->queue = 1;
 			for (n = 0; n < rss->num; ++n)
 				parser->queues[n] = rss->queue[n];
 			parser->queues_n = rss->num;
+			if (priv_flow_convert_rss_conf(priv, parser,
+						       rss->rss_conf)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ACTION,
+						   actions,
+						   "wrong RSS configuration");
+				return -rte_errno;
+			}
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
 			const struct rte_flow_action_mark *mark =
 				(const struct rte_flow_action_mark *)
@@ -585,18 +751,53 @@ priv_flow_convert(struct priv *priv,
 			goto exit_action_not_supported;
 		}
 	}
-	if (parser->mark && !parser->ibv_attr && !parser->drop)
-		parser->offset += sizeof(struct ibv_flow_spec_action_tag);
-	if (!parser->ibv_attr && parser->drop)
-		parser->offset += sizeof(struct ibv_flow_spec_action_drop);
-	if (!parser->queue && !parser->drop) {
+	if (!parser->queues_n && !parser->drop) {
 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "no valid action");
 		return -rte_errno;
 	}
+	return 0;
+exit_action_not_supported:
+	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+			   actions, "action not supported");
+	return -rte_errno;
+}
+
+/**
+ * Validate items.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * @param[in, out] parser
+ *   Internal parser structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_convert_items_validate(struct priv *priv,
+				 const struct rte_flow_item items[],
+				 struct rte_flow_error *error,
+				 struct mlx5_flow_parse *parser)
+{
+	const struct mlx5_flow_items *cur_item = mlx5_flow_items;
+	unsigned int i;
+
+	(void)priv;
+	/* Initialise the offsets to start after the Verbs attribute. */
+	if (parser->drop) {
+		parser->drop_q.offset = sizeof(struct ibv_flow_attr);
+	} else {
+		for (i = 0; i != hash_rxq_init_n; ++i)
+			parser->queue[i].offset = sizeof(struct ibv_flow_attr);
+	}
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
 		const struct mlx5_flow_items *token = NULL;
-		unsigned int i;
+		unsigned int n;
 		int err;
 
 		if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
@@ -618,15 +819,7 @@ priv_flow_convert(struct priv *priv,
 					      cur_item->mask_sz);
 		if (err)
 			goto exit_item_not_supported;
-		if (parser->ibv_attr && cur_item->convert) {
-			err = cur_item->convert(items,
-						(cur_item->default_mask ?
-						 cur_item->default_mask :
-						 cur_item->mask),
-						parser);
-			if (err)
-				goto exit_item_not_supported;
-		} else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+		if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
 			if (parser->inner) {
 				rte_flow_error_set(error, ENOTSUP,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -637,17 +830,367 @@ priv_flow_convert(struct priv *priv,
 			}
 			parser->inner = 1;
 		}
-		parser->offset += cur_item->dst_sz;
+		if (parser->drop) {
+			parser->drop_q.offset += cur_item->dst_sz;
+		} else if (parser->queues_n == 1) {
+			parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz;
+		} else {
+			for (n = 0; n != hash_rxq_init_n; ++n)
+				parser->queue[n].offset += cur_item->dst_sz;
+		}
+	}
+	if (parser->mark) {
+		for (i = 0; i != hash_rxq_init_n; ++i)
+			parser->queue[i].offset +=
+				sizeof(struct ibv_flow_spec_action_tag);
 	}
 	return 0;
 exit_item_not_supported:
 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
 			   items, "item not supported");
 	return -rte_errno;
-exit_action_not_supported:
-	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
-			   actions, "action not supported");
-	return -rte_errno;
+}
+
+/**
+ * Allocate memory space to store verbs flow attributes.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] priority
+ *   Flow priority.
+ * @param[in] size
+ *   Amount of bytes to allocate.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   A verbs flow attribute on success, NULL otherwise.
+ */
+static struct ibv_flow_attr*
+priv_flow_convert_allocate(struct priv *priv,
+			   unsigned int priority,
+			   unsigned int size,
+			   struct rte_flow_error *error)
+{
+	struct ibv_flow_attr *ibv_attr;
+
+	(void)priv;
+	ibv_attr = rte_calloc(__func__, 1, size, 0);
+	if (!ibv_attr) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL,
+				   "cannot allocate verbs spec attributes.");
+		return NULL;
+	}
+	ibv_attr->priority = priority;
+	return ibv_attr;
+}
+
+/**
+ * Finalise verbs flow attributes.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in, out] parser
+ *   Internal parser structure.
+ */
+static void
+priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser)
+{
+	const unsigned int ipv4 =
+		hash_rxq_init[parser->layer].ip_version == MLX5_IPV4;
+	const enum hash_rxq_type hmin = ipv4 ? HASH_RXQ_TCPV4 : HASH_RXQ_TCPV6;
+	const enum hash_rxq_type hmax = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
+	const enum hash_rxq_type ohmin = ipv4 ? HASH_RXQ_TCPV6 : HASH_RXQ_TCPV4;
+	const enum hash_rxq_type ohmax = ipv4 ? HASH_RXQ_IPV6 : HASH_RXQ_IPV4;
+	const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
+	unsigned int i;
+
+	(void)priv;
+	if (parser->layer == HASH_RXQ_ETH) {
+		goto fill;
+	} else {
+		/*
+		 * The Ethernet layer becomes useless once the pattern
+		 * defines upper layers.
+		 */
+		rte_free(parser->queue[HASH_RXQ_ETH].ibv_attr);
+		parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
+	}
+	/* Remove opposite kind of layer e.g. IPv6 if the pattern is IPv4. */
+	for (i = ohmin; i != (ohmax + 1); ++i) {
+		if (!parser->queue[i].ibv_attr)
+			continue;
+		rte_free(parser->queue[i].ibv_attr);
+		parser->queue[i].ibv_attr = NULL;
+	}
+	/* Remove impossible flow according to the RSS configuration. */
+	if (hash_rxq_init[parser->layer].dpdk_rss_hf &
+	    parser->rss_conf.rss_hf) {
+		/* Remove any other flow. */
+		for (i = hmin; i != (hmax + 1); ++i) {
+			if ((i == parser->layer) ||
+			     (!parser->queue[i].ibv_attr))
+				continue;
+			rte_free(parser->queue[i].ibv_attr);
+			parser->queue[i].ibv_attr = NULL;
+		}
+	} else if (!parser->queue[ip].ibv_attr) {
+		/* No RSS is possible with the current configuration. */
+		parser->queues_n = 1;
+		return;
+	}
+fill:
+	/*
+	 * Fill missing layers in verbs specifications, or compute the correct
+	 * offset to allocate the memory space for the attributes and
+	 * specifications.
+	 */
+	for (i = 0; i != hash_rxq_init_n - 1; ++i) {
+		union {
+			struct ibv_flow_spec_ipv4_ext ipv4;
+			struct ibv_flow_spec_ipv6 ipv6;
+			struct ibv_flow_spec_tcp_udp udp_tcp;
+		} specs;
+		void *dst;
+		uint16_t size;
+
+		if (i == parser->layer)
+			continue;
+		if (parser->layer == HASH_RXQ_ETH) {
+			if (hash_rxq_init[i].ip_version == MLX5_IPV4) {
+				size = sizeof(struct ibv_flow_spec_ipv4_ext);
+				specs.ipv4 = (struct ibv_flow_spec_ipv4_ext){
+					.type = IBV_FLOW_SPEC_IPV4_EXT |
+						parser->inner,
+					.size = size,
+				};
+			} else {
+				size = sizeof(struct ibv_flow_spec_ipv6);
+				specs.ipv6 = (struct ibv_flow_spec_ipv6){
+					.type = IBV_FLOW_SPEC_IPV6 |
+						parser->inner,
+					.size = size,
+				};
+			}
+			if (parser->queue[i].ibv_attr) {
+				dst = (void *)((uintptr_t)
+					       parser->queue[i].ibv_attr +
+					       parser->queue[i].offset);
+				memcpy(dst, &specs, size);
+				++parser->queue[i].ibv_attr->num_of_specs;
+			}
+			parser->queue[i].offset += size;
+		}
+		if ((i == HASH_RXQ_UDPV4) || (i == HASH_RXQ_TCPV4) ||
+		    (i == HASH_RXQ_UDPV6) || (i == HASH_RXQ_TCPV6)) {
+			size = sizeof(struct ibv_flow_spec_tcp_udp);
+			specs.udp_tcp = (struct ibv_flow_spec_tcp_udp) {
+				.type = ((i == HASH_RXQ_UDPV4 ||
+					  i == HASH_RXQ_UDPV6) ?
+					 IBV_FLOW_SPEC_UDP :
+					 IBV_FLOW_SPEC_TCP) |
+					parser->inner,
+				.size = size,
+			};
+			if (parser->queue[i].ibv_attr) {
+				dst = (void *)((uintptr_t)
+					       parser->queue[i].ibv_attr +
+					       parser->queue[i].offset);
+				memcpy(dst, &specs, size);
+				++parser->queue[i].ibv_attr->num_of_specs;
+			}
+			parser->queue[i].offset += size;
+		}
+	}
+}
+
+/**
+ * Validate and convert a flow supported by the NIC.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * @param[in, out] parser
+ *   Internal parser structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_convert(struct priv *priv,
+		  const struct rte_flow_attr *attr,
+		  const struct rte_flow_item items[],
+		  const struct rte_flow_action actions[],
+		  struct rte_flow_error *error,
+		  struct mlx5_flow_parse *parser)
+{
+	const struct mlx5_flow_items *cur_item = mlx5_flow_items;
+	unsigned int i;
+	int ret;
+
+	/* First step. Validate the attributes, items and actions. */
+	*parser = (struct mlx5_flow_parse){
+		.create = parser->create,
+		.layer = HASH_RXQ_ETH,
+		.mark_id = MLX5_FLOW_MARK_DEFAULT,
+	};
+	ret = priv_flow_convert_attributes(priv, attr, error, parser);
+	if (ret)
+		return ret;
+	ret = priv_flow_convert_actions(priv, actions, error, parser);
+	if (ret)
+		return ret;
+	ret = priv_flow_convert_items_validate(priv, items, error, parser);
+	if (ret)
+		return ret;
+	priv_flow_convert_finalise(priv, parser);
+	/*
+	 * Second step.
+	 * Allocate the memory space to store verbs specifications.
+	 */
+	if (parser->drop) {
+		parser->drop_q.ibv_attr =
+			priv_flow_convert_allocate(priv, attr->priority,
+						   parser->drop_q.offset,
+						   error);
+		if (!parser->drop_q.ibv_attr)
+			return ENOMEM;
+		parser->drop_q.offset = sizeof(struct ibv_flow_attr);
+	} else if (parser->queues_n == 1) {
+		unsigned int priority =
+			attr->priority +
+			hash_rxq_init[HASH_RXQ_ETH].flow_priority;
+		unsigned int offset = parser->queue[HASH_RXQ_ETH].offset;
+
+		parser->queue[HASH_RXQ_ETH].ibv_attr =
+			priv_flow_convert_allocate(priv, priority,
+						   offset, error);
+		if (!parser->queue[HASH_RXQ_ETH].ibv_attr)
+			return ENOMEM;
+		parser->queue[HASH_RXQ_ETH].offset =
+			sizeof(struct ibv_flow_attr);
+	} else {
+		for (i = 0; i != hash_rxq_init_n; ++i) {
+			unsigned int priority =
+				attr->priority +
+				hash_rxq_init[HASH_RXQ_ETH].flow_priority;
+			unsigned int offset;
+
+			if (!(parser->rss_conf.rss_hf &
+			      hash_rxq_init[i].dpdk_rss_hf) &&
+			    (i != HASH_RXQ_ETH))
+				continue;
+			offset = parser->queue[i].offset;
+			parser->queue[i].ibv_attr =
+				priv_flow_convert_allocate(priv, priority,
+							   offset, error);
+			if (!parser->queue[i].ibv_attr)
+				goto exit_enomem;
+			parser->queue[i].offset = sizeof(struct ibv_flow_attr);
+		}
+	}
+	/* Third step. Conversion parse, fill the specifications. */
+	parser->inner = 0;
+	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+		if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+			continue;
+		cur_item = &mlx5_flow_items[items->type];
+		ret = cur_item->convert(items,
+					(cur_item->default_mask ?
+					 cur_item->default_mask :
+					 cur_item->mask),
+					parser);
+		if (ret) {
+			rte_flow_error_set(error, ENOTSUP,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   items, "item not supported");
+			goto exit_free;
+		}
+	}
+	if (parser->mark)
+		mlx5_flow_create_flag_mark(parser, parser->mark_id);
+	/*
+	 * Last step. Complete missing specification to reach the RSS
+	 * configuration.
+	 */
+	if (parser->queues_n > 1)
+		priv_flow_convert_finalise(priv, parser);
+exit_free:
+	/* If only validation was requested, all resources must be released. */
+	if (!parser->create) {
+		if (parser->drop) {
+			rte_free(parser->drop_q.ibv_attr);
+			parser->drop_q.ibv_attr = NULL;
+		}
+		for (i = 0; i != hash_rxq_init_n; ++i) {
+			if (parser->queue[i].ibv_attr) {
+				rte_free(parser->queue[i].ibv_attr);
+				parser->queue[i].ibv_attr = NULL;
+			}
+		}
+	}
+	return ret;
+exit_enomem:
+	for (i = 0; i != hash_rxq_init_n; ++i) {
+		if (parser->queue[i].ibv_attr) {
+			rte_free(parser->queue[i].ibv_attr);
+			parser->queue[i].ibv_attr = NULL;
+		}
+	}
+	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			   NULL, "cannot allocate verbs spec attributes.");
+	return ret;
+}
+
+/**
+ * Copy the created specification into each active Verbs flow attribute.
+ *
+ * @param parser
+ *   Internal parser structure.
+ * @param src
+ *   Created specification.
+ * @param size
+ *   Size in bytes of the specification to copy.
+ */
+static void
+mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
+		      unsigned int size)
+{
+	unsigned int i;
+	void *dst;
+
+	if (parser->drop) {
+		dst = (void *)((uintptr_t)parser->drop_q.ibv_attr +
+				parser->drop_q.offset);
+		memcpy(dst, src, size);
+		++parser->drop_q.ibv_attr->num_of_specs;
+		parser->drop_q.offset += size;
+		return;
+	}
+	for (i = 0; i != hash_rxq_init_n; ++i) {
+		if (!parser->queue[i].ibv_attr)
+			continue;
+		/* Specification must be the same L3 type or none. */
+		if (parser->layer == HASH_RXQ_ETH ||
+		    (hash_rxq_init[parser->layer].ip_version ==
+		     hash_rxq_init[i].ip_version) ||
+		    (hash_rxq_init[i].ip_version == 0)) {
+			dst = (void *)((uintptr_t)parser->queue[i].ibv_attr +
+					parser->queue[i].offset);
+			memcpy(dst, src, size);
+			++parser->queue[i].ibv_attr->num_of_specs;
+			parser->queue[i].offset += size;
+		}
+	}
 }
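
The conversion is offset driven: the validation pass accumulates
queue[i].offset, the allocation pass turns each final offset into an
ibv_attr buffer, then offset is reset to sizeof(struct ibv_flow_attr) so
mlx5_flow_create_copy() can advance it again while appending
specifications.  A condensed sketch of that append step:

	/* Sketch: append one specification behind a Verbs attribute. */
	static void
	append_spec(struct ibv_flow_attr *attr, unsigned int *offset,
		    const void *spec, unsigned int size)
	{
		void *dst = (void *)((uintptr_t)attr + *offset);

		memcpy(dst, spec, size);
		++attr->num_of_specs;
		*offset += size;
	}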
 
 /**
@@ -668,33 +1211,32 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
 	const struct rte_flow_item_eth *spec = item->spec;
 	const struct rte_flow_item_eth *mask = item->mask;
 	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
-	struct ibv_flow_spec_eth *eth;
 	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
-	unsigned int i;
-
-	++parser->ibv_attr->num_of_specs;
-	parser->hash_fields = 0;
-	eth = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
-	*eth = (struct ibv_flow_spec_eth) {
+	struct ibv_flow_spec_eth eth = {
 		.type = parser->inner | IBV_FLOW_SPEC_ETH,
 		.size = eth_size,
 	};
-	if (!spec)
-		return 0;
-	if (!mask)
-		mask = default_mask;
-	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
-	memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
-	eth->val.ether_type = spec->type;
-	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
-	memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
-	eth->mask.ether_type = mask->type;
-	/* Remove unwanted bits from values. */
-	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
-		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
-		eth->val.src_mac[i] &= eth->mask.src_mac[i];
+
+	parser->layer = HASH_RXQ_ETH;
+	if (spec) {
+		unsigned int i;
+
+		if (!mask)
+			mask = default_mask;
+		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+		memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+		eth.val.ether_type = spec->type;
+		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+		memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+		eth.mask.ether_type = mask->type;
+		/* Remove unwanted bits from values. */
+		for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
+			eth.val.src_mac[i] &= eth.mask.src_mac[i];
+		}
+		eth.val.ether_type &= eth.mask.ether_type;
 	}
-	eth->val.ether_type &= eth->mask.ether_type;
+	mlx5_flow_create_copy(parser, &eth, eth_size);
 	return 0;
 }
 
@@ -719,14 +1261,30 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item,
 	struct ibv_flow_spec_eth *eth;
 	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
 
-	eth = (void *)((uintptr_t)parser->ibv_attr + parser->offset - eth_size);
-	if (!spec)
-		return 0;
-	if (!mask)
-		mask = default_mask;
-	eth->val.vlan_tag = spec->tci;
-	eth->mask.vlan_tag = mask->tci;
-	eth->val.vlan_tag &= eth->mask.vlan_tag;
+	if (spec) {
+		unsigned int i;
+		if (!mask)
+			mask = default_mask;
+
+		if (parser->drop) {
+			eth = (void *)((uintptr_t)parser->drop_q.ibv_attr +
+				       parser->drop_q.offset - eth_size);
+			eth->val.vlan_tag = spec->tci;
+			eth->mask.vlan_tag = mask->tci;
+			eth->val.vlan_tag &= eth->mask.vlan_tag;
+			return 0;
+		}
+		for (i = 0; i != hash_rxq_init_n; ++i) {
+			if (!parser->queue[i].ibv_attr)
+				continue;
+
+			eth = (void *)((uintptr_t)parser->queue[i].ibv_attr +
+				       parser->queue[i].offset - eth_size);
+			eth->val.vlan_tag = spec->tci;
+			eth->mask.vlan_tag = mask->tci;
+			eth->val.vlan_tag &= eth->mask.vlan_tag;
+		}
+	}
 	return 0;
 }
 
@@ -748,37 +1306,35 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,
 	const struct rte_flow_item_ipv4 *spec = item->spec;
 	const struct rte_flow_item_ipv4 *mask = item->mask;
 	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
-	struct ibv_flow_spec_ipv4_ext *ipv4;
 	unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);
-
-	++parser->ibv_attr->num_of_specs;
-	parser->hash_fields = (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4);
-	ipv4 = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
-	*ipv4 = (struct ibv_flow_spec_ipv4_ext) {
+	struct ibv_flow_spec_ipv4_ext ipv4 = {
 		.type = parser->inner | IBV_FLOW_SPEC_IPV4_EXT,
 		.size = ipv4_size,
 	};
-	if (!spec)
-		return 0;
-	if (!mask)
-		mask = default_mask;
-	ipv4->val = (struct ibv_flow_ipv4_ext_filter){
-		.src_ip = spec->hdr.src_addr,
-		.dst_ip = spec->hdr.dst_addr,
-		.proto = spec->hdr.next_proto_id,
-		.tos = spec->hdr.type_of_service,
-	};
-	ipv4->mask = (struct ibv_flow_ipv4_ext_filter){
-		.src_ip = mask->hdr.src_addr,
-		.dst_ip = mask->hdr.dst_addr,
-		.proto = mask->hdr.next_proto_id,
-		.tos = mask->hdr.type_of_service,
-	};
-	/* Remove unwanted bits from values. */
-	ipv4->val.src_ip &= ipv4->mask.src_ip;
-	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
-	ipv4->val.proto &= ipv4->mask.proto;
-	ipv4->val.tos &= ipv4->mask.tos;
+
+	parser->layer = HASH_RXQ_IPV4;
+	if (spec) {
+		if (!mask)
+			mask = default_mask;
+		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
+			.src_ip = spec->hdr.src_addr,
+			.dst_ip = spec->hdr.dst_addr,
+			.proto = spec->hdr.next_proto_id,
+			.tos = spec->hdr.type_of_service,
+		};
+		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
+			.src_ip = mask->hdr.src_addr,
+			.dst_ip = mask->hdr.dst_addr,
+			.proto = mask->hdr.next_proto_id,
+			.tos = mask->hdr.type_of_service,
+		};
+		/* Remove unwanted bits from values. */
+		ipv4.val.src_ip &= ipv4.mask.src_ip;
+		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
+		ipv4.val.proto &= ipv4.mask.proto;
+		ipv4.val.tos &= ipv4.mask.tos;
+	}
+	mlx5_flow_create_copy(parser, &ipv4, ipv4_size);
 	return 0;
 }
 
@@ -800,40 +1356,39 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
 	const struct rte_flow_item_ipv6 *spec = item->spec;
 	const struct rte_flow_item_ipv6 *mask = item->mask;
 	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
-	struct ibv_flow_spec_ipv6 *ipv6;
 	unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);
-	unsigned int i;
-
-	++parser->ibv_attr->num_of_specs;
-	parser->hash_fields = (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6);
-	ipv6 = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
-	*ipv6 = (struct ibv_flow_spec_ipv6) {
+	struct ibv_flow_spec_ipv6 ipv6 = {
 		.type = parser->inner | IBV_FLOW_SPEC_IPV6,
 		.size = ipv6_size,
 	};
-	if (!spec)
-		return 0;
-	if (!mask)
-		mask = default_mask;
-	memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
-	       RTE_DIM(ipv6->val.src_ip));
-	memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
-	       RTE_DIM(ipv6->val.dst_ip));
-	memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
-	       RTE_DIM(ipv6->mask.src_ip));
-	memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
-	       RTE_DIM(ipv6->mask.dst_ip));
-	ipv6->mask.flow_label = mask->hdr.vtc_flow;
-	ipv6->mask.next_hdr = mask->hdr.proto;
-	ipv6->mask.hop_limit = mask->hdr.hop_limits;
-	/* Remove unwanted bits from values. */
-	for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
-		ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
-		ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
+
+	parser->layer = HASH_RXQ_IPV6;
+	if (spec) {
+		unsigned int i;
+
+		if (!mask)
+			mask = default_mask;
+		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
+		       RTE_DIM(ipv6.val.src_ip));
+		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
+		       RTE_DIM(ipv6.val.dst_ip));
+		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
+		       RTE_DIM(ipv6.mask.src_ip));
+		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
+		       RTE_DIM(ipv6.mask.dst_ip));
+		ipv6.mask.flow_label = mask->hdr.vtc_flow;
+		ipv6.mask.next_hdr = mask->hdr.proto;
+		ipv6.mask.hop_limit = mask->hdr.hop_limits;
+		/* Remove unwanted bits from values. */
+		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
+			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
+			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
+		}
+		ipv6.val.flow_label &= ipv6.mask.flow_label;
+		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
+		ipv6.val.hop_limit &= ipv6.mask.hop_limit;
 	}
-	ipv6->val.flow_label &= ipv6->mask.flow_label;
-	ipv6->val.next_hdr &= ipv6->mask.next_hdr;
-	ipv6->val.hop_limit &= ipv6->mask.hop_limit;
+	mlx5_flow_create_copy(parser, &ipv6, ipv6_size);
 	return 0;
 }
 
@@ -855,28 +1410,28 @@ mlx5_flow_create_udp(const struct rte_flow_item *item,
 	const struct rte_flow_item_udp *spec = item->spec;
 	const struct rte_flow_item_udp *mask = item->mask;
 	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
-	struct ibv_flow_spec_tcp_udp *udp;
 	unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
-
-	++parser->ibv_attr->num_of_specs;
-	parser->hash_fields |= (IBV_RX_HASH_SRC_PORT_UDP |
-				IBV_RX_HASH_DST_PORT_UDP);
-	udp = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
-	*udp = (struct ibv_flow_spec_tcp_udp) {
+	struct ibv_flow_spec_tcp_udp udp = {
 		.type = parser->inner | IBV_FLOW_SPEC_UDP,
 		.size = udp_size,
 	};
-	if (!spec)
-		return 0;
-	if (!mask)
-		mask = default_mask;
-	udp->val.dst_port = spec->hdr.dst_port;
-	udp->val.src_port = spec->hdr.src_port;
-	udp->mask.dst_port = mask->hdr.dst_port;
-	udp->mask.src_port = mask->hdr.src_port;
-	/* Remove unwanted bits from values. */
-	udp->val.src_port &= udp->mask.src_port;
-	udp->val.dst_port &= udp->mask.dst_port;
+
+	if (parser->layer == HASH_RXQ_IPV4)
+		parser->layer = HASH_RXQ_UDPV4;
+	else
+		parser->layer = HASH_RXQ_UDPV6;
+	if (spec) {
+		if (!mask)
+			mask = default_mask;
+		udp.val.dst_port = spec->hdr.dst_port;
+		udp.val.src_port = spec->hdr.src_port;
+		udp.mask.dst_port = mask->hdr.dst_port;
+		udp.mask.src_port = mask->hdr.src_port;
+		/* Remove unwanted bits from values. */
+		udp.val.src_port &= udp.mask.src_port;
+		udp.val.dst_port &= udp.mask.dst_port;
+	}
+	mlx5_flow_create_copy(parser, &udp, udp_size);
 	return 0;
 }
 
@@ -898,28 +1453,28 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
 	const struct rte_flow_item_tcp *spec = item->spec;
 	const struct rte_flow_item_tcp *mask = item->mask;
 	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
-	struct ibv_flow_spec_tcp_udp *tcp;
 	unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
-
-	++parser->ibv_attr->num_of_specs;
-	parser->hash_fields |= (IBV_RX_HASH_SRC_PORT_TCP |
-				IBV_RX_HASH_DST_PORT_TCP);
-	tcp = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
-	*tcp = (struct ibv_flow_spec_tcp_udp) {
+	struct ibv_flow_spec_tcp_udp tcp = {
 		.type = parser->inner | IBV_FLOW_SPEC_TCP,
 		.size = tcp_size,
 	};
-	if (!spec)
-		return 0;
-	if (!mask)
-		mask = default_mask;
-	tcp->val.dst_port = spec->hdr.dst_port;
-	tcp->val.src_port = spec->hdr.src_port;
-	tcp->mask.dst_port = mask->hdr.dst_port;
-	tcp->mask.src_port = mask->hdr.src_port;
-	/* Remove unwanted bits from values. */
-	tcp->val.src_port &= tcp->mask.src_port;
-	tcp->val.dst_port &= tcp->mask.dst_port;
+
+	if (parser->layer == HASH_RXQ_IPV4)
+		parser->layer = HASH_RXQ_TCPV4;
+	else
+		parser->layer = HASH_RXQ_TCPV6;
+	if (spec) {
+		if (!mask)
+			mask = default_mask;
+		tcp.val.dst_port = spec->hdr.dst_port;
+		tcp.val.src_port = spec->hdr.src_port;
+		tcp.mask.dst_port = mask->hdr.dst_port;
+		tcp.mask.src_port = mask->hdr.src_port;
+		/* Remove unwanted bits from values. */
+		tcp.val.src_port &= tcp.mask.src_port;
+		tcp.val.dst_port &= tcp.mask.dst_port;
+	}
+	mlx5_flow_create_copy(parser, &tcp, tcp_size);
 	return 0;
 }
 
@@ -941,31 +1496,29 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
 	const struct rte_flow_item_vxlan *spec = item->spec;
 	const struct rte_flow_item_vxlan *mask = item->mask;
 	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
-	struct ibv_flow_spec_tunnel *vxlan;
 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+	struct ibv_flow_spec_tunnel vxlan = {
+		.type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
+		.size = size,
+	};
 	union vni {
 		uint32_t vlan_id;
 		uint8_t vni[4];
 	} id;
 
-	++parser->ibv_attr->num_of_specs;
 	id.vni[0] = 0;
-	vxlan = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
-	*vxlan = (struct ibv_flow_spec_tunnel) {
-		.type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
-		.size = size,
-	};
 	parser->inner = IBV_FLOW_SPEC_INNER;
-	if (!spec)
-		return 0;
-	if (!mask)
-		mask = default_mask;
-	memcpy(&id.vni[1], spec->vni, 3);
-	vxlan->val.tunnel_id = id.vlan_id;
-	memcpy(&id.vni[1], mask->vni, 3);
-	vxlan->mask.tunnel_id = id.vlan_id;
-	/* Remove unwanted bits from values. */
-	vxlan->val.tunnel_id &= vxlan->mask.tunnel_id;
+	if (spec) {
+		if (!mask)
+			mask = default_mask;
+		memcpy(&id.vni[1], spec->vni, 3);
+		vxlan.val.tunnel_id = id.vlan_id;
+		memcpy(&id.vni[1], mask->vni, 3);
+		vxlan.mask.tunnel_id = id.vlan_id;
+		/* Remove unwanted bits from values. */
+		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+	}
+	mlx5_flow_create_copy(parser, &vxlan, size);
 	return 0;
 }
 
@@ -980,18 +1533,15 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
 static int
 mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
 {
-	struct ibv_flow_spec_action_tag *tag;
 	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
-
-	assert(parser->mark);
-	tag = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
-	*tag = (struct ibv_flow_spec_action_tag){
+	struct ibv_flow_spec_action_tag tag = {
 		.type = IBV_FLOW_SPEC_ACTION_TAG,
 		.size = size,
 		.tag_id = mlx5_flow_mark_set(mark_id),
 	};
-	++parser->ibv_attr->num_of_specs;
-	parser->offset += size;
+
+	assert(parser->mark);
+	mlx5_flow_create_copy(parser, &tag, size);
 	return 0;
 }
 
@@ -1002,196 +1552,188 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
  *   Pointer to private structure.
  * @param parser
  *   Internal parser structure.
+ * @param flow
+ *   Pointer to the rte_flow.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  *
  * @return
- *   A flow if the rule could be created.
+ *   0 on success, errno value on failure.
  */
-static struct rte_flow *
+static int
 priv_flow_create_action_queue_drop(struct priv *priv,
 				   struct mlx5_flow_parse *parser,
+				   struct rte_flow *flow,
 				   struct rte_flow_error *error)
 {
-	struct rte_flow *rte_flow;
 	struct ibv_flow_spec_action_drop *drop;
 	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
+	int err = 0;
 
 	assert(priv->pd);
 	assert(priv->ctx);
-	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
-	if (!rte_flow) {
-		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-				   NULL, "cannot allocate flow memory");
-		return NULL;
-	}
-	rte_flow->drop = 1;
-	drop = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
+	flow->drop = 1;
+	drop = (void *)((uintptr_t)parser->drop_q.ibv_attr +
+			parser->drop_q.offset);
 	*drop = (struct ibv_flow_spec_action_drop){
 			.type = IBV_FLOW_SPEC_ACTION_DROP,
 			.size = size,
 	};
-	++parser->ibv_attr->num_of_specs;
-	parser->offset += sizeof(struct ibv_flow_spec_action_drop);
-	rte_flow->drxq.ibv_attr = parser->ibv_attr;
+	++parser->drop_q.ibv_attr->num_of_specs;
+	parser->drop_q.offset += size;
 	if (!priv->dev->data->dev_started)
-		return rte_flow;
-	rte_flow->drxq.hrxq.qp = priv->flow_drop_queue->qp;
-	rte_flow->drxq.ibv_flow = ibv_create_flow(rte_flow->drxq.hrxq.qp,
-						  rte_flow->drxq.ibv_attr);
-	if (!rte_flow->drxq.ibv_flow) {
+		return 0;
+	flow->drxq.ibv_attr = parser->drop_q.ibv_attr;
+	parser->drop_q.ibv_attr = NULL;
+	flow->drxq.ibv_flow = ibv_create_flow(priv->flow_drop_queue->qp,
+					      flow->drxq.ibv_attr);
+	if (!flow->drxq.ibv_flow) {
 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "flow rule creation failure");
+		err = ENOMEM;
 		goto error;
 	}
-	return rte_flow;
+	return 0;
 error:
-	assert(rte_flow);
-	rte_free(rte_flow);
-	return NULL;
+	assert(flow);
+	if (flow->drxq.ibv_flow) {
+		claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
+		flow->drxq.ibv_flow = NULL;
+	}
+	if (flow->drxq.ibv_attr) {
+		rte_free(flow->drxq.ibv_attr);
+		flow->drxq.ibv_attr = NULL;
+	}
+	return err;
 }
 
 /**
- * Complete flow rule creation.
+ * Create hash Rx queues when RSS is enabled.
  *
  * @param priv
  *   Pointer to private structure.
  * @param parser
- *   MLX5 flow parser attributes (filled by mlx5_flow_validate()).
+ *   Internal parser structure.
+ * @param flow
+ *   Pointer to the rte_flow.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  *
  * @return
- *   A flow if the rule could be created.
+ *   0 on success, an errno value otherwise and rte_errno is set.
  */
-static struct rte_flow *
-priv_flow_create_action_queue(struct priv *priv,
-			      struct mlx5_flow_parse *parser,
-			      struct rte_flow_error *error)
+static int
+priv_flow_create_action_queue_rss(struct priv *priv,
+				  struct mlx5_flow_parse *parser,
+				  struct rte_flow *flow,
+				  struct rte_flow_error *error)
 {
-	struct rte_flow *rte_flow;
 	unsigned int i;
 
-	assert(priv->pd);
-	assert(priv->ctx);
-	assert(!parser->drop);
-	rte_flow = rte_calloc(__func__, 1,
-			      sizeof(*rte_flow) +
-			      parser->queues_n * sizeof(uint16_t),
-			      0);
-	if (!rte_flow) {
-		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-				   NULL, "cannot allocate flow memory");
-		return NULL;
-	}
-	rte_flow->mark = parser->mark;
-	rte_flow->frxq.ibv_attr = parser->ibv_attr;
-	rte_flow->queues = (uint16_t (*)[])(rte_flow + 1);
-	memcpy(rte_flow->queues, parser->queues,
-	       parser->queues_n * sizeof(uint16_t));
-	rte_flow->queues_n = parser->queues_n;
-	rte_flow->frxq.hash_fields = parser->hash_fields;
-	rte_flow->frxq.hrxq = mlx5_priv_hrxq_get(priv, rss_hash_default_key,
-						 rss_hash_default_key_len,
-						 parser->hash_fields,
-						 (*rte_flow->queues),
-						 rte_flow->queues_n);
-	if (!rte_flow->frxq.hrxq) {
-		rte_flow->frxq.hrxq =
-			mlx5_priv_hrxq_new(priv, rss_hash_default_key,
-					   rss_hash_default_key_len,
-					   parser->hash_fields,
-					   (*rte_flow->queues),
-					   rte_flow->queues_n);
-		if (!rte_flow->frxq.hrxq) {
+	for (i = 0; i != hash_rxq_init_n; ++i) {
+		uint64_t hash_fields;
+
+		if (!parser->queue[i].ibv_attr)
+			continue;
+		flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr;
+		parser->queue[i].ibv_attr = NULL;
+		hash_fields = hash_rxq_init[i].hash_fields;
+		flow->frxq[i].hrxq =
+			mlx5_priv_hrxq_get(priv,
+					   parser->rss_conf.rss_key,
+					   parser->rss_conf.rss_key_len,
+					   hash_fields,
+					   parser->queues,
+					   hash_fields ? parser->queues_n : 1);
+		if (flow->frxq[i].hrxq)
+			continue;
+		flow->frxq[i].hrxq =
+			mlx5_priv_hrxq_new(priv,
+					   parser->rss_conf.rss_key,
+					   parser->rss_conf.rss_key_len,
+					   hash_fields,
+					   parser->queues,
+					   hash_fields ? parser->queues_n : 1);
+		if (!flow->frxq[i].hrxq) {
 			rte_flow_error_set(error, ENOMEM,
 					   RTE_FLOW_ERROR_TYPE_HANDLE,
 					   NULL, "cannot create hash rxq");
-			goto error;
+			return ENOMEM;
 		}
 	}
-	for (i = 0; i != parser->queues_n; ++i) {
-		struct mlx5_rxq_data *q =
-			(*priv->rxqs)[parser->queues[i]];
-
-		q->mark |= parser->mark;
-	}
-	if (!priv->dev->data->dev_started)
-		return rte_flow;
-	rte_flow->frxq.ibv_flow = ibv_create_flow(rte_flow->frxq.hrxq->qp,
-						  rte_flow->frxq.ibv_attr);
-	if (!rte_flow->frxq.ibv_flow) {
-		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-				   NULL, "flow rule creation failure");
-		goto error;
-	}
-	return rte_flow;
-error:
-	assert(rte_flow);
-	if (rte_flow->frxq.hrxq)
-		mlx5_priv_hrxq_release(priv, rte_flow->frxq.hrxq);
-	rte_free(rte_flow);
-	return NULL;
+	return 0;
 }
 
 /**
- * Validate a flow.
+ * Complete flow rule creation.
  *
  * @param priv
  *   Pointer to private structure.
- * @param[in] attr
- *   Flow rule attributes.
- * @param[in] pattern
- *   Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- *   Associated actions (list terminated by the END action).
+ * @param parser
+ *   Internal parser structure.
+ * @param flow
+ *   Pointer to the rte_flow.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
- * @param[in,out] parser
- *   MLX5 parser structure.
  *
  * @return
- *   0 on success, negative errno value on failure.
+ *   0 on success, an errno value otherwise and rte_errno is set.
  */
 static int
-priv_flow_validate(struct priv *priv,
-		   const struct rte_flow_attr *attr,
-		   const struct rte_flow_item items[],
-		   const struct rte_flow_action actions[],
-		   struct rte_flow_error *error,
-		   struct mlx5_flow_parse *parser)
+priv_flow_create_action_queue(struct priv *priv,
+			      struct mlx5_flow_parse *parser,
+			      struct rte_flow *flow,
+			      struct rte_flow_error *error)
 {
-	int err;
+	int err = 0;
+	unsigned int i;
 
-	err = priv_flow_convert(priv, attr, items, actions, error, parser);
+	assert(priv->pd);
+	assert(priv->ctx);
+	assert(!parser->drop);
+	err = priv_flow_create_action_queue_rss(priv, parser, flow, error);
 	if (err)
-		goto exit;
-	if (parser->mark)
-		parser->offset += sizeof(struct ibv_flow_spec_action_tag);
-	parser->ibv_attr = rte_malloc(__func__, parser->offset, 0);
-	if (!parser->ibv_attr) {
-		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-				   NULL, "cannot allocate ibv_attr memory");
-		err = rte_errno;
-		goto exit;
+		goto error;
+	if (!priv->dev->data->dev_started)
+		return 0;
+	for (i = 0; i != hash_rxq_init_n; ++i) {
+		if (!flow->frxq[i].hrxq)
+			continue;
+		flow->frxq[i].ibv_flow =
+			ibv_create_flow(flow->frxq[i].hrxq->qp,
+					flow->frxq[i].ibv_attr);
+		if (!flow->frxq[i].ibv_flow) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_HANDLE,
+					   NULL, "flow rule creation failure");
+			err = ENOMEM;
+			goto error;
+		}
+		DEBUG("%p type %d QP %p ibv_flow %p",
+		      (void *)flow, i,
+		      (void *)flow->frxq[i].hrxq,
+		      (void *)flow->frxq[i].ibv_flow);
+	}
+	for (i = 0; i != parser->queues_n; ++i) {
+		struct mlx5_rxq_data *q =
+			(*priv->rxqs)[parser->queues[i]];
+
+		q->mark |= parser->mark;
 	}
-	*parser->ibv_attr = (struct ibv_flow_attr){
-		.type = IBV_FLOW_ATTR_NORMAL,
-		.size = sizeof(struct ibv_flow_attr),
-		.priority = attr->priority,
-		.num_of_specs = 0,
-		.port = 0,
-		.flags = 0,
-	};
-	err = priv_flow_convert(priv, attr, items, actions, error, parser);
-	if (err || parser->create)
-		goto exit;
-	if (parser->mark)
-		mlx5_flow_create_flag_mark(parser, parser->mark_id);
 	return 0;
-exit:
-	if (parser->ibv_attr)
-		rte_free(parser->ibv_attr);
+error:
+	assert(flow);
+	for (i = 0; i != hash_rxq_init_n; ++i) {
+		if (flow->frxq[i].ibv_flow) {
+			struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow;
+
+			claim_zero(ibv_destroy_flow(ibv_flow));
+		}
+		if (flow->frxq[i].hrxq)
+			mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
+		if (flow->frxq[i].ibv_attr)
+			rte_free(flow->frxq[i].ibv_attr);
+	}
 	return err;
 }
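
Note the queue count passed to the hash Rx queue helpers above:
hash_fields is zero for HASH_RXQ_ETH, so the non-hashable catch-all flow
is created over a single queue, implementing the "redirect to the first
queue of the user request" behaviour described in the commit log:

	/* Sketch: ETH (no hash fields) uses only the first queue. */
	unsigned int queues_n = hash_fields ? parser->queues_n : 1;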
 
@@ -1223,24 +1765,52 @@ priv_flow_create(struct priv *priv,
 		 struct rte_flow_error *error)
 {
 	struct mlx5_flow_parse parser = { .create = 1, };
-	struct rte_flow *flow;
+	struct rte_flow *flow = NULL;
+	unsigned int i;
 	int err;
 
-	err = priv_flow_validate(priv, attr, items, actions, error, &parser);
+	err = priv_flow_convert(priv, attr, items, actions, error, &parser);
 	if (err)
 		goto exit;
+	flow = rte_calloc(__func__, 1,
+			  sizeof(*flow) + parser.queues_n * sizeof(uint16_t),
+			  0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL,
+				   "cannot allocate flow memory");
+		return NULL;
+	}
+	/* Copy queues configuration. */
+	flow->queues = (uint16_t (*)[])(flow + 1);
+	memcpy(flow->queues, parser.queues, parser.queues_n * sizeof(uint16_t));
+	flow->queues_n = parser.queues_n;
+	/* Copy RSS configuration. */
+	flow->rss_conf = parser.rss_conf;
+	flow->rss_conf.rss_key = flow->rss_key;
+	memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len);
+	/* Finalise the flow. */
 	if (parser.drop)
-		flow = priv_flow_create_action_queue_drop(priv, &parser, error);
+		err = priv_flow_create_action_queue_drop(priv, &parser, flow,
+							 error);
 	else
-		flow = priv_flow_create_action_queue(priv, &parser, error);
-	if (!flow)
+		err = priv_flow_create_action_queue(priv, &parser, flow, error);
+	if (err)
 		goto exit;
 	TAILQ_INSERT_TAIL(list, flow, next);
 	DEBUG("Flow created %p", (void *)flow);
 	return flow;
 exit:
-	if (parser.ibv_attr)
-		rte_free(parser.ibv_attr);
+	if (parser.drop) {
+		rte_free(parser.drop_q.ibv_attr);
+	} else {
+		for (i = 0; i != hash_rxq_init_n; ++i) {
+			if (parser.queue[i].ibv_attr)
+				rte_free(parser.queue[i].ibv_attr);
+		}
+	}
+	rte_free(flow);
 	return NULL;
 }
 
@@ -1262,7 +1832,7 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 	struct mlx5_flow_parse parser = { .create = 0, };
 
 	priv_lock(priv);
-	ret = priv_flow_validate(priv, attr, items, actions, error, &parser);
+	ret = priv_flow_convert(priv, attr, items, actions, error, &parser);
 	priv_unlock(priv);
 	return ret;
 }
@@ -1306,16 +1876,11 @@ priv_flow_destroy(struct priv *priv,
 		  struct rte_flow *flow)
 {
 	unsigned int i;
-	uint16_t *queues;
-	uint16_t queues_n;
 
 	if (flow->drop || !flow->mark)
 		goto free;
-	queues = flow->frxq.hrxq->ind_table->queues;
-	queues_n = flow->frxq.hrxq->ind_table->queues_n;
-	for (i = 0; i != queues_n; ++i) {
+	for (i = 0; i != flow->queues_n; ++i) {
 		struct rte_flow *tmp;
-		struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[i]];
 		int mark = 0;
 
 		/*
@@ -1324,18 +1889,24 @@ priv_flow_destroy(struct priv *priv,
 		 */
 		TAILQ_FOREACH(tmp, list, next) {
 			unsigned int j;
+			uint16_t *tqs = NULL;
+			uint16_t tq_n = 0;
 
 			if (!tmp->mark)
 				continue;
-			for (j = 0;
-			     (j != tmp->frxq.hrxq->ind_table->queues_n) &&
-			     !mark;
-			     j++)
-				if (tmp->frxq.hrxq->ind_table->queues[j] ==
-				    queues[i])
+			for (j = 0; j != hash_rxq_init_n; ++j) {
+				if (!tmp->frxq[j].hrxq)
+					continue;
+				tqs = tmp->frxq[j].hrxq->ind_table->queues;
+				tq_n = tmp->frxq[j].hrxq->ind_table->queues_n;
+			}
+			if (!tq_n)
+				continue;
+			for (j = 0; (j != tq_n) && !mark; j++)
+				if (tqs[j] == (*flow->queues)[i])
 					mark = 1;
 		}
-		rxq_data->mark = mark;
+		(*priv->rxqs)[(*flow->queues)[i]]->mark = mark;
 	}
 free:
 	if (flow->drop) {
@@ -1343,10 +1914,16 @@ priv_flow_destroy(struct priv *priv,
 			claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
 		rte_free(flow->drxq.ibv_attr);
 	} else {
-		mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
-		if (flow->frxq.ibv_flow)
-			claim_zero(ibv_destroy_flow(flow->frxq.ibv_flow));
-		rte_free(flow->frxq.ibv_attr);
+		for (i = 0; i != hash_rxq_init_n; ++i) {
+			struct mlx5_flow *frxq = &flow->frxq[i];
+
+			if (frxq->ibv_flow)
+				claim_zero(ibv_destroy_flow(frxq->ibv_flow));
+			if (frxq->hrxq)
+				mlx5_priv_hrxq_release(priv, frxq->hrxq);
+			if (frxq->ibv_attr)
+				rte_free(frxq->ibv_attr);
+		}
 	}
 	TAILQ_REMOVE(list, flow, next);
 	DEBUG("Flow destroyed %p", (void *)flow);
@@ -1497,18 +2074,35 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
 	struct rte_flow *flow;
 
 	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
-		assert(!flow->drop);
-		claim_zero(ibv_destroy_flow(flow->frxq.ibv_flow));
-		flow->frxq.ibv_flow = NULL;
-		mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
-		flow->frxq.hrxq = NULL;
+		unsigned int i;
+
+		if (flow->drop) {
+			if (!flow->drxq.ibv_flow)
+				continue;
+			claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
+			flow->drxq.ibv_flow = NULL;
+			/* Next flow. */
+			continue;
+		}
 		if (flow->mark) {
-			unsigned int n;
-			struct mlx5_ind_table_ibv *ind_tbl =
-				flow->frxq.hrxq->ind_table;
+			struct mlx5_ind_table_ibv *ind_tbl = NULL;
 
-			for (n = 0; n < ind_tbl->queues_n; ++n)
-				(*priv->rxqs)[ind_tbl->queues[n]]->mark = 0;
+			for (i = 0; i != hash_rxq_init_n; ++i) {
+				if (!flow->frxq[i].hrxq)
+					continue;
+				ind_tbl = flow->frxq[i].hrxq->ind_table;
+			}
+			assert(ind_tbl);
+			for (i = 0; i != ind_tbl->queues_n; ++i)
+				(*priv->rxqs)[ind_tbl->queues[i]]->mark = 0;
+		}
+		for (i = 0; i != hash_rxq_init_n; ++i) {
+			if (!flow->frxq[i].ibv_flow)
+				continue;
+			claim_zero(ibv_destroy_flow(flow->frxq[i].ibv_flow));
+			flow->frxq[i].ibv_flow = NULL;
+			mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
+			flow->frxq[i].hrxq = NULL;
 		}
 		DEBUG("Flow %p removed", (void *)flow);
 	}
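
Stopping a flow thus releases only the Verbs objects (ibv_flow and the
hash Rx queue reference) while each enabled frxq[i].ibv_attr is kept, so
priv_flow_start() below can re-create the same Verbs flows from the
cached attributes:

	/* Sketch: per-type invariant after priv_flow_stop(), for every
	 * type i that had an attribute. */
	assert(flow->frxq[i].ibv_flow == NULL); /* Verbs flow destroyed. */
	assert(flow->frxq[i].ibv_attr != NULL); /* Attribute survives. */
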
@@ -1531,48 +2125,61 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list)
 	struct rte_flow *flow;
 
 	TAILQ_FOREACH(flow, list, next) {
-		if (flow->frxq.hrxq)
-			goto flow_create;
-		flow->frxq.hrxq =
-			mlx5_priv_hrxq_get(priv, rss_hash_default_key,
-					   rss_hash_default_key_len,
-					   flow->frxq.hash_fields,
-					   (*flow->queues),
-					   flow->queues_n);
-		if (flow->frxq.hrxq)
-			goto flow_create;
-		flow->frxq.hrxq =
-			mlx5_priv_hrxq_new(priv, rss_hash_default_key,
-					   rss_hash_default_key_len,
-					   flow->frxq.hash_fields,
-					   (*flow->queues),
-					   flow->queues_n);
-		if (!flow->frxq.hrxq) {
-			DEBUG("Flow %p cannot be applied",
-			      (void *)flow);
-			rte_errno = EINVAL;
-			return rte_errno;
+		unsigned int i;
+
+		if (flow->drop) {
+			flow->drxq.ibv_flow =
+				ibv_create_flow(priv->flow_drop_queue->qp,
+						flow->drxq.ibv_attr);
+			if (!flow->drxq.ibv_flow) {
+				DEBUG("Flow %p cannot be applied",
+				      (void *)flow);
+				rte_errno = EINVAL;
+				return rte_errno;
+			}
+			DEBUG("Flow %p applied", (void *)flow);
+			/* Next flow. */
+			continue;
 		}
+		for (i = 0; i != hash_rxq_init_n; ++i) {
+			if (!flow->frxq[i].ibv_attr)
+				continue;
+			flow->frxq[i].hrxq =
+				mlx5_priv_hrxq_get(priv, flow->rss_conf.rss_key,
+						   flow->rss_conf.rss_key_len,
+						   hash_rxq_init[i].hash_fields,
+						   (*flow->queues),
+						   flow->queues_n);
+			if (flow->frxq[i].hrxq)
+				goto flow_create;
+			flow->frxq[i].hrxq =
+				mlx5_priv_hrxq_new(priv, flow->rss_conf.rss_key,
+						   flow->rss_conf.rss_key_len,
+						   hash_rxq_init[i].hash_fields,
+						   (*flow->queues),
+						   flow->queues_n);
+			if (!flow->frxq[i].hrxq) {
+				DEBUG("Flow %p cannot be applied",
+				      (void *)flow);
+				rte_errno = EINVAL;
+				return rte_errno;
+			}
 flow_create:
-		flow->frxq.ibv_flow = ibv_create_flow(flow->frxq.hrxq->qp,
-						      flow->frxq.ibv_attr);
-		if (!flow->frxq.ibv_flow) {
-			DEBUG("Flow %p cannot be applied", (void *)flow);
-			rte_errno = EINVAL;
-			return rte_errno;
-		}
-		DEBUG("Flow %p applied", (void *)flow);
-		if (flow->mark) {
-			unsigned int n;
-
-			for (n = 0;
-			     n < flow->frxq.hrxq->ind_table->queues_n;
-			     ++n) {
-				uint16_t idx =
-					flow->frxq.hrxq->ind_table->queues[n];
-				(*priv->rxqs)[idx]->mark = 1;
+			flow->frxq[i].ibv_flow =
+				ibv_create_flow(flow->frxq[i].hrxq->qp,
+						flow->frxq[i].ibv_attr);
+			if (!flow->frxq[i].ibv_flow) {
+				DEBUG("Flow %p cannot be applied",
+				      (void *)flow);
+				rte_errno = EINVAL;
+				return rte_errno;
 			}
+			DEBUG("Flow %p applied", (void *)flow);
 		}
+		if (!flow->mark)
+			continue;
+		for (i = 0; i != flow->queues_n; ++i)
+			(*priv->rxqs)[(*flow->queues)[i]]->mark = 1;
 	}
 	return 0;
 }
@@ -1648,10 +2255,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
 	};
 	struct rte_flow_action actions[] = {
 		{
-			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
-			.conf = &(struct rte_flow_action_queue){
-				.index = 0,
-			},
+			.type = RTE_FLOW_ACTION_TYPE_RSS,
 		},
 		{
 			.type = RTE_FLOW_ACTION_TYPE_END,
@@ -1659,7 +2263,23 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
 	};
 	struct rte_flow *flow;
 	struct rte_flow_error error;
-
+	unsigned int i;
+	union {
+		struct rte_flow_action_rss rss;
+		struct {
+			const struct rte_eth_rss_conf *rss_conf;
+			uint16_t num;
+			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
+		} local;
+	} action_rss;
+
+	if (!priv->reta_idx_n)
+		return EINVAL;
+	for (i = 0; i != priv->reta_idx_n; ++i)
+		action_rss.local.queue[i] = (*priv->reta_idx)[i];
+	action_rss.local.rss_conf = &priv->rss_conf;
+	action_rss.local.num = priv->reta_idx_n;
+	actions[0].conf = (const void *)&action_rss.rss;
 	flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
 				&error);
 	if (!flow)
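
[Editor's note, not part of the patch: the union above works around the
fact that in this DPDK version struct rte_flow_action_rss ends with a
flexible array member (uint16_t queue[]), so it cannot be declared on the
stack with room for the queue list. The "local" view mirrors the leading
members and adds fixed backing storage. A minimal sketch of the layout
assumption this relies on; the _Static_assert is illustrative and assumes
a C11 compiler, it is not in the patch.]

 #include <stddef.h>

 union stack_rss_action {
	struct rte_flow_action_rss rss; /* Flexible-array view. */
	struct {
		const struct rte_eth_rss_conf *rss_conf;
		uint16_t num;
		uint16_t queue[RTE_MAX_QUEUES_PER_PORT]; /* Backing storage. */
	} local;
 };

 /* Both views must lay out their leading members identically. */
 _Static_assert(offsetof(union stack_rss_action, rss.num) ==
	       offsetof(union stack_rss_action, local.num),
	       "rss and local member layouts must match");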
-- 
2.1.4


Thread overview: 129+ messages
2017-08-02 14:10 [dpdk-dev] [PATCH v1 00/21] net/mlx5: cleanup for isolated mode Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 01/21] net/mlx5: merge action and flow parser structure Nelio Laranjeiro
     [not found] ` <cover.1501681913.git.nelio.laranjeiro@6wind.com>
2017-08-02 14:10   ` [dpdk-dev] [PATCH v1] net/mlx5: support RSS hash configuration in generic flow action Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 02/21] net/mlx5: remove flow director support Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 03/21] net/mlx5: prefix Rx queue control structures Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 04/21] net/mlx5: prefix Tx control queue structures Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 05/21] net/mlx5: remove redundant started flag Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 06/21] net/mlx5: verify all flows are been removed on close Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 07/21] net/mlx5: add reference counter on memory region Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 08/21] net/mlx5: separate DPDK from Verbs Rx queue objects Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 09/21] net/mlx5: separate DPDK from Verbs Tx " Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 10/21] net/mlx5: add reference counter on DPDK Tx queues Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 11/21] net/mlx5: add reference counter on DPDK Rx queues Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 12/21] net/mlx5: remove queue drop support Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 13/21] net/mlx5: make indirection tables sharable Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 14/21] net/mlx5: add Hash Rx queue object Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 15/21] net/mlx5: disable priority protection in flows Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 16/21] net/mlx5: use flow to enable promiscuous mode Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 17/21] net/mlx5: use flow to enable all multi mode Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 18/21] net/mlx5: use flow to enable unicast traffic Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 19/21] net/mlx5: handle a single RSS hash key for all protocols Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 20/21] net/mlx5: remove hash Rx queues support Nelio Laranjeiro
2017-08-02 14:10 ` [dpdk-dev] [PATCH v1 21/21] net/mlx5: support RSS hash configuration in generic flow action Nelio Laranjeiro
2017-08-18 13:44 ` [dpdk-dev] [PATCH v1 00/21] net/mlx5: cleanup for isolated mode Ferruh Yigit
2017-08-22  9:15   ` Nélio Laranjeiro
2017-10-05 12:49 ` [dpdk-dev] [PATCH v2 00/30] " Nelio Laranjeiro
2017-10-05 19:14   ` Ferruh Yigit
     [not found] ` <cover.1507207731.git.nelio.laranjeiro@6wind.com>
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 01/30] net/mlx5: merge action and flow parser structure Nelio Laranjeiro
2017-10-06  0:47     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 02/30] net/mlx5: remove flow director support Nelio Laranjeiro
2017-10-06  0:49     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 03/30] net/mlx5: prefix Rx structures and functions Nelio Laranjeiro
2017-10-06  0:50     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 04/30] net/mlx5: prefix Tx " Nelio Laranjeiro
2017-10-06  0:50     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 05/30] net/mlx5: remove redundant started flag Nelio Laranjeiro
2017-10-06  0:50     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 06/30] net/mlx5: verify all flows are been removed on close Nelio Laranjeiro
2017-10-06  0:50     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 07/30] net/mlx5: fix reta update can segfault Nelio Laranjeiro
2017-10-06  0:51     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 08/30] net/mlx5: fix rxqs vector support verification Nelio Laranjeiro
2017-10-06  0:51     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 09/30] net/mlx5: add reference counter on memory region Nelio Laranjeiro
2017-10-06  1:11     ` Yongseok Koh
2017-10-06  8:30       ` Nélio Laranjeiro
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 10/30] net/mlx5: separate DPDK from Verbs Rx queue objects Nelio Laranjeiro
2017-10-06  3:26     ` Yongseok Koh
2017-10-06  8:52       ` Nélio Laranjeiro
2017-10-06 22:57         ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 11/30] net/mlx5: separate DPDK from Verbs Tx " Nelio Laranjeiro
2017-10-06  3:32     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 12/30] net/mlx5: add reference counter on DPDK Tx queues Nelio Laranjeiro
2017-10-06  3:51     ` Yongseok Koh
2017-10-09 18:33     ` Ferruh Yigit
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 13/30] net/mlx5: add reference counter on DPDK Rx queues Nelio Laranjeiro
2017-10-06  3:56     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 14/30] net/mlx5: make indirection tables shareable Nelio Laranjeiro
2017-10-06  4:08     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 15/30] net/mlx5: add Hash Rx queue object Nelio Laranjeiro
2017-10-06  4:59     ` Yongseok Koh
2017-10-06  7:03       ` Nélio Laranjeiro
2017-10-06 22:50         ` Yongseok Koh
2017-10-09  8:05           ` Nélio Laranjeiro
2017-10-09 13:48             ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 16/30] net/mlx5: fix clang compilation error Nelio Laranjeiro
2017-10-06  5:01     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 17/30] net/mlx5: use flow to enable promiscuous mode Nelio Laranjeiro
2017-10-06  5:07     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 18/30] net/mlx5: use flow to enable all multi mode Nelio Laranjeiro
2017-10-06  5:10     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 19/30] net/mlx5: use flow to enable unicast traffic Nelio Laranjeiro
2017-10-06  5:18     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 20/30] net/mlx5: handle a single RSS hash key for all protocols Nelio Laranjeiro
2017-10-06  5:23     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 21/30] net/mlx5: remove hash Rx queues support Nelio Laranjeiro
2017-10-06  5:27     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 22/30] net/mlx5: fully convert a flow to verbs in validate Nelio Laranjeiro
2017-10-06  5:33     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 23/30] net/mlx5: process flows actions before of items Nelio Laranjeiro
2017-10-06  5:36     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 24/30] net/mlx5: merge internal parser and actions structures Nelio Laranjeiro
2017-10-06  5:37     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 25/30] net/mlx5: use a better name for the flow parser Nelio Laranjeiro
2017-10-06  5:41     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 26/30] net/mlx5: reorganise functions in the file Nelio Laranjeiro
2017-10-06  5:42     ` Yongseok Koh
2017-10-05 12:49   ` [dpdk-dev] [PATCH v2 27/30] net/mlx5: move Verbs flows and attributes Nelio Laranjeiro
2017-10-06  5:44     ` Yongseok Koh
2017-10-05 12:50   ` Nelio Laranjeiro [this message]
2017-10-06 17:30     ` [dpdk-dev] [PATCH v2 28/30] net/mlx5: handle RSS hash configuration in RSS flow Yongseok Koh
2017-10-05 12:50   ` [dpdk-dev] [PATCH v2 29/30] net/mlx5: support flow director Nelio Laranjeiro
2017-10-06  5:46     ` Yongseok Koh
2017-10-05 12:50   ` [dpdk-dev] [PATCH v2 30/30] net/mlx5: add new operations for isolated mode Nelio Laranjeiro
2017-10-06  5:48     ` Yongseok Koh
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 00/30] net/mlx5: cleanup " Nelio Laranjeiro
2017-10-09 17:17   ` Yongseok Koh
2017-10-09 18:35     ` Ferruh Yigit
2017-10-10  6:55       ` Nélio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 01/30] net/mlx5: merge action and flow parser structure Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 02/30] net/mlx5: remove flow director support Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 03/30] net/mlx5: prefix Rx structures and functions Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 04/30] net/mlx5: prefix Tx " Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 05/30] net/mlx5: remove redundant started flag Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 06/30] net/mlx5: verify all flows are been removed on close Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 07/30] net/mlx5: fix reta update can segfault Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 08/30] net/mlx5: fix rxqs vector support verification Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 09/30] net/mlx5: add reference counter on memory region Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 10/30] net/mlx5: separate DPDK from Verbs Rx queue objects Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 11/30] net/mlx5: separate DPDK from Verbs Tx " Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 12/30] net/mlx5: add reference counter on DPDK Tx queues Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 13/30] net/mlx5: add reference counter on DPDK Rx queues Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 14/30] net/mlx5: make indirection tables shareable Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 15/30] net/mlx5: add Hash Rx queue object Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 16/30] net/mlx5: fix clang compilation error Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 17/30] net/mlx5: use flow to enable promiscuous mode Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 18/30] net/mlx5: use flow to enable all multi mode Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 19/30] net/mlx5: use flow to enable unicast traffic Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 20/30] net/mlx5: handle a single RSS hash key for all protocols Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 21/30] net/mlx5: remove hash Rx queues support Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 22/30] net/mlx5: fully convert a flow to verbs in validate Nelio Laranjeiro
2017-10-09 14:44 ` [dpdk-dev] [PATCH v3 23/30] net/mlx5: process flows actions before of items Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 24/30] net/mlx5: merge internal parser and actions structures Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 25/30] net/mlx5: use a better name for the flow parser Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 26/30] net/mlx5: reorganise functions in the file Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 27/30] net/mlx5: move Verbs flows and attributes Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 28/30] net/mlx5: handle RSS hash configuration in RSS flow Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 29/30] net/mlx5: support flow director Nelio Laranjeiro
2017-10-09 14:45 ` [dpdk-dev] [PATCH v3 30/30] net/mlx5: add new operations for isolated mode Nelio Laranjeiro
