DPDK patches and discussions
* [dpdk-dev] [PATCH 0/6] net/mlx5: move to non-cached mode for flow rules
@ 2020-02-03 13:32 Bing Zhao
  2020-02-03 13:32 ` [dpdk-dev] [PATCH 1/6] net/mlx5: introduce non-cached flows tailq list Bing Zhao
                   ` (6 more replies)
  0 siblings, 7 replies; 26+ messages in thread
From: Bing Zhao @ 2020-02-03 13:32 UTC (permalink / raw)
  To: orika, viacheslavo, rasland, matan; +Cc: dev

This patch set removes the flow rule cache and moves DV-mode flow
rules to non-cached mode. The behavior of Verbs-mode flow rules
remains unchanged.
During the device closing stage, all software resources of the
created flows are freed and the corresponding hardware resources are
released. This reduces the total memory cost, and the behavior of the
mlx5 PMD fully complies with the ethdev API expectations.
After a device is closed, all the flow rule handles stored in the
application layer are no longer valid. The application should
synchronize its database and must not try to destroy any rule on this
device. After the device restarts, all the needed flow rules should
be reinserted via the create routine of the rte_flow library.
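
As an illustration, a minimal application-side sketch of the expected
usage (the app_flow_desc table and app_restore_flows() helper are
hypothetical; only the rte_flow calls are the standard library API):

	#include <rte_flow.h>
	#include <rte_errno.h>

	/* Hypothetical application-side record of one rule. */
	struct app_flow_desc {
		struct rte_flow_attr attr;
		const struct rte_flow_item *pattern;
		const struct rte_flow_action *actions;
		struct rte_flow *handle; /* Stale once the port is closed. */
	};

	/* Re-create every rule after the device has been restarted. */
	static int
	app_restore_flows(uint16_t port_id, struct app_flow_desc *fd,
			  unsigned int n)
	{
		struct rte_flow_error err;
		unsigned int i;

		for (i = 0; i < n; i++) {
			/* The old handle is invalid; do not destroy it. */
			fd[i].handle = rte_flow_create(port_id, &fd[i].attr,
						       fd[i].pattern,
						       fd[i].actions, &err);
			if (fd[i].handle == NULL)
				return -rte_errno;
		}
		return 0;
	}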

Bing Zhao (6):
  net/mlx5: introduce non-cached flows tailq list
  net/mlx5: change operations of non-cached flows
  net/mlx5: flow type check before creating
  net/mlx5: introduce handle structure for DV flows
  net/mlx5: remove the DV support macro checking
  net/mlx5: do not save device flow matcher value

 drivers/net/mlx5/mlx5.c         |   4 +-
 drivers/net/mlx5/mlx5.h         |   5 +-
 drivers/net/mlx5/mlx5_flow.c    | 246 ++++++++++++++++++++++--------
 drivers/net/mlx5/mlx5_flow.h    |  44 +++++-
 drivers/net/mlx5/mlx5_flow_dv.c | 328 ++++++++++++++++++++++------------------
 drivers/net/mlx5/mlx5_trigger.c |  11 +-
 6 files changed, 417 insertions(+), 221 deletions(-)

-- 
1.8.3.1



* [dpdk-dev] [PATCH 1/6] net/mlx5: introduce non-cached flows tailq list
  2020-02-03 13:32 [dpdk-dev] [PATCH 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
@ 2020-02-03 13:32 ` Bing Zhao
  2020-02-03 13:32 ` [dpdk-dev] [PATCH 2/6] net/mlx5: change operations of non-cached flows Bing Zhao
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-02-03 13:32 UTC (permalink / raw)
  To: orika, viacheslavo, rasland, matan; +Cc: dev

A new tailq head is introduced in the mlx5 private structure of each
device: the single flows list is split into cached and non-cached
lists, and all the flows created by the user are moved into the
non-cached tailq list. This is the first stage of separating the
DV-mode flows from the Verbs-mode flows.
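
For reference, an abridged sketch of the resulting layout (the field
names match the diff below; the other members of struct mlx5_priv are
elided, and struct mlx5_flows is the existing tailq head type over
struct rte_flow):

	/* TAILQ_HEAD(mlx5_flows, rte_flow) is the list head type. */
	struct mlx5_priv {
		/* ... other members elided ... */
		struct mlx5_flows cached_flows;    /* Verbs-mode rules. */
		struct mlx5_flows noncached_flows; /* DV-mode rules. */
		struct mlx5_flows ctrl_flows;      /* Control flow rules. */
		/* ... */
	};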

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5.c         |  3 ++-
 drivers/net/mlx5/mlx5.h         |  3 ++-
 drivers/net/mlx5/mlx5_flow.c    | 16 ++++++++--------
 drivers/net/mlx5/mlx5_trigger.c |  6 +++---
 4 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index f80e403..dc4fbbc 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2674,7 +2674,8 @@ struct mlx5_flow_id_pool *
 				      mlx5_ifindex(eth_dev),
 				      eth_dev->data->mac_addrs,
 				      MLX5_MAX_MAC_ADDRESSES);
-	TAILQ_INIT(&priv->flows);
+	TAILQ_INIT(&priv->cached_flows);
+	TAILQ_INIT(&priv->noncached_flows);
 	TAILQ_INIT(&priv->ctrl_flows);
 	TAILQ_INIT(&priv->flow_meters);
 	TAILQ_INIT(&priv->flow_meter_profiles);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index d7c519b..65bdb3b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -515,7 +515,8 @@ struct mlx5_priv {
 	unsigned int (*reta_idx)[]; /* RETA index table. */
 	unsigned int reta_idx_n; /* RETA index size. */
 	struct mlx5_drop drop_queue; /* Flow drop queues. */
-	struct mlx5_flows flows; /* RTE Flow rules. */
+	struct mlx5_flows cached_flows; /* cached RTE Flow rules. */
+	struct mlx5_flows noncached_flows; /* non-cached RTE Flow rules. */
 	struct mlx5_flows ctrl_flows; /* Control flow rules. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 144e07c..d7fb094 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4357,7 +4357,7 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	return flow_list_create(dev, &priv->flows,
+	return flow_list_create(dev, &priv->noncached_flows,
 				attr, items, actions, true, error);
 }
 
@@ -4490,7 +4490,7 @@ struct rte_flow *
 	struct rte_flow *flow;
 	int ret = 0;
 
-	TAILQ_FOREACH(flow, &priv->flows, next) {
+	TAILQ_FOREACH(flow, &priv->noncached_flows, next) {
 		DRV_LOG(DEBUG, "port %u flow %p still referenced",
 			dev->data->port_id, (void *)flow);
 		++ret;
@@ -4674,7 +4674,7 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	flow_list_destroy(dev, &priv->flows, flow);
+	flow_list_destroy(dev, &priv->noncached_flows, flow);
 	return 0;
 }
 
@@ -4690,7 +4690,7 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	mlx5_flow_list_flush(dev, &priv->flows);
+	mlx5_flow_list_flush(dev, &priv->noncached_flows);
 	return 0;
 }
 
@@ -5004,7 +5004,7 @@ struct rte_flow *
 	struct rte_flow *flow = NULL;
 
 	MLX5_ASSERT(fdir_flow);
-	TAILQ_FOREACH(flow, &priv->flows, next) {
+	TAILQ_FOREACH(flow, &priv->noncached_flows, next) {
 		if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
 			DRV_LOG(DEBUG, "port %u found FDIR flow %p",
 				dev->data->port_id, (void *)flow);
@@ -5047,7 +5047,7 @@ struct rte_flow *
 		rte_errno = EEXIST;
 		goto error;
 	}
-	flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
+	flow = flow_list_create(dev, &priv->noncached_flows, &fdir_flow->attr,
 				fdir_flow->items, fdir_flow->actions, true,
 				NULL);
 	if (!flow)
@@ -5092,7 +5092,7 @@ struct rte_flow *
 		rte_errno = ENOENT;
 		return -rte_errno;
 	}
-	flow_list_destroy(dev, &priv->flows, flow);
+	flow_list_destroy(dev, &priv->noncached_flows, flow);
 	DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
 		dev->data->port_id, (void *)flow);
 	return 0;
@@ -5132,7 +5132,7 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	mlx5_flow_list_flush(dev, &priv->flows);
+	mlx5_flow_list_flush(dev, &priv->noncached_flows);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index be47df5..0053847 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -320,7 +320,7 @@
 			dev->data->port_id);
 		goto error;
 	}
-	ret = mlx5_flow_start(dev, &priv->flows);
+	ret = mlx5_flow_start(dev, &priv->noncached_flows);
 	if (ret) {
 		DRV_LOG(DEBUG, "port %u failed to set flows",
 			dev->data->port_id);
@@ -337,7 +337,7 @@
 	ret = rte_errno; /* Save rte_errno before cleanup. */
 	/* Rollback. */
 	dev->data->dev_started = 0;
-	mlx5_flow_stop(dev, &priv->flows);
+	mlx5_flow_stop(dev, &priv->noncached_flows);
 	mlx5_traffic_disable(dev);
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
@@ -367,7 +367,7 @@
 	mlx5_mp_req_stop_rxtx(dev);
 	usleep(1000 * priv->rxqs_n);
 	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
-	mlx5_flow_stop(dev, &priv->flows);
+	mlx5_flow_stop(dev, &priv->noncached_flows);
 	mlx5_traffic_disable(dev);
 	mlx5_rx_intr_vec_disable(dev);
 	mlx5_dev_interrupt_handler_uninstall(dev);
-- 
1.8.3.1



* [dpdk-dev] [PATCH 2/6] net/mlx5: change operations of non-cached flows
  2020-02-03 13:32 [dpdk-dev] [PATCH 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
  2020-02-03 13:32 ` [dpdk-dev] [PATCH 1/6] net/mlx5: introduce non-cached flows tailq list Bing Zhao
@ 2020-02-03 13:32 ` Bing Zhao
  2020-02-03 13:32 ` [dpdk-dev] [PATCH 3/6] net/mlx5: flow type check before creating Bing Zhao
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-02-03 13:32 UTC (permalink / raw)
  To: orika, viacheslavo, rasland, matan; +Cc: dev

When an mlx5 device is stopped, the non-cached flows are flushed, so
there is nothing to be done for these flows in the device closing
stage. If the device is restarted after being stopped, no non-cached
flow is reinserted.
The operations on the cached flows remain the same, and when a flush
is requested by the user, all the flows are flushed.
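
A condensed view of the resulting call order in the trigger and close
paths (taken from the hunks below):

	/* dev_start: only the cached (Verbs) flows are re-applied. */
	ret = mlx5_flow_start(dev, &priv->cached_flows);

	/* dev_stop: detach cached flows, then flush non-cached ones. */
	mlx5_flow_stop(dev, &priv->cached_flows);
	mlx5_flow_flush_noncached(dev, NULL);

	/* dev_close: flush whatever remains (cached flows, if any). */
	mlx5_flow_flush(dev, NULL);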

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5.c         |  1 +
 drivers/net/mlx5/mlx5.h         |  2 ++
 drivers/net/mlx5/mlx5_flow.c    | 36 +++++++++++++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_trigger.c | 11 ++++++++---
 4 files changed, 44 insertions(+), 6 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index dc4fbbc..5114b23 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1236,6 +1236,7 @@ struct mlx5_flow_id_pool *
 	mlx5_dev_interrupt_handler_uninstall(dev);
 	mlx5_dev_interrupt_handler_devx_uninstall(dev);
 	mlx5_traffic_disable(dev);
+	/* Only cached flows will be flushed in this stage, if any. */
 	mlx5_flow_flush(dev, NULL);
 	mlx5_flow_meter_flush(dev, NULL);
 	/* Prevent crashes when queues are still in use. */
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 65bdb3b..d749b29 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -715,6 +715,8 @@ int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 		      struct rte_flow_error *error);
 void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list);
 int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int mlx5_flow_flush_noncached(struct rte_eth_dev *dev,
+			      struct rte_flow_error *error);
 int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 		    const struct rte_flow_action *action, void *data,
 		    struct rte_flow_error *error);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index d7fb094..0560874 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4453,11 +4453,14 @@ struct rte_flow *
 	struct rte_flow_error error;
 	int ret = 0;
 
-	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
+	/*
+	 * Make sure default copy action (reg_c[0] -> reg_b) is created.
+	 * This should always be executed no matter the driver type.
+	 */
 	ret = flow_mreg_add_default_copy_action(dev, &error);
 	if (ret < 0)
 		return -rte_errno;
-	/* Apply Flows created by application. */
+	/* Apply Flows created by application, only for cached flows. */
 	TAILQ_FOREACH(flow, list, next) {
 		ret = flow_mreg_start_copy_action(dev, flow);
 		if (ret < 0)
@@ -4674,7 +4677,15 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	flow_list_destroy(dev, &priv->noncached_flows, flow);
+	/*
+	 * Check the flow type and destroy the flow from the proper list.
+	 * A DV-type flow is non-cached (in most cases) while a legacy
+	 * Verbs-mode flow is still cached for now.
+	 */
+	if (flow->drv_type == MLX5_FLOW_TYPE_DV)
+		flow_list_destroy(dev, &priv->noncached_flows, flow);
+	else
+		flow_list_destroy(dev, &priv->cached_flows, flow);
 	return 0;
 }
 
@@ -4690,6 +4701,24 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
+	/* In most cases, only one tailq list will contain the flows. */
+	mlx5_flow_list_flush(dev, &priv->noncached_flows);
+	mlx5_flow_list_flush(dev, &priv->cached_flows);
+	return 0;
+}
+
+/**
+ * Destroy all non-cached flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_flush_noncached(struct rte_eth_dev *dev,
+			  struct rte_flow_error *error __rte_unused)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
 	mlx5_flow_list_flush(dev, &priv->noncached_flows);
 	return 0;
 }
@@ -5133,6 +5162,7 @@ struct rte_flow *
 	struct mlx5_priv *priv = dev->data->dev_private;
 
 	mlx5_flow_list_flush(dev, &priv->noncached_flows);
+	mlx5_flow_list_flush(dev, &priv->cached_flows);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 0053847..26f4863 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -320,7 +320,7 @@
 			dev->data->port_id);
 		goto error;
 	}
-	ret = mlx5_flow_start(dev, &priv->noncached_flows);
+	ret = mlx5_flow_start(dev, &priv->cached_flows);
 	if (ret) {
 		DRV_LOG(DEBUG, "port %u failed to set flows",
 			dev->data->port_id);
@@ -337,7 +337,7 @@
 	ret = rte_errno; /* Save rte_errno before cleanup. */
 	/* Rollback. */
 	dev->data->dev_started = 0;
-	mlx5_flow_stop(dev, &priv->noncached_flows);
+	mlx5_flow_stop(dev, &priv->cached_flows);
 	mlx5_traffic_disable(dev);
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
@@ -367,7 +367,12 @@
 	mlx5_mp_req_stop_rxtx(dev);
 	usleep(1000 * priv->rxqs_n);
 	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
-	mlx5_flow_stop(dev, &priv->noncached_flows);
+	mlx5_flow_stop(dev, &priv->cached_flows);
+	/*
+	 * Flow flushing must still happen after deleting the default copy
+	 * action and clearing the flags of all Rx queues.
+	 */
+	mlx5_flow_flush_noncached(dev, NULL);
 	mlx5_traffic_disable(dev);
 	mlx5_rx_intr_vec_disable(dev);
 	mlx5_dev_interrupt_handler_uninstall(dev);
-- 
1.8.3.1



* [dpdk-dev] [PATCH 3/6] net/mlx5: flow type check before creating
  2020-02-03 13:32 [dpdk-dev] [PATCH 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
  2020-02-03 13:32 ` [dpdk-dev] [PATCH 1/6] net/mlx5: introduce non-cached flows tailq list Bing Zhao
  2020-02-03 13:32 ` [dpdk-dev] [PATCH 2/6] net/mlx5: change operations of non-cached flows Bing Zhao
@ 2020-02-03 13:32 ` Bing Zhao
  2020-02-03 13:32 ` [dpdk-dev] [PATCH 4/6] net/mlx5: introduce handle structure for DV flows Bing Zhao
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-02-03 13:32 UTC (permalink / raw)
  To: orika, viacheslavo, rasland, matan; +Cc: dev

When creating a flow, the driver type needs to be checked in order to
call the corresponding functions. The driver type check is now moved
out of the flow creation function, so that the flow can be added into
the correct tailq list.
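
The creation-time dispatch then reduces to the following (condensed
from the mlx5_flow_create() hunk in the diff below):

	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
	struct mlx5_flows *flow_list;

	flow_list = (type == MLX5_FLOW_TYPE_DV) ? &priv->noncached_flows :
						  &priv->cached_flows;
	return flow_list_create(dev, flow_list, attr,
				items, actions, true, type, error);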

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c | 40 ++++++++++++++++++++++++----------------
 1 file changed, 24 insertions(+), 16 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 0560874..8fb973b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2874,8 +2874,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
 		 const struct rte_flow_attr *attr,
 		 const struct rte_flow_item items[],
-		 const struct rte_flow_action actions[],
-		 bool external, struct rte_flow_error *error);
+		 const struct rte_flow_action actions[], bool external,
+		 enum mlx5_flow_drv_type type, struct rte_flow_error *error);
 
 static void
 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
@@ -3015,7 +3015,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	 * by list traversing.
 	 */
 	mcp_res->flow = flow_list_create(dev, NULL, &attr, items,
-					 actions, false, error);
+					 actions, false,
+					 flow_get_drv_type(dev, &attr), error);
 	if (!mcp_res->flow)
 		goto error;
 	mcp_res->refcnt++;
@@ -4119,6 +4120,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *   Associated actions (list terminated by the END action).
  * @param[in] external
  *   This flow rule is created by request external to PMD.
+ * @param[in] type
+ *   Flow rule type, DV or VERBS.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  *
@@ -4129,8 +4132,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
 		 const struct rte_flow_attr *attr,
 		 const struct rte_flow_item items[],
-		 const struct rte_flow_action actions[],
-		 bool external, struct rte_flow_error *error)
+		 const struct rte_flow_action actions[], bool external,
+		 enum mlx5_flow_drv_type type, struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = NULL;
@@ -4188,7 +4191,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		rte_errno = ENOMEM;
 		goto error_before_flow;
 	}
-	flow->drv_type = flow_get_drv_type(dev, attr);
+	flow->drv_type = type;
 	if (hairpin_id != 0)
 		flow->hairpin_flow_id = hairpin_id;
 	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
@@ -4339,7 +4342,8 @@ struct rte_flow *
 	struct rte_flow_error error;
 
 	return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern,
-				actions, false, &error);
+				actions, false,
+				flow_get_drv_type(dev, &attr), &error);
 }
 
 /**
@@ -4356,9 +4360,13 @@ struct rte_flow *
 		 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flows *flow_list;
+	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
 
-	return flow_list_create(dev, &priv->noncached_flows,
-				attr, items, actions, true, error);
+	flow_list = (type == MLX5_FLOW_TYPE_DV) ? &priv->noncached_flows :
+						  &priv->cached_flows;
+	return flow_list_create(dev, flow_list, attr,
+				items, actions, true, type, error);
 }
 
 /**
@@ -4548,8 +4556,8 @@ struct rte_flow *
 	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
 	actions[0].conf = &jump;
 	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
-	flow = flow_list_create(dev, &priv->ctrl_flows,
-				&attr, items, actions, false, &error);
+	flow = flow_list_create(dev, &priv->ctrl_flows, &attr, items, actions,
+				false, flow_get_drv_type(dev, &attr), &error);
 	if (!flow) {
 		DRV_LOG(DEBUG,
 			"Failed to create ctrl flow: rte_errno(%d),"
@@ -4636,8 +4644,8 @@ struct rte_flow *
 	}
 	for (i = 0; i != priv->reta_idx_n; ++i)
 		queue[i] = (*priv->reta_idx)[i];
-	flow = flow_list_create(dev, &priv->ctrl_flows,
-				&attr, items, actions, false, &error);
+	flow = flow_list_create(dev, &priv->ctrl_flows, &attr, items, actions,
+				false, flow_get_drv_type(dev, &attr), &error);
 	if (!flow)
 		return -rte_errno;
 	return 0;
@@ -5078,7 +5086,7 @@ struct rte_flow *
 	}
 	flow = flow_list_create(dev, &priv->noncached_flows, &fdir_flow->attr,
 				fdir_flow->items, fdir_flow->actions, true,
-				NULL);
+				flow_get_drv_type(dev, &fdir_flow->attr), NULL);
 	if (!flow)
 		goto error;
 	MLX5_ASSERT(!flow->fdir);
@@ -5695,8 +5703,8 @@ struct mlx5_flow_counter *
 		if (!config->dv_flow_en)
 			break;
 		/* Create internal flow, validation skips copy action. */
-		flow = flow_list_create(dev, NULL, &attr, items,
-					actions, false, &error);
+		flow = flow_list_create(dev, NULL, &attr, items, actions, false,
+					flow_get_drv_type(dev, &attr), &error);
 		if (!flow)
 			continue;
 		if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
-- 
1.8.3.1



* [dpdk-dev] [PATCH 4/6] net/mlx5: introduce handle structure for DV flows
  2020-02-03 13:32 [dpdk-dev] [PATCH 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
                   ` (2 preceding siblings ...)
  2020-02-03 13:32 ` [dpdk-dev] [PATCH 3/6] net/mlx5: flow type check before creating Bing Zhao
@ 2020-02-03 13:32 ` Bing Zhao
  2020-02-03 13:32 ` [dpdk-dev] [PATCH 5/6] net/mlx5: remove the DV support macro checking Bing Zhao
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-02-03 13:32 UTC (permalink / raw)
  To: orika, viacheslavo, rasland, matan; +Cc: dev

Introduce a new structure "mlx5_flow_dv_handle" based on the device
flow structures "mlx5_flow" and "mlx5_flow_dv"; meanwhile, the
"mlx5_flow" structure is kept for Verbs flows.
Only the matcher and action objects are saved, in order to free these
resources when destroying a flow. The other information is stored in
intermediate global variables that can be reused by all flows during
creation.
The inbox OFED driver, built without DV support, is also taken into
consideration.
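
In short, the per-flow state is split as follows (condensed from the
declarations in the diff below, where both appear in full):

	/* Kept per sub-flow until destroy: resources to be released. */
	struct mlx5_flow_dv_handle {
		SLIST_ENTRY(mlx5_flow_dv_handle) next;
		struct mlx5_flow_dv_matcher *matcher;
		/* ... encap/decap, modify header, jump, tag, hrxq ... */
	};

	/* Reused across creations and valid only while translating and
	 * applying a flow: one global temporary device flow plus the
	 * per-sub-flow action arrays.
	 */
	struct mlx5_flow sflow;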

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 184 +++++++++++++++++++-----
 drivers/net/mlx5/mlx5_flow.h    |  40 +++++-
 drivers/net/mlx5/mlx5_flow_dv.c | 310 +++++++++++++++++++++-------------------
 3 files changed, 350 insertions(+), 184 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8fb973b..1121904 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -709,19 +709,42 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- *   Pointer to device flow structure.
+ * @param[in] type
+ *   Driver type of the RTE flow.
+ * @param[in] sub_flow
+ *   Pointer to device flow or flow handle structure.
  */
 static void
-flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
+		       enum mlx5_flow_drv_type type __rte_unused,
+		       void *sub_flow)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->actions &
-			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+	struct rte_flow *flow;
+	int mark;
+	int tunnel;
+	uint64_t layers;
 	unsigned int i;
 
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	if (type == MLX5_FLOW_TYPE_DV) {
+		struct mlx5_flow_dv_handle *handle = sub_flow;
+		mark = !!(handle->action_flags &
+			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+		layers = handle->layers;
+		tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
+		flow = handle->m_flow;
+	} else {
+#endif
+		struct mlx5_flow *dev_flow = sub_flow;
+		mark = !!(dev_flow->actions &
+			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+		layers = dev_flow->layers;
+		tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+		flow = dev_flow->flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	}
+#endif
 	for (i = 0; i != flow->rss.queue_num; ++i) {
 		int idx = (*flow->rss.queue)[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
@@ -747,8 +770,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 
 			/* Increase the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
-				if ((tunnels_info[j].tunnel &
-				     dev_flow->layers) ==
+				if ((tunnels_info[j].tunnel & layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]++;
 					break;
@@ -771,9 +793,17 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct mlx5_flow *dev_flow;
+	enum mlx5_flow_drv_type type = flow->drv_type;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		flow_drv_rxq_flags_set(dev, dev_flow);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	struct mlx5_flow_dv_handle *handle;
+	if (type == MLX5_FLOW_TYPE_DV)
+		SLIST_FOREACH(handle, &flow->handles, next)
+			flow_drv_rxq_flags_set(dev, type, (void *)handle);
+	else
+#endif
+		LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+			flow_drv_rxq_flags_set(dev, type, (void *)dev_flow);
 }
 
 /**
@@ -782,20 +812,44 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param[in] dev_flow
- *   Pointer to the device flow.
+ * @param[in] type
+ *   Driver type of the RTE flow.
+ * @param[in] sub_flow
+ *   Pointer to device flow or flow handle structure.
+ *
  */
 static void
-flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
+			enum mlx5_flow_drv_type type __rte_unused,
+			void *sub_flow)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->actions &
-			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+	struct rte_flow *flow;
+	int mark;
+	int tunnel;
+	uint64_t layers;
 	unsigned int i;
 
 	MLX5_ASSERT(dev->data->dev_started);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	if (type == MLX5_FLOW_TYPE_DV) {
+		struct mlx5_flow_dv_handle *handle = sub_flow;
+		mark = !!(handle->action_flags &
+			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+		layers = handle->layers;
+		tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
+		flow = handle->m_flow;
+	} else {
+#endif
+		struct mlx5_flow *dev_flow = sub_flow;
+		mark = !!(dev_flow->actions &
+			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+		layers = dev_flow->layers;
+		tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
+		flow = dev_flow->flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	}
+#endif
 	for (i = 0; i != flow->rss.queue_num; ++i) {
 		int idx = (*flow->rss.queue)[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
@@ -816,8 +870,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 
 			/* Decrease the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
-				if ((tunnels_info[j].tunnel &
-				     dev_flow->layers) ==
+				if ((tunnels_info[j].tunnel & layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]--;
 					break;
@@ -841,9 +894,17 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct mlx5_flow *dev_flow;
+	enum mlx5_flow_drv_type type = flow->drv_type;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		flow_drv_rxq_flags_trim(dev, dev_flow);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	struct mlx5_flow_dv_handle *handle;
+	if (type == MLX5_FLOW_TYPE_DV)
+		SLIST_FOREACH(handle, &flow->handles, next)
+			flow_drv_rxq_flags_trim(dev, type, (void *)handle);
+	else
+#endif
+		LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+			flow_drv_rxq_flags_trim(dev, type, (void *)dev_flow);
 }
 
 /**
@@ -2341,10 +2402,22 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			     struct rte_flow *flow)
 {
 	struct mlx5_flow *dev_flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	struct mlx5_flow_dv_handle *handle;
+	enum mlx5_flow_drv_type type = flow->drv_type;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		if (dev_flow->qrss_id)
-			flow_qrss_free_id(dev, dev_flow->qrss_id);
+	if (type == MLX5_FLOW_TYPE_DV) {
+		SLIST_FOREACH(handle, &flow->handles, next)
+			if (handle->qrss_id)
+				flow_qrss_free_id(dev, handle->qrss_id);
+	} else {
+#endif
+		LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+			if (dev_flow->qrss_id)
+				flow_qrss_free_id(dev, dev_flow->qrss_id);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	}
+#endif
 }
 
 static int
@@ -3434,10 +3507,20 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
 	if (!dev_flow)
 		return -rte_errno;
-	dev_flow->flow = flow;
 	dev_flow->external = external;
-	/* Subflow object was created, we must include one in the list. */
-	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+	dev_flow->flow = flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	if (flow->drv_type == MLX5_FLOW_TYPE_DV) {
+		SLIST_INSERT_HEAD(&flow->handles, dev_flow->dv_handle, next);
+		dev_flow->dv_handle->sidx = flow->sub_flows++;
+		dev_flow->dv_handle->m_flow = flow;
+	} else {
+#endif
+		/* Subflow obj was created, we must include one in the list. */
+		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	}
+#endif
 	if (sub_flow)
 		*sub_flow = dev_flow;
 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
@@ -3900,6 +3983,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			 * other flows in other threads).
 			 */
 			dev_flow->qrss_id = qrss_id;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+			if (flow->drv_type == MLX5_FLOW_TYPE_DV)
+				dev_flow->dv_handle->qrss_id = qrss_id;
+#endif
 			qrss_id = 0;
 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
 						   error);
@@ -4012,6 +4099,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			goto exit;
 		}
 		dev_flow->mtr_flow_id = mtr_tag_id;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+		if (flow->drv_type == MLX5_FLOW_TYPE_DV)
+			dev_flow->dv_handle->mtr_flow_id = mtr_tag_id;
+#endif
 		/* Prepare the suffix flow match pattern. */
 		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
 			     act_size);
@@ -4164,6 +4255,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	uint32_t hairpin_id = 0;
 	struct rte_flow_attr attr_tx = { .priority = 0 };
 
+	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
 	if (hairpin_flow > 0) {
 		if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
@@ -4192,10 +4284,9 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		goto error_before_flow;
 	}
 	flow->drv_type = type;
+	flow->sub_flows = 0;
 	if (hairpin_id != 0)
 		flow->hairpin_flow_id = hairpin_id;
-	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
-		    flow->drv_type < MLX5_FLOW_TYPE_MAX);
 	flow->rss.queue = (void *)(flow + 1);
 	if (rss) {
 		/*
@@ -4206,7 +4297,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
 		flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
 	}
-	LIST_INIT(&flow->dev_flows);
+	if (flow->drv_type == MLX5_FLOW_TYPE_DV)
+		SLIST_INIT(&flow->handles);
+	else
+		LIST_INIT(&flow->dev_flows);
 	if (rss && rss->types) {
 		unsigned int graph_root;
 
@@ -4243,9 +4337,20 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 					    actions_hairpin_tx.actions, error);
 		if (!dev_flow)
 			goto error;
-		dev_flow->flow = flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+		if (flow->drv_type == MLX5_FLOW_TYPE_DV) {
+			SLIST_INSERT_HEAD(&flow->handles,
+					  dev_flow->dv_handle, next);
+			dev_flow->dv_handle->sidx = flow->sub_flows++;
+			dev_flow->dv_handle->m_flow = flow;
+		} else {
+#endif
+			dev_flow->flow = flow;
+			LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+		}
+#endif
 		dev_flow->external = 0;
-		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
 					 items_tx.items,
 					 actions_hairpin_tx.actions, error);
@@ -4363,8 +4468,17 @@ struct rte_flow *
 	struct mlx5_flows *flow_list;
 	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
 
-	flow_list = (type == MLX5_FLOW_TYPE_DV) ? &priv->noncached_flows :
-						  &priv->cached_flows;
+	if (type == MLX5_FLOW_TYPE_DV) {
+		if (unlikely(!dev->data->dev_started)) {
+			rte_errno = ENODEV;
+			DRV_LOG(DEBUG, "port %u is not started when "
+				"inserting a flow", dev->data->port_id);
+			return NULL;
+		}
+		flow_list = &priv->noncached_flows;
+	} else {
+		flow_list = &priv->cached_flows;
+	}
 	return flow_list_create(dev, flow_list, attr,
 				items, actions, true, type, error);
 }
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 7c31bfe..10ac9c3 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -468,6 +468,39 @@ struct mlx5_flow_tbl_data_entry {
 	/**< jump resource, at most one for each table created. */
 };
 
+struct mlx5_flow_dv_handle {
+	SLIST_ENTRY(mlx5_flow_dv_handle) next;
+	struct rte_flow *m_flow; /**< Pointer to the main flow. */
+	uint64_t layers;
+	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
+	uint64_t action_flags;
+	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
+	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
+	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
+	struct mlx5_flow_dv_match_params value;
+	/**< Holds the value that the packet is compared to. */
+	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
+	/**< Pointer to encap/decap resource in cache. */
+	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
+	/**< Pointer to modify header resource in cache. */
+	struct mlx5_flow_dv_jump_tbl_resource *jump;
+	/**< Pointer to the jump action resource. */
+	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
+	/**< Pointer to port ID action resource. */
+	struct mlx5_vf_vlan vf_vlan;
+	/**< Structure for VF VLAN workaround. */
+	struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
+	/**< Pointer to push VLAN action resource in cache. */
+	struct mlx5_flow_dv_tag_resource *tag_resource;
+	/**< pointer to the tag action. */
+	struct ibv_flow *flow; /**< Installed flow. */
+	union {
+		uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
+		uint32_t mtr_flow_id; /**< Unique meter match flow id. */
+	};
+	uint8_t sidx;
+};
+
 /*
  * Max number of actions per DV flow.
  * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
@@ -547,12 +580,12 @@ struct mlx5_flow {
 	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
 	union {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		struct mlx5_flow_dv dv;
+		struct mlx5_flow_dv_handle *dv_handle;
 #endif
 		struct mlx5_flow_verbs verbs;
 	};
 	union {
-		uint32_t qrss_id; /**< Uniqie Q/RSS suffix subflow tag. */
+		uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
 		uint32_t mtr_flow_id; /**< Unique meter match flow id. */
 	};
 	bool external; /**< true if the flow is created external to PMD. */
@@ -674,6 +707,9 @@ struct rte_flow {
 	struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
 	uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
 	uint32_t copy_applied:1; /**< The MARK copy Flow is applied. */
+	SLIST_HEAD(, mlx5_flow_dv_handle) handles;
+	/**< The HEAD of DV handles. */
+	uint8_t sub_flows;
 };
 
 typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 2878393..2013082 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -75,6 +75,16 @@
 	uint32_t attr;
 };
 
+/* Global temporary device flow. */
+struct mlx5_flow sflow;
+/* Global list of the subsidiary device flows' actions. */
+struct {
+	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
+	uint64_t hash_fields;
+	int actions_n;
+	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
+} sflow_act[8];
+
 /**
  * Initialize flow attributes structure according to flow items' types.
  *
@@ -2348,7 +2358,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.encap_decap = cache_resource;
+			dev_flow->dv_handle->encap_decap = cache_resource;
 			return 0;
 		}
 	}
@@ -2374,7 +2384,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
-	dev_flow->dv.encap_decap = cache_resource;
+	dev_flow->dv_handle->encap_decap = cache_resource;
 	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2425,7 +2435,7 @@ struct field_modify_info modify_tcp[] = {
 			(void *)&tbl_data->jump, cnt);
 	}
 	rte_atomic32_inc(&tbl_data->jump.refcnt);
-	dev_flow->dv.jump = &tbl_data->jump;
+	dev_flow->dv_handle->jump = &tbl_data->jump;
 	return 0;
 }
 
@@ -2463,7 +2473,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.port_id_action = cache_resource;
+			dev_flow->dv_handle->port_id_action = cache_resource;
 			return 0;
 		}
 	}
@@ -2491,7 +2501,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
-	dev_flow->dv.port_id_action = cache_resource;
+	dev_flow->dv_handle->port_id_action = cache_resource;
 	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2534,7 +2544,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.push_vlan_res = cache_resource;
+			dev_flow->dv_handle->push_vlan_res = cache_resource;
 			return 0;
 		}
 	}
@@ -2563,7 +2573,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
-	dev_flow->dv.push_vlan_res = cache_resource;
+	dev_flow->dv_handle->push_vlan_res = cache_resource;
 	DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -3652,7 +3662,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.modify_hdr = cache_resource;
+			dev_flow->dv_handle->modify_hdr = cache_resource;
 			return 0;
 		}
 	}
@@ -3679,7 +3689,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
-	dev_flow->dv.modify_hdr = cache_resource;
+	dev_flow->dv_handle->modify_hdr = cache_resource;
 	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -5102,19 +5112,24 @@ struct field_modify_info modify_tcp[] = {
 		const struct rte_flow_action actions[] __rte_unused,
 		struct rte_flow_error *error)
 {
-	size_t size = sizeof(struct mlx5_flow);
+	size_t size = sizeof(struct mlx5_flow_dv_handle);
 	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_dv_handle *dv_handle;
 
-	dev_flow = rte_calloc(__func__, 1, size, 0);
-	if (!dev_flow) {
+	/* No need to clear to 0. */
+	dev_flow = &sflow;
+	dv_handle = rte_zmalloc(__func__, size, 0);
+	if (!dv_handle) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				   "not enough memory to create flow");
+				   "not enough memory to create flow handle");
 		return NULL;
 	}
-	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
 	dev_flow->ingress = attr->ingress;
 	dev_flow->transfer = attr->transfer;
+	dv_handle->value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+	/* DV support is already defined; compiler is happy for inbox driver. */
+	dev_flow->dv_handle = dv_handle;
 	return dev_flow;
 }
 
@@ -5253,7 +5268,7 @@ struct field_modify_info modify_tcp[] = {
 		 * This is workaround, masks are not supported,
 		 * and pre-validated.
 		 */
-		dev_flow->dv.vf_vlan.tag =
+		dev_flow->dv_handle->vf_vlan.tag =
 			rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
 	}
 	tci_m = rte_be_to_cpu_16(vlan_m->tci);
@@ -6712,7 +6727,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_matcher,
 				rte_atomic32_read(&cache_matcher->refcnt));
 			rte_atomic32_inc(&cache_matcher->refcnt);
-			dev_flow->dv.matcher = cache_matcher;
+			dev_flow->dv_handle->matcher = cache_matcher;
 			/* old matcher should not make the table ref++. */
 			flow_dv_tbl_resource_release(dev, tbl);
 			return 0;
@@ -6749,7 +6764,7 @@ struct field_modify_info modify_tcp[] = {
 	/* only matcher ref++, table ref++ already done above in get API. */
 	rte_atomic32_inc(&cache_matcher->refcnt);
 	LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
-	dev_flow->dv.matcher = cache_matcher;
+	dev_flow->dv_handle->matcher = cache_matcher;
 	DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
 		key->domain ? "FDB" : "NIC", key->table_id,
 		cache_matcher->priority,
@@ -6791,7 +6806,7 @@ struct field_modify_info modify_tcp[] = {
 		cache_resource = container_of
 			(entry, struct mlx5_flow_dv_tag_resource, entry);
 		rte_atomic32_inc(&cache_resource->refcnt);
-		dev_flow->dv.tag_resource = cache_resource;
+		dev_flow->dv_handle->tag_resource = cache_resource;
 		DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
 			(void *)cache_resource,
 			rte_atomic32_read(&cache_resource->refcnt));
@@ -6820,7 +6835,7 @@ struct field_modify_info modify_tcp[] = {
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot insert tag");
 	}
-	dev_flow->dv.tag_resource = cache_resource;
+	dev_flow->dv_handle->tag_resource = cache_resource;
 	DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -7022,6 +7037,9 @@ struct field_modify_info modify_tcp[] = {
 				dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
 		}
 	}
+	/* No need to save the hash fields after creation. */
+	sflow_act[dev_flow->dv_handle->sidx].hash_fields =
+						dev_flow->hash_fields;
 }
 
 /**
@@ -7065,6 +7083,7 @@ struct field_modify_info modify_tcp[] = {
 		},
 	};
 	int actions_n = 0;
+	uint8_t sidx = dev_flow->dv_handle->sidx;
 	bool actions_end = false;
 	union {
 		struct mlx5_flow_dv_modify_hdr_resource res;
@@ -7076,9 +7095,9 @@ struct field_modify_info modify_tcp[] = {
 	union flow_dv_attr flow_attr = { .attr = 0 };
 	uint32_t tag_be;
 	union mlx5_flow_tbl_key tbl_key;
-	uint32_t modify_action_position = UINT32_MAX;
+	uint32_t modify_action_pos = UINT32_MAX;
 	void *match_mask = matcher.mask.buf;
-	void *match_value = dev_flow->dv.value.buf;
+	void *match_value = dev_flow->dv_handle->value.buf;
 	uint8_t next_protocol = 0xff;
 	struct rte_vlan_hdr vlan = { 0 };
 	uint32_t table;
@@ -7122,8 +7141,8 @@ struct field_modify_info modify_tcp[] = {
 			if (flow_dv_port_id_action_resource_register
 			    (dev, &port_id_resource, dev_flow, error))
 				return -rte_errno;
-			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.port_id_action->action;
+			sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->port_id_action->action;
 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
 			break;
 		case RTE_FLOW_ACTION_TYPE_FLAG:
@@ -7132,7 +7151,6 @@ struct field_modify_info modify_tcp[] = {
 				struct rte_flow_action_mark mark = {
 					.id = MLX5_FLOW_MARK_DEFAULT,
 				};
-
 				if (flow_dv_convert_action_mark(dev, &mark,
 								mhdr_res,
 								error))
@@ -7141,12 +7159,12 @@ struct field_modify_info modify_tcp[] = {
 				break;
 			}
 			tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
-			if (!dev_flow->dv.tag_resource)
+			if (!dev_flow->dv_handle->tag_resource)
 				if (flow_dv_tag_resource_register
 				    (dev, tag_be, dev_flow, error))
 					return -rte_errno;
-			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.tag_resource->action;
+			sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			action_flags |= MLX5_FLOW_ACTION_MARK;
@@ -7168,12 +7186,12 @@ struct field_modify_info modify_tcp[] = {
 			tag_be = mlx5_flow_mark_set
 			      (((const struct rte_flow_action_mark *)
 			       (actions->conf))->id);
-			if (!dev_flow->dv.tag_resource)
+			if (!dev_flow->dv_handle->tag_resource)
 				if (flow_dv_tag_resource_register
 				    (dev, tag_be, dev_flow, error))
 					return -rte_errno;
-			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.tag_resource->action;
+			sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_META:
 			if (flow_dv_convert_action_set_meta
@@ -7228,7 +7246,7 @@ struct field_modify_info modify_tcp[] = {
 							      dev_flow->group);
 			if (flow->counter == NULL)
 				goto cnt_err;
-			dev_flow->dv.actions[actions_n++] =
+			sflow_act[sidx].actions[actions_n++] =
 				flow->counter->action;
 			action_flags |= MLX5_FLOW_ACTION_COUNT;
 			break;
@@ -7248,7 +7266,7 @@ struct field_modify_info modify_tcp[] = {
 						  " object.");
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
-			dev_flow->dv.actions[actions_n++] =
+			sflow_act[sidx].actions[actions_n++] =
 						priv->sh->pop_vlan_action;
 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
 			break;
@@ -7270,8 +7288,8 @@ struct field_modify_info modify_tcp[] = {
 			if (flow_dv_create_action_push_vlan
 					    (dev, attr, &vlan, dev_flow, error))
 				return -rte_errno;
-			dev_flow->dv.actions[actions_n++] =
-					   dev_flow->dv.push_vlan_res->action;
+			sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->push_vlan_res->action;
 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
@@ -7297,8 +7315,8 @@ struct field_modify_info modify_tcp[] = {
 							   attr->transfer,
 							   error))
 				return -rte_errno;
-			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.encap_decap->verbs_action;
+			sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->encap_decap->verbs_action;
 			action_flags |= actions->type ==
 					RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
 					MLX5_FLOW_ACTION_VXLAN_ENCAP :
@@ -7310,8 +7328,8 @@ struct field_modify_info modify_tcp[] = {
 							   attr->transfer,
 							   error))
 				return -rte_errno;
-			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.encap_decap->verbs_action;
+			sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->encap_decap->verbs_action;
 			action_flags |= actions->type ==
 					RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
 					MLX5_FLOW_ACTION_VXLAN_DECAP :
@@ -7323,16 +7341,16 @@ struct field_modify_info modify_tcp[] = {
 				if (flow_dv_create_action_raw_encap
 					(dev, actions, dev_flow, attr, error))
 					return -rte_errno;
-				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->encap_decap->verbs_action;
 			} else {
 				/* Handle encap without preceding decap. */
 				if (flow_dv_create_action_l2_encap
 				    (dev, actions, dev_flow, attr->transfer,
 				     error))
 					return -rte_errno;
-				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->encap_decap->verbs_action;
 			}
 			action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
 			break;
@@ -7347,8 +7365,8 @@ struct field_modify_info modify_tcp[] = {
 				if (flow_dv_create_action_l2_decap
 				    (dev, dev_flow, attr->transfer, error))
 					return -rte_errno;
-				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->encap_decap->verbs_action;
 			}
 			/* If decap is followed by encap, handle it at encap. */
 			action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
@@ -7379,8 +7397,8 @@ struct field_modify_info modify_tcp[] = {
 						 NULL,
 						 "cannot create jump action.");
 			}
-			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.jump->action;
+			sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->jump->action;
 			action_flags |= MLX5_FLOW_ACTION_JUMP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
@@ -7485,7 +7503,7 @@ struct field_modify_info modify_tcp[] = {
 						"or invalid parameters");
 			}
 			/* Set the meter action. */
-			dev_flow->dv.actions[actions_n++] =
+			sflow_act[sidx].actions[actions_n++] =
 				flow->meter->mfts->meter_action;
 			action_flags |= MLX5_FLOW_ACTION_METER;
 			break;
@@ -7508,19 +7526,19 @@ struct field_modify_info modify_tcp[] = {
 				if (flow_dv_modify_hdr_resource_register
 					(dev, mhdr_res, dev_flow, error))
 					return -rte_errno;
-				dev_flow->dv.actions[modify_action_position] =
-					dev_flow->dv.modify_hdr->verbs_action;
+				sflow_act[sidx].actions[modify_action_pos] =
+				dev_flow->dv_handle->modify_hdr->verbs_action;
 			}
 			break;
 		default:
 			break;
 		}
-		if (mhdr_res->actions_num &&
-		    modify_action_position == UINT32_MAX)
-			modify_action_position = actions_n++;
+		if (mhdr_res->actions_num && modify_action_pos == UINT32_MAX)
+			modify_action_pos = actions_n++;
 	}
-	dev_flow->dv.actions_n = actions_n;
-	dev_flow->actions = action_flags;
+	sflow_act[sidx].actions_n = actions_n;
+	sflow_act[sidx].transfer = dev_flow->transfer;
+	dev_flow->dv_handle->action_flags = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 		int item_type = items->type;
@@ -7707,7 +7725,7 @@ struct field_modify_info modify_tcp[] = {
 	MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
 					      dev_flow->dv.value.buf));
 #endif
-	dev_flow->layers = item_flags;
+	dev_flow->dv_handle->layers = item_flags;
 	if (action_flags & MLX5_FLOW_ACTION_RSS)
 		flow_dv_hashfields_set(dev_flow);
 	/* Register matcher. */
@@ -7742,21 +7760,23 @@ struct field_modify_info modify_tcp[] = {
 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 		struct rte_flow_error *error)
 {
-	struct mlx5_flow_dv *dv;
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_dv_handle *dv_handle;
 	struct mlx5_priv *priv = dev->data->dev_private;
+	void *matcher_obj;
 	int n;
 	int err;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dv = &dev_flow->dv;
-		n = dv->actions_n;
-		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
-			if (dev_flow->transfer) {
-				dv->actions[n++] = priv->sh->esw_drop_action;
+	SLIST_FOREACH(dv_handle, &flow->handles, next) {
+		uint8_t sidx = dv_handle->sidx;
+		n = sflow_act[sidx].actions_n;
+
+		if (dv_handle->action_flags & MLX5_FLOW_ACTION_DROP) {
+			if (sflow_act[sidx].transfer) {
+				sflow_act[sidx].actions[n++] =
+						priv->sh->esw_drop_action;
 			} else {
-				dv->hrxq = mlx5_hrxq_drop_new(dev);
-				if (!dv->hrxq) {
+				dv_handle->hrxq = mlx5_hrxq_drop_new(dev);
+				if (!dv_handle->hrxq) {
 					rte_flow_error_set
 						(error, errno,
 						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -7764,26 +7784,27 @@ struct field_modify_info modify_tcp[] = {
 						 "cannot get drop hash queue");
 					goto error;
 				}
-				dv->actions[n++] = dv->hrxq->action;
+				sflow_act[sidx].actions[n++] =
+						dv_handle->hrxq->action;
 			}
-		} else if (dev_flow->actions &
+		} else if (dv_handle->action_flags &
 			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
 			struct mlx5_hrxq *hrxq;
 
 			MLX5_ASSERT(flow->rss.queue);
 			hrxq = mlx5_hrxq_get(dev, flow->rss.key,
 					     MLX5_RSS_HASH_KEY_LEN,
-					     dev_flow->hash_fields,
+					     sflow_act[sidx].hash_fields,
 					     (*flow->rss.queue),
 					     flow->rss.queue_num);
 			if (!hrxq) {
 				hrxq = mlx5_hrxq_new
 					(dev, flow->rss.key,
 					 MLX5_RSS_HASH_KEY_LEN,
-					 dev_flow->hash_fields,
+					 sflow_act[sidx].hash_fields,
 					 (*flow->rss.queue),
 					 flow->rss.queue_num,
-					 !!(dev_flow->layers &
+					 !!(dv_handle->layers &
 					    MLX5_FLOW_LAYER_TUNNEL));
 			}
 			if (!hrxq) {
@@ -7793,47 +7814,45 @@ struct field_modify_info modify_tcp[] = {
 					 "cannot get hash queue");
 				goto error;
 			}
-			dv->hrxq = hrxq;
-			dv->actions[n++] = dv->hrxq->action;
+			dv_handle->hrxq = hrxq;
+			sflow_act[sidx].actions[n++] = hrxq->action;
 		}
-		dv->flow =
-			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
-						  (void *)&dv->value, n,
-						  dv->actions);
-		if (!dv->flow) {
+		matcher_obj = dv_handle->matcher->matcher_object;
+		dv_handle->flow =
+			mlx5_glue->dv_create_flow(matcher_obj,
+						  (void *)&dv_handle->value,
+						  n, sflow_act[sidx].actions);
+		if (!dv_handle->flow) {
 			rte_flow_error_set(error, errno,
 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					   NULL,
 					   "hardware refuses to create flow");
 			goto error;
 		}
-		if (priv->vmwa_context &&
-		    dev_flow->dv.vf_vlan.tag &&
-		    !dev_flow->dv.vf_vlan.created) {
+		if (priv->vmwa_context && dv_handle->vf_vlan.tag &&
+		    !dv_handle->vf_vlan.created) {
 			/*
 			 * The rule contains the VLAN pattern.
 			 * For VF we are going to create VLAN
 			 * interface to make hypervisor set correct
 			 * e-Switch vport context.
 			 */
-			mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
+			mlx5_vlan_vmwa_acquire(dev, &dv_handle->vf_vlan);
 		}
 	}
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		struct mlx5_flow_dv *dv = &dev_flow->dv;
-		if (dv->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+	SLIST_FOREACH(dv_handle, &flow->handles, next) {
+		if (dv_handle->hrxq) {
+			if (dv_handle->action_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dv->hrxq);
-			dv->hrxq = NULL;
+				mlx5_hrxq_release(dev, dv_handle->hrxq);
+			dv_handle->hrxq = NULL;
 		}
-		if (dev_flow->dv.vf_vlan.tag &&
-		    dev_flow->dv.vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+		if (dv_handle->vf_vlan.tag && dv_handle->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dv_handle->vf_vlan);
 	}
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
@@ -7844,17 +7863,17 @@ struct field_modify_info modify_tcp[] = {
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_dv_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
 flow_dv_matcher_release(struct rte_eth_dev *dev,
-			struct mlx5_flow *flow)
+			struct mlx5_flow_dv_handle *handle)
 {
-	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+	struct mlx5_flow_dv_matcher *matcher = handle->matcher;
 
 	MLX5_ASSERT(matcher->matcher_object);
 	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
@@ -7877,17 +7896,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release an encap/decap resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_dv_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
+flow_dv_encap_decap_resource_release(struct mlx5_flow_dv_handle *handle)
 {
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource =
-						flow->dv.encap_decap;
+						handle->encap_decap;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
@@ -7910,17 +7929,17 @@ struct field_modify_info modify_tcp[] = {
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_dv_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
-				  struct mlx5_flow *flow)
+				  struct mlx5_flow_dv_handle *handle)
 {
-	struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
+	struct mlx5_flow_dv_jump_tbl_resource *cache_resource = handle->jump;
 	struct mlx5_flow_tbl_data_entry *tbl_data =
 			container_of(cache_resource,
 				     struct mlx5_flow_tbl_data_entry, jump);
@@ -7944,17 +7963,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release a modify-header resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_dv_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
+flow_dv_modify_hdr_resource_release(struct mlx5_flow_dv_handle *handle)
 {
 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
-						flow->dv.modify_hdr;
+						handle->modify_hdr;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
@@ -7975,17 +7994,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release port ID action resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_dv_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
+flow_dv_port_id_action_resource_release(struct mlx5_flow_dv_handle *handle)
 {
 	struct mlx5_flow_dv_port_id_action_resource *cache_resource =
-		flow->dv.port_id_action;
+						handle->port_id_action;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
@@ -8006,17 +8025,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release push vlan action resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_dv_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
+flow_dv_push_vlan_action_resource_release(struct mlx5_flow_dv_handle *handle)
 {
 	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
-		flow->dv.push_vlan_res;
+						handle->push_vlan_res;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
@@ -8046,27 +8065,24 @@ struct field_modify_info modify_tcp[] = {
 static void
 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow_dv *dv;
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_dv_handle *dv_handle;
 
 	if (!flow)
 		return;
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dv = &dev_flow->dv;
-		if (dv->flow) {
-			claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
-			dv->flow = NULL;
+	SLIST_FOREACH(dv_handle, &flow->handles, next) {
+		if (dv_handle->flow) {
+			claim_zero(mlx5_glue->dv_destroy_flow(dv_handle->flow));
+			dv_handle->flow = NULL;
 		}
-		if (dv->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+		if (dv_handle->hrxq) {
+			if (dv_handle->action_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dv->hrxq);
-			dv->hrxq = NULL;
+				mlx5_hrxq_release(dev, dv_handle->hrxq);
+			dv_handle->hrxq = NULL;
 		}
-		if (dev_flow->dv.vf_vlan.tag &&
-		    dev_flow->dv.vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+		if (dv_handle->vf_vlan.tag && dv_handle->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dv_handle->vf_vlan);
 	}
 }
 
@@ -8082,7 +8098,7 @@ struct field_modify_info modify_tcp[] = {
 static void
 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_dv_handle *dv_handle;
 
 	if (!flow)
 		return;
@@ -8095,24 +8111,24 @@ struct field_modify_info modify_tcp[] = {
 		mlx5_flow_meter_detach(flow->meter);
 		flow->meter = NULL;
 	}
-	while (!LIST_EMPTY(&flow->dev_flows)) {
-		dev_flow = LIST_FIRST(&flow->dev_flows);
-		LIST_REMOVE(dev_flow, next);
-		if (dev_flow->dv.matcher)
-			flow_dv_matcher_release(dev, dev_flow);
-		if (dev_flow->dv.encap_decap)
-			flow_dv_encap_decap_resource_release(dev_flow);
-		if (dev_flow->dv.modify_hdr)
-			flow_dv_modify_hdr_resource_release(dev_flow);
-		if (dev_flow->dv.jump)
-			flow_dv_jump_tbl_resource_release(dev, dev_flow);
-		if (dev_flow->dv.port_id_action)
-			flow_dv_port_id_action_resource_release(dev_flow);
-		if (dev_flow->dv.push_vlan_res)
-			flow_dv_push_vlan_action_resource_release(dev_flow);
-		if (dev_flow->dv.tag_resource)
-			flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
-		rte_free(dev_flow);
+	while (!SLIST_EMPTY(&flow->handles)) {
+		dv_handle = SLIST_FIRST(&flow->handles);
+		SLIST_REMOVE_HEAD(&flow->handles, next);
+		if (dv_handle->matcher)
+			flow_dv_matcher_release(dev, dv_handle);
+		if (dv_handle->encap_decap)
+			flow_dv_encap_decap_resource_release(dv_handle);
+		if (dv_handle->modify_hdr)
+			flow_dv_modify_hdr_resource_release(dv_handle);
+		if (dv_handle->jump)
+			flow_dv_jump_tbl_resource_release(dev, dv_handle);
+		if (dv_handle->port_id_action)
+			flow_dv_port_id_action_resource_release(dv_handle);
+		if (dv_handle->push_vlan_res)
+			flow_dv_push_vlan_action_resource_release(dv_handle);
+		if (dv_handle->tag_resource)
+			flow_dv_tag_release(dev, dv_handle->tag_resource);
+		rte_free(dv_handle);
 	}
 }
 
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH 5/6] net/mlx5: remove the DV support macro checking
  2020-02-03 13:32 [dpdk-dev] [PATCH 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
                   ` (3 preceding siblings ...)
  2020-02-03 13:32 ` [dpdk-dev] [PATCH 4/6] net/mlx5: introduce handle structure for DV flows Bing Zhao
@ 2020-02-03 13:32 ` Bing Zhao
  2020-02-03 13:32 ` [dpdk-dev] [PATCH 6/6] net/mlx5: do not save device flow matcher value Bing Zhao
  2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
  6 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-02-03 13:32 UTC (permalink / raw)
  To: orika, viacheslavo, rasland, matan; +Cc: dev

Some structures are defined in the mlx5_flow header file and are only
used for flows of the DV driver type. When using the inbox driver,
the DV mode is not supported. But the code is common to both modes
and can always be compiled, so there is no need to guard it with a
pre-processing macro.
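
To illustrate the pattern, here is a minimal standalone sketch (the
enum and function names are invented for illustration, not taken from
the PMD): both branches stay compiled and the selection happens at
run time, so no pre-processing guard is required.

#include <stdio.h>

enum drv_type { DRV_TYPE_VERBS, DRV_TYPE_DV };

/* Both paths compile unconditionally; the unsupported one is simply
 * never taken at run time, which replaces the #ifdef guard.
 */
static void
handle_sub_flow(enum drv_type type, void *sub_flow)
{
	if (type == DRV_TYPE_DV)
		printf("DV handle at %p\n", sub_flow);
	else
		printf("Verbs flow at %p\n", sub_flow);
}

int
main(void)
{
	int dummy = 0;

	handle_sub_flow(DRV_TYPE_VERBS, &dummy);
	return 0;
}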

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c | 42 ++++++++----------------------------------
 drivers/net/mlx5/mlx5_flow.h |  2 --
 2 files changed, 8 insertions(+), 36 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 1121904..2b2ba20 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -716,7 +716,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  */
 static void
 flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
-		       enum mlx5_flow_drv_type type __rte_unused,
+		       enum mlx5_flow_drv_type type,
 		       void *sub_flow)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
@@ -726,7 +726,6 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	uint64_t layers;
 	unsigned int i;
 
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	if (type == MLX5_FLOW_TYPE_DV) {
 		struct mlx5_flow_dv_handle *handle = sub_flow;
 		mark = !!(handle->action_flags &
@@ -735,16 +734,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
 		flow = handle->m_flow;
 	} else {
-#endif
 		struct mlx5_flow *dev_flow = sub_flow;
 		mark = !!(dev_flow->actions &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
 		layers = dev_flow->layers;
 		tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 		flow = dev_flow->flow;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	}
-#endif
 	for (i = 0; i != flow->rss.queue_num; ++i) {
 		int idx = (*flow->rss.queue)[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
@@ -793,15 +789,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_dv_handle *handle;
 	enum mlx5_flow_drv_type type = flow->drv_type;
 
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	struct mlx5_flow_dv_handle *handle;
 	if (type == MLX5_FLOW_TYPE_DV)
 		SLIST_FOREACH(handle, &flow->handles, next)
 			flow_drv_rxq_flags_set(dev, type, (void *)handle);
 	else
-#endif
 		LIST_FOREACH(dev_flow, &flow->dev_flows, next)
 			flow_drv_rxq_flags_set(dev, type, (void *)dev_flow);
 }
@@ -820,7 +814,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  */
 static void
 flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
-			enum mlx5_flow_drv_type type __rte_unused,
+			enum mlx5_flow_drv_type type,
 			void *sub_flow)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
@@ -831,7 +825,6 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	unsigned int i;
 
 	MLX5_ASSERT(dev->data->dev_started);
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	if (type == MLX5_FLOW_TYPE_DV) {
 		struct mlx5_flow_dv_handle *handle = sub_flow;
 		mark = !!(handle->action_flags &
@@ -840,16 +833,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
 		flow = handle->m_flow;
 	} else {
-#endif
 		struct mlx5_flow *dev_flow = sub_flow;
 		mark = !!(dev_flow->actions &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
 		layers = dev_flow->layers;
 		tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
 		flow = dev_flow->flow;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	}
-#endif
 	for (i = 0; i != flow->rss.queue_num; ++i) {
 		int idx = (*flow->rss.queue)[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
@@ -894,15 +884,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_dv_handle *handle;
 	enum mlx5_flow_drv_type type = flow->drv_type;
 
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	struct mlx5_flow_dv_handle *handle;
 	if (type == MLX5_FLOW_TYPE_DV)
 		SLIST_FOREACH(handle, &flow->handles, next)
 			flow_drv_rxq_flags_trim(dev, type, (void *)handle);
 	else
-#endif
 		LIST_FOREACH(dev_flow, &flow->dev_flows, next)
 			flow_drv_rxq_flags_trim(dev, type, (void *)dev_flow);
 }
@@ -2402,7 +2390,6 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			     struct rte_flow *flow)
 {
 	struct mlx5_flow *dev_flow;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	struct mlx5_flow_dv_handle *handle;
 	enum mlx5_flow_drv_type type = flow->drv_type;
 
@@ -2411,13 +2398,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			if (handle->qrss_id)
 				flow_qrss_free_id(dev, handle->qrss_id);
 	} else {
-#endif
 		LIST_FOREACH(dev_flow, &flow->dev_flows, next)
 			if (dev_flow->qrss_id)
 				flow_qrss_free_id(dev, dev_flow->qrss_id);
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	}
-#endif
 }
 
 static int
@@ -3509,18 +3493,14 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		return -rte_errno;
 	dev_flow->external = external;
 	dev_flow->flow = flow;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	if (flow->drv_type == MLX5_FLOW_TYPE_DV) {
 		SLIST_INSERT_HEAD(&flow->handles, dev_flow->dv_handle, next);
 		dev_flow->dv_handle->sidx = flow->sub_flows++;
 		dev_flow->dv_handle->m_flow = flow;
 	} else {
-#endif
 		/* Subflow obj was created, we must include one in the list. */
 		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	}
-#endif
 	if (sub_flow)
 		*sub_flow = dev_flow;
 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
@@ -3982,11 +3962,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			 * reallocation becomes possible (for example, for
 			 * other flows in other threads).
 			 */
-			dev_flow->qrss_id = qrss_id;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 			if (flow->drv_type == MLX5_FLOW_TYPE_DV)
 				dev_flow->dv_handle->qrss_id = qrss_id;
-#endif
+			else
+				dev_flow->qrss_id = qrss_id;
 			qrss_id = 0;
 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
 						   error);
@@ -4098,11 +4077,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			ret = -rte_errno;
 			goto exit;
 		}
-		dev_flow->mtr_flow_id = mtr_tag_id;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 		if (flow->drv_type == MLX5_FLOW_TYPE_DV)
 			dev_flow->dv_handle->mtr_flow_id = mtr_tag_id;
-#endif
+		else
+			dev_flow->mtr_flow_id = mtr_tag_id;
 		/* Prepare the suffix flow match pattern. */
 		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
 			     act_size);
@@ -4337,19 +4315,15 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 					    actions_hairpin_tx.actions, error);
 		if (!dev_flow)
 			goto error;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 		if (flow->drv_type == MLX5_FLOW_TYPE_DV) {
 			SLIST_INSERT_HEAD(&flow->handles,
 					  dev_flow->dv_handle, next);
 			dev_flow->dv_handle->sidx = flow->sub_flows++;
 			dev_flow->dv_handle->m_flow = flow;
 		} else {
-#endif
 			dev_flow->flow = flow;
 			LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 		}
-#endif
 		dev_flow->external = 0;
 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
 					 items_tx.items,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 10ac9c3..5e517c3 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -579,9 +579,7 @@ struct mlx5_flow {
 	uint32_t group; /**< The group index. */
 	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
 	union {
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 		struct mlx5_flow_dv_handle *dv_handle;
-#endif
 		struct mlx5_flow_verbs verbs;
 	};
 	union {
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH 6/6] net/mlx5: do not save device flow matcher value
  2020-02-03 13:32 [dpdk-dev] [PATCH 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
                   ` (4 preceding siblings ...)
  2020-02-03 13:32 ` [dpdk-dev] [PATCH 5/6] net/mlx5: remove the DV support macro checking Bing Zhao
@ 2020-02-03 13:32 ` Bing Zhao
  2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
  6 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-02-03 13:32 UTC (permalink / raw)
  To: orika, viacheslavo, rasland, matan; +Cc: dev

The matcher value is a series of bits in a format defined by the
hardware interface. The PMD needs to translate the packet header
into this matcher format, which is then used to create the flow with
the lower layer driver.
This matcher value is only used when creating a flow; when destroying
the flow, only the lower layer driver object related to the matcher
needs to be released. So there is no need to save such a huge block
of information in every device flow.
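
A rough standalone sketch of the idea, with invented names and an
arbitrary buffer size: the match value lives in per-slot scratch
storage that is cleared and refilled for each creation, instead of
being kept in every device flow.

#include <stdio.h>
#include <string.h>

#define MATCH_PARAM_SZ 512	/* illustrative size, not the real one */
#define MAX_SUB_FLOWS 8

/* Per-slot scratch match values, reused for every flow creation;
 * only the lower layer driver objects stay in the per-flow handle.
 */
static struct {
	unsigned char buf[MATCH_PARAM_SZ];
	size_t size;
} scratch_value[MAX_SUB_FLOWS];

static void *
prepare_match_value(int sidx)
{
	scratch_value[sidx].size = MATCH_PARAM_SZ;
	/* Clear the buffer in case of dirty content. */
	memset(scratch_value[sidx].buf, 0, MATCH_PARAM_SZ);
	return scratch_value[sidx].buf;
}

int
main(void)
{
	void *value = prepare_match_value(0);

	printf("match value at %p\n", value);
	return 0;
}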

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.h    |  2 --
 drivers/net/mlx5/mlx5_flow_dv.c | 28 ++++++++++++++++++++--------
 2 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 5e517c3..af30438 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -513,8 +513,6 @@ struct mlx5_flow_dv {
 	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
 	/* Flow DV api: */
 	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
-	struct mlx5_flow_dv_match_params value;
-	/**< Holds the value that the packet is compared to. */
 	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
 	/**< Pointer to encap/decap resource in cache. */
 	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 2013082..111b01d 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -56,7 +56,7 @@
 
 #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
 					  sizeof(struct rte_flow_item_ipv4))
-/* VLAN header definitions */
+/* VLAN header definitions. */
 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
@@ -75,15 +75,23 @@
 	uint32_t attr;
 };
 
+/* Maximal number of global temporary device flow. */
+#define MLX5DV_FLOW_HANDLE_MAX_NUM 8
 /* Global temporary device flow. */
 struct mlx5_flow sflow;
 /* Global subsidiary device flows actions' list. */
 struct {
 	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
+	/**< Action list. */
 	uint64_t hash_fields;
+	/**< Verbs hash Rx queue hash fields. */
+	struct mlx5_flow_dv_match_params value;
+	/**< Holds the value that the packet is compared to. */
 	int actions_n;
-	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
-} sflow_act[8];
+	/**< Number of actions. */
+	uint8_t transfer;
+	/**< 1 if the flow is E-Switch flow. */
+} sflow_act[MLX5DV_FLOW_HANDLE_MAX_NUM];
 
 /**
  * Initialize flow attributes structure according to flow items' types.
@@ -5127,7 +5135,6 @@ struct field_modify_info modify_tcp[] = {
 	}
 	dev_flow->ingress = attr->ingress;
 	dev_flow->transfer = attr->transfer;
-	dv_handle->value.size = MLX5_ST_SZ_BYTES(fte_match_param);
 	/* DV support already defined, compiler is happy for inbox driver. */
 	dev_flow->dv_handle = dv_handle;
 	return dev_flow;
@@ -7097,7 +7104,7 @@ struct field_modify_info modify_tcp[] = {
 	union mlx5_flow_tbl_key tbl_key;
 	uint32_t modify_action_pos = UINT32_MAX;
 	void *match_mask = matcher.mask.buf;
-	void *match_value = dev_flow->dv_handle->value.buf;
+	void *match_value = &sflow_act[sidx].value.buf;
 	uint8_t next_protocol = 0xff;
 	struct rte_vlan_hdr vlan = { 0 };
 	uint32_t table;
@@ -7539,6 +7546,11 @@ struct field_modify_info modify_tcp[] = {
 	sflow_act[sidx].actions_n = actions_n;
 	sflow_act[sidx].transfer = dev_flow->transfer;
 	dev_flow->dv_handle->action_flags = action_flags;
+	/* Matcher size is fixed right now. */
+	sflow_act[sidx].value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+	/* Clear buffer in case of dirty content. */
+	memset(&sflow_act[sidx].value.buf, 0,
+	       MLX5_ST_SZ_BYTES(fte_match_param));
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 		int item_type = items->type;
@@ -7723,7 +7735,7 @@ struct field_modify_info modify_tcp[] = {
 	}
 #ifdef RTE_LIBRTE_MLX5_DEBUG
 	MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
-					      dev_flow->dv.value.buf));
+					      sflow_act[sidx].value.buf));
 #endif
 	dev_flow->dv_handle->layers = item_flags;
 	if (action_flags & MLX5_FLOW_ACTION_RSS)
@@ -7820,8 +7832,8 @@ struct field_modify_info modify_tcp[] = {
 		matcher_obj = dv_handle->matcher->matcher_object;
 		dv_handle->flow =
 			mlx5_glue->dv_create_flow(matcher_obj,
-						  (void *)&dv_handle->value,
-						  n, sflow_act[sidx].actions);
+						  &sflow_act[sidx].value, n,
+						  sflow_act[sidx].actions);
 		if (!dv_handle->flow) {
 			rte_flow_error_set(error, errno,
 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v2 0/6] net/mlx5: move to non-cached mode for flow rules
  2020-02-03 13:32 [dpdk-dev] [PATCH 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
                   ` (5 preceding siblings ...)
  2020-02-03 13:32 ` [dpdk-dev] [PATCH 6/6] net/mlx5: do not save device flow matcher value Bing Zhao
@ 2020-02-04 11:33 ` Bing Zhao
  2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 1/6] net/mlx5: introduce non-cached flows tailq list Bing Zhao
                     ` (7 more replies)
  6 siblings, 8 replies; 26+ messages in thread
From: Bing Zhao @ 2020-02-04 11:33 UTC (permalink / raw)
  To: orika, viacheslavo, rasland, matan; +Cc: dev

This patch set will remove the flow rules cache and move to the 
non-cached mode for DV mode. For Verbs mode flow rules, the behavior
will remain the same.
In the device closing stage, all the software resources for flows
created will be freed and corresponding hardware resources will be
released. Then the total cost of the memory will be reduced and the
behavior of mlx5 PMD will comply fully with the ethdev API
expectations.
After closing a device, all the flow rules stored in the application
layer will no longer be valid. The application should synchronize its
database and not try to destroy any rule on this device.
And after a device restart, all the needed flow rules should be
reinserted via the create routine in the rte_flow lib.
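
From the application point of view, the expected usage can be
sketched with the public rte_flow API as below (a simplified example;
the attr/pattern/actions setup and error handling are application
specific):

#include <stdint.h>
#include <rte_flow.h>

/* After the port is stopped and restarted, previously returned flow
 * handles are stale; every rule that is still needed must be created
 * again through the regular create routine.
 */
static struct rte_flow *
recreate_rule(uint16_t port_id, const struct rte_flow_attr *attr,
	      const struct rte_flow_item pattern[],
	      const struct rte_flow_action actions[])
{
	struct rte_flow_error error;

	return rte_flow_create(port_id, attr, pattern, actions, &error);
}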

v2 Changes:
    Fix the compilation error with MLX5 debug mode in the 4th commit
    of "net/mlx5: introduce handle structure for DV flows".

Bing Zhao (6):
  net/mlx5: introduce non-cached flows tailq list
  net/mlx5: change operations of non-cached flows
  net/mlx5: flow type check before creating
  net/mlx5: introduce handle structure for DV flows
  net/mlx5: remove the DV support macro checking
  net/mlx5: do not save device flow matcher value

 drivers/net/mlx5/mlx5.c         |   4 +-
 drivers/net/mlx5/mlx5.h         |   5 +-
 drivers/net/mlx5/mlx5_flow.c    | 246 ++++++++++++++++++++++--------
 drivers/net/mlx5/mlx5_flow.h    |  44 +++++-
 drivers/net/mlx5/mlx5_flow_dv.c | 328 ++++++++++++++++++++++------------------
 drivers/net/mlx5/mlx5_trigger.c |  11 +-
 6 files changed, 417 insertions(+), 221 deletions(-)

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v2 1/6] net/mlx5: introduce non-cached flows tailq list
  2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
@ 2020-02-04 11:33   ` Bing Zhao
  2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 2/6] net/mlx5: change operations of non-cached flows Bing Zhao
                     ` (6 subsequent siblings)
  7 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-02-04 11:33 UTC (permalink / raw)
  To: orika, viacheslavo, rasland, matan; +Cc: dev

A new tailq head is introduced in the mlx5 private structure for each
device. Then all the flows created by the user are moved into this
tailq list. This is the first stage in separating the flows with DV
mode from the flows with Verbs mode.
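
For reference, a minimal standalone example of the tailq mechanics
from <sys/queue.h> used here, with an invented flow structure: one
head per category, and each flow is linked into exactly one of them.

#include <stdlib.h>
#include <sys/queue.h>

struct flow {
	TAILQ_ENTRY(flow) next;
	int is_dv;	/* invented driver-type flag */
};
TAILQ_HEAD(flow_list, flow);

int
main(void)
{
	struct flow_list cached_flows, noncached_flows;
	struct flow *f = calloc(1, sizeof(*f));

	if (f == NULL)
		return 1;
	TAILQ_INIT(&cached_flows);
	TAILQ_INIT(&noncached_flows);
	f->is_dv = 1;
	/* Each flow is linked into exactly one list by its type. */
	TAILQ_INSERT_TAIL(f->is_dv ? &noncached_flows : &cached_flows,
			  f, next);
	TAILQ_REMOVE(&noncached_flows, f, next);
	free(f);
	return 0;
}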

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5.c         |  3 ++-
 drivers/net/mlx5/mlx5.h         |  3 ++-
 drivers/net/mlx5/mlx5_flow.c    | 16 ++++++++--------
 drivers/net/mlx5/mlx5_trigger.c |  6 +++---
 4 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 7a79722..6a2d662 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2686,7 +2686,8 @@ struct mlx5_flow_id_pool *
 				      mlx5_ifindex(eth_dev),
 				      eth_dev->data->mac_addrs,
 				      MLX5_MAX_MAC_ADDRESSES);
-	TAILQ_INIT(&priv->flows);
+	TAILQ_INIT(&priv->cached_flows);
+	TAILQ_INIT(&priv->noncached_flows);
 	TAILQ_INIT(&priv->ctrl_flows);
 	TAILQ_INIT(&priv->flow_meters);
 	TAILQ_INIT(&priv->flow_meter_profiles);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index d7c519b..65bdb3b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -515,7 +515,8 @@ struct mlx5_priv {
 	unsigned int (*reta_idx)[]; /* RETA index table. */
 	unsigned int reta_idx_n; /* RETA index size. */
 	struct mlx5_drop drop_queue; /* Flow drop queues. */
-	struct mlx5_flows flows; /* RTE Flow rules. */
+	struct mlx5_flows cached_flows; /* cached RTE Flow rules. */
+	struct mlx5_flows noncached_flows; /* non-cached RTE Flow rules. */
 	struct mlx5_flows ctrl_flows; /* Control flow rules. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 144e07c..d7fb094 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4357,7 +4357,7 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	return flow_list_create(dev, &priv->flows,
+	return flow_list_create(dev, &priv->noncached_flows,
 				attr, items, actions, true, error);
 }
 
@@ -4490,7 +4490,7 @@ struct rte_flow *
 	struct rte_flow *flow;
 	int ret = 0;
 
-	TAILQ_FOREACH(flow, &priv->flows, next) {
+	TAILQ_FOREACH(flow, &priv->noncached_flows, next) {
 		DRV_LOG(DEBUG, "port %u flow %p still referenced",
 			dev->data->port_id, (void *)flow);
 		++ret;
@@ -4674,7 +4674,7 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	flow_list_destroy(dev, &priv->flows, flow);
+	flow_list_destroy(dev, &priv->noncached_flows, flow);
 	return 0;
 }
 
@@ -4690,7 +4690,7 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	mlx5_flow_list_flush(dev, &priv->flows);
+	mlx5_flow_list_flush(dev, &priv->noncached_flows);
 	return 0;
 }
 
@@ -5004,7 +5004,7 @@ struct rte_flow *
 	struct rte_flow *flow = NULL;
 
 	MLX5_ASSERT(fdir_flow);
-	TAILQ_FOREACH(flow, &priv->flows, next) {
+	TAILQ_FOREACH(flow, &priv->noncached_flows, next) {
 		if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
 			DRV_LOG(DEBUG, "port %u found FDIR flow %p",
 				dev->data->port_id, (void *)flow);
@@ -5047,7 +5047,7 @@ struct rte_flow *
 		rte_errno = EEXIST;
 		goto error;
 	}
-	flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
+	flow = flow_list_create(dev, &priv->noncached_flows, &fdir_flow->attr,
 				fdir_flow->items, fdir_flow->actions, true,
 				NULL);
 	if (!flow)
@@ -5092,7 +5092,7 @@ struct rte_flow *
 		rte_errno = ENOENT;
 		return -rte_errno;
 	}
-	flow_list_destroy(dev, &priv->flows, flow);
+	flow_list_destroy(dev, &priv->noncached_flows, flow);
 	DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
 		dev->data->port_id, (void *)flow);
 	return 0;
@@ -5132,7 +5132,7 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	mlx5_flow_list_flush(dev, &priv->flows);
+	mlx5_flow_list_flush(dev, &priv->noncached_flows);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index be47df5..0053847 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -320,7 +320,7 @@
 			dev->data->port_id);
 		goto error;
 	}
-	ret = mlx5_flow_start(dev, &priv->flows);
+	ret = mlx5_flow_start(dev, &priv->noncached_flows);
 	if (ret) {
 		DRV_LOG(DEBUG, "port %u failed to set flows",
 			dev->data->port_id);
@@ -337,7 +337,7 @@
 	ret = rte_errno; /* Save rte_errno before cleanup. */
 	/* Rollback. */
 	dev->data->dev_started = 0;
-	mlx5_flow_stop(dev, &priv->flows);
+	mlx5_flow_stop(dev, &priv->noncached_flows);
 	mlx5_traffic_disable(dev);
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
@@ -367,7 +367,7 @@
 	mlx5_mp_req_stop_rxtx(dev);
 	usleep(1000 * priv->rxqs_n);
 	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
-	mlx5_flow_stop(dev, &priv->flows);
+	mlx5_flow_stop(dev, &priv->noncached_flows);
 	mlx5_traffic_disable(dev);
 	mlx5_rx_intr_vec_disable(dev);
 	mlx5_dev_interrupt_handler_uninstall(dev);
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v2 2/6] net/mlx5: change operations of non-cached flows
  2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
  2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 1/6] net/mlx5: introduce non-cached flows tailq list Bing Zhao
@ 2020-02-04 11:33   ` Bing Zhao
  2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 3/6] net/mlx5: flow type check before creating Bing Zhao
                     ` (5 subsequent siblings)
  7 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-02-04 11:33 UTC (permalink / raw)
  To: orika, viacheslavo, rasland, matan; +Cc: dev

When stopping an mlx5 device, the flows with non-cached mode are
flushed, so no operation is needed for these flows in the device
closing stage.
If the device is restarted after being stopped, no flow with
non-cached mode will be reinserted.
Operations on flows with cached mode remain the same. And when the
flush is requested by the user, all the flows are flushed.
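
The resulting ordering can be sketched as below; the function names
are invented stand-ins for the PMD entry points, not the real ones:

#include <stdio.h>

static void stop_cached_flows(void) { puts("cached flows disabled"); }
static void flush_noncached_flows(void) { puts("non-cached flows freed"); }
static void flush_cached_flows(void) { puts("cached flows freed"); }

/* On stop: cached flows are only disabled and can be re-applied on
 * the next start; non-cached flows are destroyed for good.
 */
static void
dev_stop(void)
{
	stop_cached_flows();
	flush_noncached_flows();
}

/* On close: only the cached flows are left to flush. */
static void
dev_close(void)
{
	flush_cached_flows();
}

int
main(void)
{
	dev_stop();
	dev_close();
	return 0;
}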

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5.c         |  1 +
 drivers/net/mlx5/mlx5.h         |  2 ++
 drivers/net/mlx5/mlx5_flow.c    | 36 +++++++++++++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_trigger.c | 11 ++++++++---
 4 files changed, 44 insertions(+), 6 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 6a2d662..4c97df5 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1236,6 +1236,7 @@ struct mlx5_flow_id_pool *
 	mlx5_dev_interrupt_handler_uninstall(dev);
 	mlx5_dev_interrupt_handler_devx_uninstall(dev);
 	mlx5_traffic_disable(dev);
+	/* Only cached flows will be flushed in this stage, if any. */
 	mlx5_flow_flush(dev, NULL);
 	mlx5_flow_meter_flush(dev, NULL);
 	/* Prevent crashes when queues are still in use. */
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 65bdb3b..d749b29 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -715,6 +715,8 @@ int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 		      struct rte_flow_error *error);
 void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list);
 int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int mlx5_flow_flush_noncached(struct rte_eth_dev *dev,
+			      struct rte_flow_error *error);
 int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 		    const struct rte_flow_action *action, void *data,
 		    struct rte_flow_error *error);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index d7fb094..0560874 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4453,11 +4453,14 @@ struct rte_flow *
 	struct rte_flow_error error;
 	int ret = 0;
 
-	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
+	/*
+	 * Make sure default copy action (reg_c[0] -> reg_b) is created.
+	 * This should always be executed no matter the driver type.
+	 */
 	ret = flow_mreg_add_default_copy_action(dev, &error);
 	if (ret < 0)
 		return -rte_errno;
-	/* Apply Flows created by application. */
+	/* Apply Flows created by application, only for cached flows. */
 	TAILQ_FOREACH(flow, list, next) {
 		ret = flow_mreg_start_copy_action(dev, flow);
 		if (ret < 0)
@@ -4674,7 +4677,15 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	flow_list_destroy(dev, &priv->noncached_flows, flow);
+	/*
+	 * Checking the flow type and then destroying the flows in both lists.
+	 * Flow with DV type is non-cached (most cases) and flow with legacy
+	 * verbs mode is still cached right now.
+	 */
+	if (flow->drv_type == MLX5_FLOW_TYPE_DV)
+		flow_list_destroy(dev, &priv->noncached_flows, flow);
+	else
+		flow_list_destroy(dev, &priv->cached_flows, flow);
 	return 0;
 }
 
@@ -4690,6 +4701,24 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
+	/* In most cases, only one tailq list will contain the flows. */
+	mlx5_flow_list_flush(dev, &priv->noncached_flows);
+	mlx5_flow_list_flush(dev, &priv->cached_flows);
+	return 0;
+}
+
+/**
+ * Destroy all non-cached flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_flush_noncached(struct rte_eth_dev *dev,
+			  struct rte_flow_error *error __rte_unused)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
 	mlx5_flow_list_flush(dev, &priv->noncached_flows);
 	return 0;
 }
@@ -5133,6 +5162,7 @@ struct rte_flow *
 	struct mlx5_priv *priv = dev->data->dev_private;
 
 	mlx5_flow_list_flush(dev, &priv->noncached_flows);
+	mlx5_flow_list_flush(dev, &priv->cached_flows);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 0053847..26f4863 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -320,7 +320,7 @@
 			dev->data->port_id);
 		goto error;
 	}
-	ret = mlx5_flow_start(dev, &priv->noncached_flows);
+	ret = mlx5_flow_start(dev, &priv->cached_flows);
 	if (ret) {
 		DRV_LOG(DEBUG, "port %u failed to set flows",
 			dev->data->port_id);
@@ -337,7 +337,7 @@
 	ret = rte_errno; /* Save rte_errno before cleanup. */
 	/* Rollback. */
 	dev->data->dev_started = 0;
-	mlx5_flow_stop(dev, &priv->noncached_flows);
+	mlx5_flow_stop(dev, &priv->cached_flows);
 	mlx5_traffic_disable(dev);
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
@@ -367,7 +367,12 @@
 	mlx5_mp_req_stop_rxtx(dev);
 	usleep(1000 * priv->rxqs_n);
 	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
-	mlx5_flow_stop(dev, &priv->noncached_flows);
+	mlx5_flow_stop(dev, &priv->cached_flows);
+	/*
+	 * Flow flushing still happens after deleting the default copy action
+	 * and clearing the flags of all Rx queues.
+	 */
+	mlx5_flow_flush_noncached(dev, NULL);
 	mlx5_traffic_disable(dev);
 	mlx5_rx_intr_vec_disable(dev);
 	mlx5_dev_interrupt_handler_uninstall(dev);
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v2 3/6] net/mlx5: flow type check before creating
  2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
  2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 1/6] net/mlx5: introduce non-cached flows tailq list Bing Zhao
  2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 2/6] net/mlx5: change operations of non-cached flows Bing Zhao
@ 2020-02-04 11:33   ` Bing Zhao
  2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 4/6] net/mlx5: introduce handle structure for DV flows Bing Zhao
                     ` (4 subsequent siblings)
  7 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-02-04 11:33 UTC (permalink / raw)
  To: orika, viacheslavo, rasland, matan; +Cc: dev

When creating a flow, the driver type needs to be checked in order
to call the corresponding functions. The driver type check is now
moved out of the flow creation function, so that the flow can be
added into the correct tailq list.

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c | 40 ++++++++++++++++++++++++----------------
 1 file changed, 24 insertions(+), 16 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 0560874..8fb973b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2874,8 +2874,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
 		 const struct rte_flow_attr *attr,
 		 const struct rte_flow_item items[],
-		 const struct rte_flow_action actions[],
-		 bool external, struct rte_flow_error *error);
+		 const struct rte_flow_action actions[], bool external,
+		 enum mlx5_flow_drv_type type, struct rte_flow_error *error);
 
 static void
 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
@@ -3015,7 +3015,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	 * by list traversing.
 	 */
 	mcp_res->flow = flow_list_create(dev, NULL, &attr, items,
-					 actions, false, error);
+					 actions, false,
+					 flow_get_drv_type(dev, &attr), error);
 	if (!mcp_res->flow)
 		goto error;
 	mcp_res->refcnt++;
@@ -4119,6 +4120,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *   Associated actions (list terminated by the END action).
  * @param[in] external
  *   This flow rule is created by request external to PMD.
+ * @param[in] type
+ *   Flow rule type, DV or VERBS.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  *
@@ -4129,8 +4132,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
 		 const struct rte_flow_attr *attr,
 		 const struct rte_flow_item items[],
-		 const struct rte_flow_action actions[],
-		 bool external, struct rte_flow_error *error)
+		 const struct rte_flow_action actions[], bool external,
+		 enum mlx5_flow_drv_type type, struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = NULL;
@@ -4188,7 +4191,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		rte_errno = ENOMEM;
 		goto error_before_flow;
 	}
-	flow->drv_type = flow_get_drv_type(dev, attr);
+	flow->drv_type = type;
 	if (hairpin_id != 0)
 		flow->hairpin_flow_id = hairpin_id;
 	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
@@ -4339,7 +4342,8 @@ struct rte_flow *
 	struct rte_flow_error error;
 
 	return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern,
-				actions, false, &error);
+				actions, false,
+				flow_get_drv_type(dev, &attr), &error);
 }
 
 /**
@@ -4356,9 +4360,13 @@ struct rte_flow *
 		 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flows *flow_list;
+	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
 
-	return flow_list_create(dev, &priv->noncached_flows,
-				attr, items, actions, true, error);
+	flow_list = (type == MLX5_FLOW_TYPE_DV) ? &priv->noncached_flows :
+						  &priv->cached_flows;
+	return flow_list_create(dev, flow_list, attr,
+				items, actions, true, type, error);
 }
 
 /**
@@ -4548,8 +4556,8 @@ struct rte_flow *
 	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
 	actions[0].conf = &jump;
 	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
-	flow = flow_list_create(dev, &priv->ctrl_flows,
-				&attr, items, actions, false, &error);
+	flow = flow_list_create(dev, &priv->ctrl_flows, &attr, items, actions,
+				false, flow_get_drv_type(dev, &attr), &error);
 	if (!flow) {
 		DRV_LOG(DEBUG,
 			"Failed to create ctrl flow: rte_errno(%d),"
@@ -4636,8 +4644,8 @@ struct rte_flow *
 	}
 	for (i = 0; i != priv->reta_idx_n; ++i)
 		queue[i] = (*priv->reta_idx)[i];
-	flow = flow_list_create(dev, &priv->ctrl_flows,
-				&attr, items, actions, false, &error);
+	flow = flow_list_create(dev, &priv->ctrl_flows, &attr, items, actions,
+				false, flow_get_drv_type(dev, &attr), &error);
 	if (!flow)
 		return -rte_errno;
 	return 0;
@@ -5078,7 +5086,7 @@ struct rte_flow *
 	}
 	flow = flow_list_create(dev, &priv->noncached_flows, &fdir_flow->attr,
 				fdir_flow->items, fdir_flow->actions, true,
-				NULL);
+				flow_get_drv_type(dev, &fdir_flow->attr), NULL);
 	if (!flow)
 		goto error;
 	MLX5_ASSERT(!flow->fdir);
@@ -5695,8 +5703,8 @@ struct mlx5_flow_counter *
 		if (!config->dv_flow_en)
 			break;
 		/* Create internal flow, validation skips copy action. */
-		flow = flow_list_create(dev, NULL, &attr, items,
-					actions, false, &error);
+		flow = flow_list_create(dev, NULL, &attr, items, actions, false,
+					flow_get_drv_type(dev, &attr), &error);
 		if (!flow)
 			continue;
 		if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v2 4/6] net/mlx5: introduce handle structure for DV flows
  2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
                     ` (2 preceding siblings ...)
  2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 3/6] net/mlx5: flow type check before creating Bing Zhao
@ 2020-02-04 11:33   ` Bing Zhao
  2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 5/6] net/mlx5: remove the DV support macro checking Bing Zhao
                     ` (3 subsequent siblings)
  7 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-02-04 11:33 UTC (permalink / raw)
  To: orika, viacheslavo, rasland, matan; +Cc: dev

Introduce a new structure "mlx5_flow_dv_handle" based on the device
flow structures "mlx5_flow" and "mlx5_flow_dv"; meanwhile, the
"mlx5_flow" structure is kept for Verbs flows.
Only the matcher and action objects are saved, in order to free
these resources when destroying a flow. The other information is
stored in intermediate global variables that can be reused by all
flows during creation.
The inbox OFED driver is also taken into consideration.
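
A minimal standalone sketch of the handle layout (field and type
names are invented for illustration): the per-subflow handle keeps
only what the destroy path needs, linked from the main flow by a
singly-linked list.

#include <stdlib.h>
#include <sys/queue.h>

/* Slim per-subflow handle: only the driver objects that the destroy
 * path needs, none of the translation-time scratch data.
 */
struct dv_handle {
	SLIST_ENTRY(dv_handle) next;
	void *matcher;	/* lower layer matcher object */
	void *flow;	/* installed lower layer flow */
};

struct flow {
	SLIST_HEAD(, dv_handle) handles;
};

int
main(void)
{
	struct flow f;
	struct dv_handle *h = calloc(1, sizeof(*h));

	if (h == NULL)
		return 1;
	SLIST_INIT(&f.handles);
	SLIST_INSERT_HEAD(&f.handles, h, next);
	/* On destroy, pop and free each handle. */
	while (!SLIST_EMPTY(&f.handles)) {
		h = SLIST_FIRST(&f.handles);
		SLIST_REMOVE_HEAD(&f.handles, next);
		free(h);
	}
	return 0;
}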

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 184 +++++++++++++++++++-----
 drivers/net/mlx5/mlx5_flow.h    |  40 +++++-
 drivers/net/mlx5/mlx5_flow_dv.c | 312 +++++++++++++++++++++-------------------
 3 files changed, 351 insertions(+), 185 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8fb973b..1121904 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -709,19 +709,42 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- *   Pointer to device flow structure.
+ * @param[in] type
+ *   Driver type of the RTE flow.
+ * @param[in] sub_flow
+ *   Pointer to device flow or flow handle structure.
  */
 static void
-flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
+		       enum mlx5_flow_drv_type type __rte_unused,
+		       void *sub_flow)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->actions &
-			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+	struct rte_flow *flow;
+	int mark;
+	int tunnel;
+	uint64_t layers;
 	unsigned int i;
 
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	if (type == MLX5_FLOW_TYPE_DV) {
+		struct mlx5_flow_dv_handle *handle = sub_flow;
+		mark = !!(handle->action_flags &
+			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+		layers = handle->layers;
+		tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
+		flow = handle->m_flow;
+	} else {
+#endif
+		struct mlx5_flow *dev_flow = sub_flow;
+		mark = !!(dev_flow->actions &
+			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+		layers = dev_flow->layers;
+		tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+		flow = dev_flow->flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	}
+#endif
 	for (i = 0; i != flow->rss.queue_num; ++i) {
 		int idx = (*flow->rss.queue)[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
@@ -747,8 +770,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 
 			/* Increase the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
-				if ((tunnels_info[j].tunnel &
-				     dev_flow->layers) ==
+				if ((tunnels_info[j].tunnel & layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]++;
 					break;
@@ -771,9 +793,17 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct mlx5_flow *dev_flow;
+	enum mlx5_flow_drv_type type = flow->drv_type;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		flow_drv_rxq_flags_set(dev, dev_flow);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	struct mlx5_flow_dv_handle *handle;
+	if (type == MLX5_FLOW_TYPE_DV)
+		SLIST_FOREACH(handle, &flow->handles, next)
+			flow_drv_rxq_flags_set(dev, type, (void *)handle);
+	else
+#endif
+		LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+			flow_drv_rxq_flags_set(dev, type, (void *)dev_flow);
 }
 
 /**
@@ -782,20 +812,44 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param[in] dev_flow
- *   Pointer to the device flow.
+ * @param[in] type
+ *   Driver type of the RTE flow.
+ * @param[in] sub_flow
+ *   Pointer to device flow or flow handle structure.
+
  */
 static void
-flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
+			enum mlx5_flow_drv_type type __rte_unused,
+			void *sub_flow)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->actions &
-			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+	struct rte_flow *flow;
+	int mark;
+	int tunnel;
+	uint64_t layers;
 	unsigned int i;
 
 	MLX5_ASSERT(dev->data->dev_started);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	if (type == MLX5_FLOW_TYPE_DV) {
+		struct mlx5_flow_dv_handle *handle = sub_flow;
+		mark = !!(handle->action_flags &
+			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+		layers = handle->layers;
+		tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
+		flow = handle->m_flow;
+	} else {
+#endif
+		struct mlx5_flow *dev_flow = sub_flow;
+		mark = !!(dev_flow->actions &
+			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+		layers = dev_flow->layers;
+		tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
+		flow = dev_flow->flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	}
+#endif
 	for (i = 0; i != flow->rss.queue_num; ++i) {
 		int idx = (*flow->rss.queue)[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
@@ -816,8 +870,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 
 			/* Decrease the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
-				if ((tunnels_info[j].tunnel &
-				     dev_flow->layers) ==
+				if ((tunnels_info[j].tunnel & layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]--;
 					break;
@@ -841,9 +894,17 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct mlx5_flow *dev_flow;
+	enum mlx5_flow_drv_type type = flow->drv_type;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		flow_drv_rxq_flags_trim(dev, dev_flow);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	struct mlx5_flow_dv_handle *handle;
+	if (type == MLX5_FLOW_TYPE_DV)
+		SLIST_FOREACH(handle, &flow->handles, next)
+			flow_drv_rxq_flags_trim(dev, type, (void *)handle);
+	else
+#endif
+		LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+			flow_drv_rxq_flags_trim(dev, type, (void *)dev_flow);
 }
 
 /**
@@ -2341,10 +2402,22 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			     struct rte_flow *flow)
 {
 	struct mlx5_flow *dev_flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	struct mlx5_flow_dv_handle *handle;
+	enum mlx5_flow_drv_type type = flow->drv_type;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		if (dev_flow->qrss_id)
-			flow_qrss_free_id(dev, dev_flow->qrss_id);
+	if (type == MLX5_FLOW_TYPE_DV) {
+		SLIST_FOREACH(handle, &flow->handles, next)
+			if (handle->qrss_id)
+				flow_qrss_free_id(dev, handle->qrss_id);
+	} else {
+#endif
+		LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+			if (dev_flow->qrss_id)
+				flow_qrss_free_id(dev, dev_flow->qrss_id);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	}
+#endif
 }
 
 static int
@@ -3434,10 +3507,20 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
 	if (!dev_flow)
 		return -rte_errno;
-	dev_flow->flow = flow;
 	dev_flow->external = external;
-	/* Subflow object was created, we must include one in the list. */
-	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+	dev_flow->flow = flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	if (flow->drv_type == MLX5_FLOW_TYPE_DV) {
+		SLIST_INSERT_HEAD(&flow->handles, dev_flow->dv_handle, next);
+		dev_flow->dv_handle->sidx = flow->sub_flows++;
+		dev_flow->dv_handle->m_flow = flow;
+	} else {
+#endif
+		/* Subflow obj was created, we must include one in the list. */
+		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	}
+#endif
 	if (sub_flow)
 		*sub_flow = dev_flow;
 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
@@ -3900,6 +3983,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			 * other flows in other threads).
 			 */
 			dev_flow->qrss_id = qrss_id;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+			if (flow->drv_type == MLX5_FLOW_TYPE_DV)
+				dev_flow->dv_handle->qrss_id = qrss_id;
+#endif
 			qrss_id = 0;
 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
 						   error);
@@ -4012,6 +4099,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			goto exit;
 		}
 		dev_flow->mtr_flow_id = mtr_tag_id;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+		if (flow->drv_type == MLX5_FLOW_TYPE_DV)
+			dev_flow->dv_handle->mtr_flow_id = mtr_tag_id;
+#endif
 		/* Prepare the suffix flow match pattern. */
 		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
 			     act_size);
@@ -4164,6 +4255,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	uint32_t hairpin_id = 0;
 	struct rte_flow_attr attr_tx = { .priority = 0 };
 
+	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
 	if (hairpin_flow > 0) {
 		if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
@@ -4192,10 +4284,9 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		goto error_before_flow;
 	}
 	flow->drv_type = type;
+	flow->sub_flows = 0;
 	if (hairpin_id != 0)
 		flow->hairpin_flow_id = hairpin_id;
-	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
-		    flow->drv_type < MLX5_FLOW_TYPE_MAX);
 	flow->rss.queue = (void *)(flow + 1);
 	if (rss) {
 		/*
@@ -4206,7 +4297,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
 		flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
 	}
-	LIST_INIT(&flow->dev_flows);
+	if (flow->drv_type == MLX5_FLOW_TYPE_DV)
+		SLIST_INIT(&flow->handles);
+	else
+		LIST_INIT(&flow->dev_flows);
 	if (rss && rss->types) {
 		unsigned int graph_root;
 
@@ -4243,9 +4337,20 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 					    actions_hairpin_tx.actions, error);
 		if (!dev_flow)
 			goto error;
-		dev_flow->flow = flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+		if (flow->drv_type == MLX5_FLOW_TYPE_DV) {
+			SLIST_INSERT_HEAD(&flow->handles,
+					  dev_flow->dv_handle, next);
+			dev_flow->dv_handle->sidx = flow->sub_flows++;
+			dev_flow->dv_handle->m_flow = flow;
+		} else {
+#endif
+			dev_flow->flow = flow;
+			LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+		}
+#endif
 		dev_flow->external = 0;
-		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
 					 items_tx.items,
 					 actions_hairpin_tx.actions, error);
@@ -4363,8 +4468,17 @@ struct rte_flow *
 	struct mlx5_flows *flow_list;
 	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
 
-	flow_list = (type == MLX5_FLOW_TYPE_DV) ? &priv->noncached_flows :
-						  &priv->cached_flows;
+	if (type == MLX5_FLOW_TYPE_DV) {
+		if (unlikely(!dev->data->dev_started)) {
+			rte_errno = ENODEV;
+			DRV_LOG(DEBUG, "port %u is not started when "
+				"inserting a flow", dev->data->port_id);
+			return NULL;
+		}
+		flow_list = &priv->noncached_flows;
+	} else {
+		flow_list = &priv->cached_flows;
+	}
 	return flow_list_create(dev, flow_list, attr,
 				items, actions, true, type, error);
 }
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 7c31bfe..10ac9c3 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -468,6 +468,39 @@ struct mlx5_flow_tbl_data_entry {
 	/**< jump resource, at most one for each table created. */
 };
 
+struct mlx5_flow_dv_handle {
+	SLIST_ENTRY(mlx5_flow_dv_handle) next;
+	struct rte_flow *m_flow; /**< Pointer to the main flow. */
+	uint64_t layers;
+	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
+	uint64_t action_flags;
+	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
+	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
+	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
+	struct mlx5_flow_dv_match_params value;
+	/**< Holds the value that the packet is compared to. */
+	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
+	/**< Pointer to encap/decap resource in cache. */
+	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
+	/**< Pointer to modify header resource in cache. */
+	struct mlx5_flow_dv_jump_tbl_resource *jump;
+	/**< Pointer to the jump action resource. */
+	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
+	/**< Pointer to port ID action resource. */
+	struct mlx5_vf_vlan vf_vlan;
+	/**< Structure for VF VLAN workaround. */
+	struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
+	/**< Pointer to push VLAN action resource in cache. */
+	struct mlx5_flow_dv_tag_resource *tag_resource;
+	/**< pointer to the tag action. */
+	struct ibv_flow *flow; /**< Installed flow. */
+	union {
+		uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
+		uint32_t mtr_flow_id; /**< Unique meter match flow id. */
+	};
+	uint8_t sidx;
+};
+
 /*
  * Max number of actions per DV flow.
  * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
@@ -547,12 +580,12 @@ struct mlx5_flow {
 	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
 	union {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		struct mlx5_flow_dv dv;
+		struct mlx5_flow_dv_handle *dv_handle;
 #endif
 		struct mlx5_flow_verbs verbs;
 	};
 	union {
-		uint32_t qrss_id; /**< Uniqie Q/RSS suffix subflow tag. */
+		uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
 		uint32_t mtr_flow_id; /**< Unique meter match flow id. */
 	};
 	bool external; /**< true if the flow is created external to PMD. */
@@ -674,6 +707,9 @@ struct rte_flow {
 	struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
 	uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
 	uint32_t copy_applied:1; /**< The MARK copy Flow os applied. */
+	SLIST_HEAD(, mlx5_flow_dv_handle) handles;
+	/**< The HEAD of DV handles. */
+	uint8_t sub_flows;
 };
 
 typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 2878393..33a3d70 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -75,6 +75,16 @@
 	uint32_t attr;
 };
 
+/* Global temporary device flow. */
+struct mlx5_flow sflow;
+/* Global subsidiary device flows actions' list. */
+struct {
+	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
+	uint64_t hash_fields;
+	int actions_n;
+	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
+} sflow_act[8];
+
 /**
  * Initialize flow attributes structure according to flow items' types.
  *
@@ -2348,7 +2358,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.encap_decap = cache_resource;
+			dev_flow->dv_handle->encap_decap = cache_resource;
 			return 0;
 		}
 	}
@@ -2374,7 +2384,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
-	dev_flow->dv.encap_decap = cache_resource;
+	dev_flow->dv_handle->encap_decap = cache_resource;
 	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2425,7 +2435,7 @@ struct field_modify_info modify_tcp[] = {
 			(void *)&tbl_data->jump, cnt);
 	}
 	rte_atomic32_inc(&tbl_data->jump.refcnt);
-	dev_flow->dv.jump = &tbl_data->jump;
+	dev_flow->dv_handle->jump = &tbl_data->jump;
 	return 0;
 }
 
@@ -2463,7 +2473,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.port_id_action = cache_resource;
+			dev_flow->dv_handle->port_id_action = cache_resource;
 			return 0;
 		}
 	}
@@ -2491,7 +2501,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
-	dev_flow->dv.port_id_action = cache_resource;
+	dev_flow->dv_handle->port_id_action = cache_resource;
 	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2534,7 +2544,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.push_vlan_res = cache_resource;
+			dev_flow->dv_handle->push_vlan_res = cache_resource;
 			return 0;
 		}
 	}
@@ -2563,7 +2573,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
-	dev_flow->dv.push_vlan_res = cache_resource;
+	dev_flow->dv_handle->push_vlan_res = cache_resource;
 	DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -3652,7 +3662,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.modify_hdr = cache_resource;
+			dev_flow->dv_handle->modify_hdr = cache_resource;
 			return 0;
 		}
 	}
@@ -3679,7 +3689,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
-	dev_flow->dv.modify_hdr = cache_resource;
+	dev_flow->dv_handle->modify_hdr = cache_resource;
 	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -5102,19 +5112,24 @@ struct field_modify_info modify_tcp[] = {
 		const struct rte_flow_action actions[] __rte_unused,
 		struct rte_flow_error *error)
 {
-	size_t size = sizeof(struct mlx5_flow);
+	size_t size = sizeof(struct mlx5_flow_dv_handle);
 	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_dv_handle *dv_handle;
 
-	dev_flow = rte_calloc(__func__, 1, size, 0);
-	if (!dev_flow) {
+	/* No need to clear the global temporary flow to 0. */
+	dev_flow = &sflow;
+	dv_handle = rte_zmalloc(__func__, size, 0);
+	if (!dv_handle) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				   "not enough memory to create flow");
+				   "not enough memory to create flow handle");
 		return NULL;
 	}
-	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
 	dev_flow->ingress = attr->ingress;
 	dev_flow->transfer = attr->transfer;
+	dv_handle->value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+	/* DV support is already defined; the compiler is happy for inbox driver. */
+	dev_flow->dv_handle = dv_handle;
 	return dev_flow;
 }
 
@@ -5253,7 +5268,7 @@ struct field_modify_info modify_tcp[] = {
 		 * This is workaround, masks are not supported,
 		 * and pre-validated.
 		 */
-		dev_flow->dv.vf_vlan.tag =
+		dev_flow->dv_handle->vf_vlan.tag =
 			rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
 	}
 	tci_m = rte_be_to_cpu_16(vlan_m->tci);
@@ -6712,7 +6727,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_matcher,
 				rte_atomic32_read(&cache_matcher->refcnt));
 			rte_atomic32_inc(&cache_matcher->refcnt);
-			dev_flow->dv.matcher = cache_matcher;
+			dev_flow->dv_handle->matcher = cache_matcher;
 			/* old matcher should not make the table ref++. */
 			flow_dv_tbl_resource_release(dev, tbl);
 			return 0;
@@ -6749,7 +6764,7 @@ struct field_modify_info modify_tcp[] = {
 	/* only matcher ref++, table ref++ already done above in get API. */
 	rte_atomic32_inc(&cache_matcher->refcnt);
 	LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
-	dev_flow->dv.matcher = cache_matcher;
+	dev_flow->dv_handle->matcher = cache_matcher;
 	DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
 		key->domain ? "FDB" : "NIC", key->table_id,
 		cache_matcher->priority,
@@ -6791,7 +6806,7 @@ struct field_modify_info modify_tcp[] = {
 		cache_resource = container_of
 			(entry, struct mlx5_flow_dv_tag_resource, entry);
 		rte_atomic32_inc(&cache_resource->refcnt);
-		dev_flow->dv.tag_resource = cache_resource;
+		dev_flow->dv_handle->tag_resource = cache_resource;
 		DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
 			(void *)cache_resource,
 			rte_atomic32_read(&cache_resource->refcnt));
@@ -6820,7 +6835,7 @@ struct field_modify_info modify_tcp[] = {
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot insert tag");
 	}
-	dev_flow->dv.tag_resource = cache_resource;
+	dev_flow->dv_handle->tag_resource = cache_resource;
 	DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -7022,6 +7037,9 @@ struct field_modify_info modify_tcp[] = {
 				dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
 		}
 	}
+	/* No need to save the hash fields after creation. */
+	sflow_act[dev_flow->dv_handle->sidx].hash_fields =
+						dev_flow->hash_fields;
 }
 
 /**
@@ -7065,6 +7083,7 @@ struct field_modify_info modify_tcp[] = {
 		},
 	};
 	int actions_n = 0;
+	uint8_t sidx = dev_flow->dv_handle->sidx;
 	bool actions_end = false;
 	union {
 		struct mlx5_flow_dv_modify_hdr_resource res;
@@ -7076,9 +7095,9 @@ struct field_modify_info modify_tcp[] = {
 	union flow_dv_attr flow_attr = { .attr = 0 };
 	uint32_t tag_be;
 	union mlx5_flow_tbl_key tbl_key;
-	uint32_t modify_action_position = UINT32_MAX;
+	uint32_t modify_action_pos = UINT32_MAX;
 	void *match_mask = matcher.mask.buf;
-	void *match_value = dev_flow->dv.value.buf;
+	void *match_value = dev_flow->dv_handle->value.buf;
 	uint8_t next_protocol = 0xff;
 	struct rte_vlan_hdr vlan = { 0 };
 	uint32_t table;
@@ -7122,8 +7141,8 @@ struct field_modify_info modify_tcp[] = {
 			if (flow_dv_port_id_action_resource_register
 			    (dev, &port_id_resource, dev_flow, error))
 				return -rte_errno;
-			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.port_id_action->action;
+			sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->port_id_action->action;
 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
 			break;
 		case RTE_FLOW_ACTION_TYPE_FLAG:
@@ -7132,7 +7151,6 @@ struct field_modify_info modify_tcp[] = {
 				struct rte_flow_action_mark mark = {
 					.id = MLX5_FLOW_MARK_DEFAULT,
 				};
-
 				if (flow_dv_convert_action_mark(dev, &mark,
 								mhdr_res,
 								error))
@@ -7141,12 +7159,12 @@ struct field_modify_info modify_tcp[] = {
 				break;
 			}
 			tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
-			if (!dev_flow->dv.tag_resource)
+			if (!dev_flow->dv_handle->tag_resource)
 				if (flow_dv_tag_resource_register
 				    (dev, tag_be, dev_flow, error))
 					return -rte_errno;
-			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.tag_resource->action;
+			sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			action_flags |= MLX5_FLOW_ACTION_MARK;
@@ -7168,12 +7186,12 @@ struct field_modify_info modify_tcp[] = {
 			tag_be = mlx5_flow_mark_set
 			      (((const struct rte_flow_action_mark *)
 			       (actions->conf))->id);
-			if (!dev_flow->dv.tag_resource)
+			if (!dev_flow->dv_handle->tag_resource)
 				if (flow_dv_tag_resource_register
 				    (dev, tag_be, dev_flow, error))
 					return -rte_errno;
-			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.tag_resource->action;
+			sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_META:
 			if (flow_dv_convert_action_set_meta
@@ -7228,7 +7246,7 @@ struct field_modify_info modify_tcp[] = {
 							      dev_flow->group);
 			if (flow->counter == NULL)
 				goto cnt_err;
-			dev_flow->dv.actions[actions_n++] =
+			sflow_act[sidx].actions[actions_n++] =
 				flow->counter->action;
 			action_flags |= MLX5_FLOW_ACTION_COUNT;
 			break;
@@ -7248,7 +7266,7 @@ struct field_modify_info modify_tcp[] = {
 						  " object.");
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
-			dev_flow->dv.actions[actions_n++] =
+			sflow_act[sidx].actions[actions_n++] =
 						priv->sh->pop_vlan_action;
 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
 			break;
@@ -7270,8 +7288,8 @@ struct field_modify_info modify_tcp[] = {
 			if (flow_dv_create_action_push_vlan
 					    (dev, attr, &vlan, dev_flow, error))
 				return -rte_errno;
-			dev_flow->dv.actions[actions_n++] =
-					   dev_flow->dv.push_vlan_res->action;
+			sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->push_vlan_res->action;
 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
@@ -7297,8 +7315,8 @@ struct field_modify_info modify_tcp[] = {
 							   attr->transfer,
 							   error))
 				return -rte_errno;
-			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.encap_decap->verbs_action;
+			sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->encap_decap->verbs_action;
 			action_flags |= actions->type ==
 					RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
 					MLX5_FLOW_ACTION_VXLAN_ENCAP :
@@ -7310,8 +7328,8 @@ struct field_modify_info modify_tcp[] = {
 							   attr->transfer,
 							   error))
 				return -rte_errno;
-			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.encap_decap->verbs_action;
+			sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->encap_decap->verbs_action;
 			action_flags |= actions->type ==
 					RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
 					MLX5_FLOW_ACTION_VXLAN_DECAP :
@@ -7323,16 +7341,16 @@ struct field_modify_info modify_tcp[] = {
 				if (flow_dv_create_action_raw_encap
 					(dev, actions, dev_flow, attr, error))
 					return -rte_errno;
-				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->encap_decap->verbs_action;
 			} else {
 				/* Handle encap without preceding decap. */
 				if (flow_dv_create_action_l2_encap
 				    (dev, actions, dev_flow, attr->transfer,
 				     error))
 					return -rte_errno;
-				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->encap_decap->verbs_action;
 			}
 			action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
 			break;
@@ -7347,8 +7365,8 @@ struct field_modify_info modify_tcp[] = {
 				if (flow_dv_create_action_l2_decap
 				    (dev, dev_flow, attr->transfer, error))
 					return -rte_errno;
-				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->encap_decap->verbs_action;
 			}
 			/* If decap is followed by encap, handle it at encap. */
 			action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
@@ -7379,8 +7397,8 @@ struct field_modify_info modify_tcp[] = {
 						 NULL,
 						 "cannot create jump action.");
 			}
-			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.jump->action;
+			sflow_act[sidx].actions[actions_n++] =
+				dev_flow->dv_handle->jump->action;
 			action_flags |= MLX5_FLOW_ACTION_JUMP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
@@ -7485,7 +7503,7 @@ struct field_modify_info modify_tcp[] = {
 						"or invalid parameters");
 			}
 			/* Set the meter action. */
-			dev_flow->dv.actions[actions_n++] =
+			sflow_act[sidx].actions[actions_n++] =
 				flow->meter->mfts->meter_action;
 			action_flags |= MLX5_FLOW_ACTION_METER;
 			break;
@@ -7508,19 +7526,19 @@ struct field_modify_info modify_tcp[] = {
 				if (flow_dv_modify_hdr_resource_register
 					(dev, mhdr_res, dev_flow, error))
 					return -rte_errno;
-				dev_flow->dv.actions[modify_action_position] =
-					dev_flow->dv.modify_hdr->verbs_action;
+				sflow_act[sidx].actions[modify_action_pos] =
+				dev_flow->dv_handle->modify_hdr->verbs_action;
 			}
 			break;
 		default:
 			break;
 		}
-		if (mhdr_res->actions_num &&
-		    modify_action_position == UINT32_MAX)
-			modify_action_position = actions_n++;
+		if (mhdr_res->actions_num && modify_action_pos == UINT32_MAX)
+			modify_action_pos = actions_n++;
 	}
-	dev_flow->dv.actions_n = actions_n;
-	dev_flow->actions = action_flags;
+	sflow_act[sidx].actions_n = actions_n;
+	sflow_act[sidx].transfer = dev_flow->transfer;
+	dev_flow->dv_handle->action_flags = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 		int item_type = items->type;
@@ -7705,9 +7723,9 @@ struct field_modify_info modify_tcp[] = {
 	}
 #ifdef RTE_LIBRTE_MLX5_DEBUG
 	MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
-					      dev_flow->dv.value.buf));
+					      dev_flow->dv_handle->value.buf));
 #endif
-	dev_flow->layers = item_flags;
+	dev_flow->dv_handle->layers = item_flags;
 	if (action_flags & MLX5_FLOW_ACTION_RSS)
 		flow_dv_hashfields_set(dev_flow);
 	/* Register matcher. */
@@ -7742,21 +7760,23 @@ struct field_modify_info modify_tcp[] = {
 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 		struct rte_flow_error *error)
 {
-	struct mlx5_flow_dv *dv;
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_dv_handle *dv_handle;
 	struct mlx5_priv *priv = dev->data->dev_private;
+	void *matcher_obj;
 	int n;
 	int err;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dv = &dev_flow->dv;
-		n = dv->actions_n;
-		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
-			if (dev_flow->transfer) {
-				dv->actions[n++] = priv->sh->esw_drop_action;
+	SLIST_FOREACH(dv_handle, &flow->handles, next) {
+		uint8_t sidx = dv_handle->sidx;
+		n = sflow_act[sidx].actions_n;
+
+		if (dv_handle->action_flags & MLX5_FLOW_ACTION_DROP) {
+			if (sflow_act[sidx].transfer) {
+				sflow_act[sidx].actions[n++] =
+						priv->sh->esw_drop_action;
 			} else {
-				dv->hrxq = mlx5_hrxq_drop_new(dev);
-				if (!dv->hrxq) {
+				dv_handle->hrxq = mlx5_hrxq_drop_new(dev);
+				if (!dv_handle->hrxq) {
 					rte_flow_error_set
 						(error, errno,
 						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -7764,26 +7784,27 @@ struct field_modify_info modify_tcp[] = {
 						 "cannot get drop hash queue");
 					goto error;
 				}
-				dv->actions[n++] = dv->hrxq->action;
+				sflow_act[sidx].actions[n++] =
+						dv_handle->hrxq->action;
 			}
-		} else if (dev_flow->actions &
+		} else if (dv_handle->action_flags &
 			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
 			struct mlx5_hrxq *hrxq;
 
 			MLX5_ASSERT(flow->rss.queue);
 			hrxq = mlx5_hrxq_get(dev, flow->rss.key,
 					     MLX5_RSS_HASH_KEY_LEN,
-					     dev_flow->hash_fields,
+					     sflow_act[sidx].hash_fields,
 					     (*flow->rss.queue),
 					     flow->rss.queue_num);
 			if (!hrxq) {
 				hrxq = mlx5_hrxq_new
 					(dev, flow->rss.key,
 					 MLX5_RSS_HASH_KEY_LEN,
-					 dev_flow->hash_fields,
+					 sflow_act[sidx].hash_fields,
 					 (*flow->rss.queue),
 					 flow->rss.queue_num,
-					 !!(dev_flow->layers &
+					 !!(dv_handle->layers &
 					    MLX5_FLOW_LAYER_TUNNEL));
 			}
 			if (!hrxq) {
@@ -7793,47 +7814,45 @@ struct field_modify_info modify_tcp[] = {
 					 "cannot get hash queue");
 				goto error;
 			}
-			dv->hrxq = hrxq;
-			dv->actions[n++] = dv->hrxq->action;
+			dv_handle->hrxq = hrxq;
+			sflow_act[sidx].actions[n++] = hrxq->action;
 		}
-		dv->flow =
-			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
-						  (void *)&dv->value, n,
-						  dv->actions);
-		if (!dv->flow) {
+		matcher_obj = dv_handle->matcher->matcher_object;
+		dv_handle->flow =
+			mlx5_glue->dv_create_flow(matcher_obj,
+						  (void *)&dv_handle->value,
+						  n, sflow_act[sidx].actions);
+		if (!dv_handle->flow) {
 			rte_flow_error_set(error, errno,
 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					   NULL,
 					   "hardware refuses to create flow");
 			goto error;
 		}
-		if (priv->vmwa_context &&
-		    dev_flow->dv.vf_vlan.tag &&
-		    !dev_flow->dv.vf_vlan.created) {
+		if (priv->vmwa_context && dv_handle->vf_vlan.tag &&
+		    !dv_handle->vf_vlan.created) {
 			/*
 			 * The rule contains the VLAN pattern.
 			 * For VF we are going to create VLAN
 			 * interface to make hypervisor set correct
 			 * e-Switch vport context.
 			 */
-			mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
+			mlx5_vlan_vmwa_acquire(dev, &dv_handle->vf_vlan);
 		}
 	}
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		struct mlx5_flow_dv *dv = &dev_flow->dv;
-		if (dv->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+	SLIST_FOREACH(dv_handle, &flow->handles, next) {
+		if (dv_handle->hrxq) {
+			if (dv_handle->action_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dv->hrxq);
-			dv->hrxq = NULL;
+				mlx5_hrxq_release(dev, dv_handle->hrxq);
+			dv_handle->hrxq = NULL;
 		}
-		if (dev_flow->dv.vf_vlan.tag &&
-		    dev_flow->dv.vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+		if (dv_handle->vf_vlan.tag && dv_handle->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dv_handle->vf_vlan);
 	}
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
@@ -7844,17 +7863,17 @@ struct field_modify_info modify_tcp[] = {
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_dv_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
 flow_dv_matcher_release(struct rte_eth_dev *dev,
-			struct mlx5_flow *flow)
+			struct mlx5_flow_dv_handle *handle)
 {
-	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+	struct mlx5_flow_dv_matcher *matcher = handle->matcher;
 
 	MLX5_ASSERT(matcher->matcher_object);
 	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
@@ -7877,17 +7896,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release an encap/decap resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_dv_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
+flow_dv_encap_decap_resource_release(struct mlx5_flow_dv_handle *handle)
 {
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource =
-						flow->dv.encap_decap;
+						handle->encap_decap;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
@@ -7910,17 +7929,17 @@ struct field_modify_info modify_tcp[] = {
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_dv_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
-				  struct mlx5_flow *flow)
+				  struct mlx5_flow_dv_handle *handle)
 {
-	struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
+	struct mlx5_flow_dv_jump_tbl_resource *cache_resource = handle->jump;
 	struct mlx5_flow_tbl_data_entry *tbl_data =
 			container_of(cache_resource,
 				     struct mlx5_flow_tbl_data_entry, jump);
@@ -7944,17 +7963,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release a modify-header resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_dv_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
+flow_dv_modify_hdr_resource_release(struct mlx5_flow_dv_handle *handle)
 {
 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
-						flow->dv.modify_hdr;
+						handle->modify_hdr;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
@@ -7975,17 +7994,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release port ID action resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_dv_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
+flow_dv_port_id_action_resource_release(struct mlx5_flow_dv_handle *handle)
 {
 	struct mlx5_flow_dv_port_id_action_resource *cache_resource =
-		flow->dv.port_id_action;
+						handle->port_id_action;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
@@ -8006,17 +8025,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release push vlan action resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_dv_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
+flow_dv_push_vlan_action_resource_release(struct mlx5_flow_dv_handle *handle)
 {
 	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
-		flow->dv.push_vlan_res;
+						handle->push_vlan_res;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
@@ -8046,27 +8065,24 @@ struct field_modify_info modify_tcp[] = {
 static void
 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow_dv *dv;
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_dv_handle *dv_handle;
 
 	if (!flow)
 		return;
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dv = &dev_flow->dv;
-		if (dv->flow) {
-			claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
-			dv->flow = NULL;
+	SLIST_FOREACH(dv_handle, &flow->handles, next) {
+		if (dv_handle->flow) {
+			claim_zero(mlx5_glue->dv_destroy_flow(dv_handle->flow));
+			dv_handle->flow = NULL;
 		}
-		if (dv->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+		if (dv_handle->hrxq) {
+			if (dv_handle->action_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dv->hrxq);
-			dv->hrxq = NULL;
+				mlx5_hrxq_release(dev, dv_handle->hrxq);
+			dv_handle->hrxq = NULL;
 		}
-		if (dev_flow->dv.vf_vlan.tag &&
-		    dev_flow->dv.vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+		if (dv_handle->vf_vlan.tag && dv_handle->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dv_handle->vf_vlan);
 	}
 }
 
@@ -8082,7 +8098,7 @@ struct field_modify_info modify_tcp[] = {
 static void
 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_dv_handle *dv_handle;
 
 	if (!flow)
 		return;
@@ -8095,24 +8111,24 @@ struct field_modify_info modify_tcp[] = {
 		mlx5_flow_meter_detach(flow->meter);
 		flow->meter = NULL;
 	}
-	while (!LIST_EMPTY(&flow->dev_flows)) {
-		dev_flow = LIST_FIRST(&flow->dev_flows);
-		LIST_REMOVE(dev_flow, next);
-		if (dev_flow->dv.matcher)
-			flow_dv_matcher_release(dev, dev_flow);
-		if (dev_flow->dv.encap_decap)
-			flow_dv_encap_decap_resource_release(dev_flow);
-		if (dev_flow->dv.modify_hdr)
-			flow_dv_modify_hdr_resource_release(dev_flow);
-		if (dev_flow->dv.jump)
-			flow_dv_jump_tbl_resource_release(dev, dev_flow);
-		if (dev_flow->dv.port_id_action)
-			flow_dv_port_id_action_resource_release(dev_flow);
-		if (dev_flow->dv.push_vlan_res)
-			flow_dv_push_vlan_action_resource_release(dev_flow);
-		if (dev_flow->dv.tag_resource)
-			flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
-		rte_free(dev_flow);
+	while (!SLIST_EMPTY(&flow->handles)) {
+		dv_handle = SLIST_FIRST(&flow->handles);
+		SLIST_REMOVE_HEAD(&flow->handles, next);
+		if (dv_handle->matcher)
+			flow_dv_matcher_release(dev, dv_handle);
+		if (dv_handle->encap_decap)
+			flow_dv_encap_decap_resource_release(dv_handle);
+		if (dv_handle->modify_hdr)
+			flow_dv_modify_hdr_resource_release(dv_handle);
+		if (dv_handle->jump)
+			flow_dv_jump_tbl_resource_release(dev, dv_handle);
+		if (dv_handle->port_id_action)
+			flow_dv_port_id_action_resource_release(dv_handle);
+		if (dv_handle->push_vlan_res)
+			flow_dv_push_vlan_action_resource_release(dv_handle);
+		if (dv_handle->tag_resource)
+			flow_dv_tag_release(dev, dv_handle->tag_resource);
+		rte_free(dv_handle);
 	}
 }
 
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v2 5/6] net/mlx5: remove the DV support macro checking
  2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
                     ` (3 preceding siblings ...)
  2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 4/6] net/mlx5: introduce handle structure for DV flows Bing Zhao
@ 2020-02-04 11:33   ` Bing Zhao
  2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 6/6] net/mlx5: do not save device flow matcher value Bing Zhao
                     ` (2 subsequent siblings)
  7 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-02-04 11:33 UTC (permalink / raw)
  To: orika, viacheslavo, rasland, matan; +Cc: dev

Some structures are defined in the mlx5_flow header file and only
used for flows of the DV driver type. When using the inbox driver,
the DV mode is not supported. But the code is common, and there is
no need for a pre-processing macro since all of the code can be
compiled anyway.
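
A minimal, self-contained sketch of the pattern (the enum and function
names below are invented stand-ins, not the driver's own symbols): the
compile-time guard around the DV-specific branch becomes a plain
runtime check once both branches always compile.

	#include <stdio.h>

	enum flow_drv_type { FLOW_TYPE_VERBS, FLOW_TYPE_DV };

	/* Previously the DV branch would sit behind a pre-processing
	 * macro and be compiled out for the inbox driver; here it is
	 * selected at runtime instead. */
	static void
	walk_sub_flows(enum flow_drv_type type)
	{
		if (type == FLOW_TYPE_DV)
			printf("iterate the DV handles list\n");
		else
			printf("iterate the Verbs device flows list\n");
	}

	int
	main(void)
	{
		walk_sub_flows(FLOW_TYPE_DV);
		walk_sub_flows(FLOW_TYPE_VERBS);
		return 0;
	}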

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c | 42 ++++++++----------------------------------
 drivers/net/mlx5/mlx5_flow.h |  2 --
 2 files changed, 8 insertions(+), 36 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 1121904..2b2ba20 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -716,7 +716,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  */
 static void
 flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
-		       enum mlx5_flow_drv_type type __rte_unused,
+		       enum mlx5_flow_drv_type type,
 		       void *sub_flow)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
@@ -726,7 +726,6 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	uint64_t layers;
 	unsigned int i;
 
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	if (type == MLX5_FLOW_TYPE_DV) {
 		struct mlx5_flow_dv_handle *handle = sub_flow;
 		mark = !!(handle->action_flags &
@@ -735,16 +734,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
 		flow = handle->m_flow;
 	} else {
-#endif
 		struct mlx5_flow *dev_flow = sub_flow;
 		mark = !!(dev_flow->actions &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
 		layers = dev_flow->layers;
 		tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 		flow = dev_flow->flow;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	}
-#endif
 	for (i = 0; i != flow->rss.queue_num; ++i) {
 		int idx = (*flow->rss.queue)[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
@@ -793,15 +789,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_dv_handle *handle;
 	enum mlx5_flow_drv_type type = flow->drv_type;
 
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	struct mlx5_flow_dv_handle *handle;
 	if (type == MLX5_FLOW_TYPE_DV)
 		SLIST_FOREACH(handle, &flow->handles, next)
 			flow_drv_rxq_flags_set(dev, type, (void *)handle);
 	else
-#endif
 		LIST_FOREACH(dev_flow, &flow->dev_flows, next)
 			flow_drv_rxq_flags_set(dev, type, (void *)dev_flow);
 }
@@ -820,7 +814,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  */
 static void
 flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
-			enum mlx5_flow_drv_type type __rte_unused,
+			enum mlx5_flow_drv_type type,
 			void *sub_flow)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
@@ -831,7 +825,6 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	unsigned int i;
 
 	MLX5_ASSERT(dev->data->dev_started);
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	if (type == MLX5_FLOW_TYPE_DV) {
 		struct mlx5_flow_dv_handle *handle = sub_flow;
 		mark = !!(handle->action_flags &
@@ -840,16 +833,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
 		flow = handle->m_flow;
 	} else {
-#endif
 		struct mlx5_flow *dev_flow = sub_flow;
 		mark = !!(dev_flow->actions &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
 		layers = dev_flow->layers;
 		tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
 		flow = dev_flow->flow;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	}
-#endif
 	for (i = 0; i != flow->rss.queue_num; ++i) {
 		int idx = (*flow->rss.queue)[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
@@ -894,15 +884,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_dv_handle *handle;
 	enum mlx5_flow_drv_type type = flow->drv_type;
 
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	struct mlx5_flow_dv_handle *handle;
 	if (type == MLX5_FLOW_TYPE_DV)
 		SLIST_FOREACH(handle, &flow->handles, next)
 			flow_drv_rxq_flags_trim(dev, type, (void *)handle);
 	else
-#endif
 		LIST_FOREACH(dev_flow, &flow->dev_flows, next)
 			flow_drv_rxq_flags_trim(dev, type, (void *)dev_flow);
 }
@@ -2402,7 +2390,6 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			     struct rte_flow *flow)
 {
 	struct mlx5_flow *dev_flow;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	struct mlx5_flow_dv_handle *handle;
 	enum mlx5_flow_drv_type type = flow->drv_type;
 
@@ -2411,13 +2398,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			if (handle->qrss_id)
 				flow_qrss_free_id(dev, handle->qrss_id);
 	} else {
-#endif
 		LIST_FOREACH(dev_flow, &flow->dev_flows, next)
 			if (dev_flow->qrss_id)
 				flow_qrss_free_id(dev, dev_flow->qrss_id);
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	}
-#endif
 }
 
 static int
@@ -3509,18 +3493,14 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		return -rte_errno;
 	dev_flow->external = external;
 	dev_flow->flow = flow;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	if (flow->drv_type == MLX5_FLOW_TYPE_DV) {
 		SLIST_INSERT_HEAD(&flow->handles, dev_flow->dv_handle, next);
 		dev_flow->dv_handle->sidx = flow->sub_flows++;
 		dev_flow->dv_handle->m_flow = flow;
 	} else {
-#endif
 		/* Subflow obj was created, we must include one in the list. */
 		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	}
-#endif
 	if (sub_flow)
 		*sub_flow = dev_flow;
 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
@@ -3982,11 +3962,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			 * reallocation becomes possible (for example, for
 			 * other flows in other threads).
 			 */
-			dev_flow->qrss_id = qrss_id;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 			if (flow->drv_type == MLX5_FLOW_TYPE_DV)
 				dev_flow->dv_handle->qrss_id = qrss_id;
-#endif
+			else
+				dev_flow->qrss_id = qrss_id;
 			qrss_id = 0;
 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
 						   error);
@@ -4098,11 +4077,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			ret = -rte_errno;
 			goto exit;
 		}
-		dev_flow->mtr_flow_id = mtr_tag_id;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 		if (flow->drv_type == MLX5_FLOW_TYPE_DV)
 			dev_flow->dv_handle->mtr_flow_id = mtr_tag_id;
-#endif
+		else
+			dev_flow->mtr_flow_id = mtr_tag_id;
 		/* Prepare the suffix flow match pattern. */
 		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
 			     act_size);
@@ -4337,19 +4315,15 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 					    actions_hairpin_tx.actions, error);
 		if (!dev_flow)
 			goto error;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 		if (flow->drv_type == MLX5_FLOW_TYPE_DV) {
 			SLIST_INSERT_HEAD(&flow->handles,
 					  dev_flow->dv_handle, next);
 			dev_flow->dv_handle->sidx = flow->sub_flows++;
 			dev_flow->dv_handle->m_flow = flow;
 		} else {
-#endif
 			dev_flow->flow = flow;
 			LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 		}
-#endif
 		dev_flow->external = 0;
 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
 					 items_tx.items,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 10ac9c3..5e517c3 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -579,9 +579,7 @@ struct mlx5_flow {
 	uint32_t group; /**< The group index. */
 	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
 	union {
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 		struct mlx5_flow_dv_handle *dv_handle;
-#endif
 		struct mlx5_flow_verbs verbs;
 	};
 	union {
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v2 6/6] net/mlx5: do not save device flow matcher value
  2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
                     ` (4 preceding siblings ...)
  2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 5/6] net/mlx5: remove the DV support macro checking Bing Zhao
@ 2020-02-04 11:33   ` Bing Zhao
  2020-03-24 15:16   ` [dpdk-dev] [PATCH v3 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
  2020-03-24 15:33   ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
  7 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-02-04 11:33 UTC (permalink / raw)
  To: orika, viacheslavo, rasland, matan; +Cc: dev

The matcher value is a series of bits in a format defined by the
hardware interface. The PMD needs to translate the packet header
into the matcher format, which is then used to create the flow with
the lower layer driver.
This matcher value is only used when creating a flow; when
destroying it, only the lower layer driver object related to the
matcher needs to be released. So there is no need to save such a
huge block of information in a device flow.
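
A minimal sketch of the idea, assuming a fixed matcher size (the
constant and the names are placeholders, not the real mlx5
definitions): the translated value lives in a scratch buffer for the
duration of the create call only, while the persistent handle keeps
just what is needed at destroy time.

	#include <stdint.h>
	#include <string.h>

	/* Placeholder for MLX5_ST_SZ_BYTES(fte_match_param). */
	#define MATCH_PARAM_SZ 512

	/* Scratch buffer reused for every flow creation. */
	static uint8_t match_value[MATCH_PARAM_SZ];

	/* Persistent per-flow state: only the object needed to
	 * release the matcher at destroy time. */
	struct handle {
		void *matcher_obj;
	};

	static void
	create_flow(struct handle *h)
	{
		/* Clear the buffer in case of dirty content left by a
		 * previous flow. */
		memset(match_value, 0, sizeof(match_value));
		/* ... translate the packet header items into
		 * match_value, then create the HW flow from
		 * (h->matcher_obj, match_value) ... */
		(void)h;
	}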

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.h    |  2 --
 drivers/net/mlx5/mlx5_flow_dv.c | 28 ++++++++++++++++++++--------
 2 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 5e517c3..af30438 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -513,8 +513,6 @@ struct mlx5_flow_dv {
 	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
 	/* Flow DV api: */
 	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
-	struct mlx5_flow_dv_match_params value;
-	/**< Holds the value that the packet is compared to. */
 	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
 	/**< Pointer to encap/decap resource in cache. */
 	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 33a3d70..111b01d 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -56,7 +56,7 @@
 
 #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
 					  sizeof(struct rte_flow_item_ipv4))
-/* VLAN header definitions */
+/* VLAN header definitions. */
 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
@@ -75,15 +75,23 @@
 	uint32_t attr;
 };
 
+/* Maximal number of global temporary device flows. */
+#define MLX5DV_FLOW_HANDLE_MAX_NUM 8
 /* Global temporary device flow. */
 struct mlx5_flow sflow;
 /* Action lists of the global subsidiary device flows. */
 struct {
 	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
+	/**< Action list. */
 	uint64_t hash_fields;
+	/**< Verbs hash Rx queue hash fields. */
+	struct mlx5_flow_dv_match_params value;
+	/**< Holds the value that the packet is compared to. */
 	int actions_n;
-	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
-} sflow_act[8];
+	/**< Number of actions. */
+	uint8_t transfer;
+	/**< 1 if the flow is E-Switch flow. */
+} sflow_act[MLX5DV_FLOW_HANDLE_MAX_NUM];
 
 /**
  * Initialize flow attributes structure according to flow items' types.
@@ -5127,7 +5135,6 @@ struct field_modify_info modify_tcp[] = {
 	}
 	dev_flow->ingress = attr->ingress;
 	dev_flow->transfer = attr->transfer;
-	dv_handle->value.size = MLX5_ST_SZ_BYTES(fte_match_param);
 	/* DV support is already defined; the compiler is happy for inbox driver. */
 	dev_flow->dv_handle = dv_handle;
 	return dev_flow;
@@ -7097,7 +7104,7 @@ struct field_modify_info modify_tcp[] = {
 	union mlx5_flow_tbl_key tbl_key;
 	uint32_t modify_action_pos = UINT32_MAX;
 	void *match_mask = matcher.mask.buf;
-	void *match_value = dev_flow->dv_handle->value.buf;
+	void *match_value = sflow_act[sidx].value.buf;
 	uint8_t next_protocol = 0xff;
 	struct rte_vlan_hdr vlan = { 0 };
 	uint32_t table;
@@ -7539,6 +7546,11 @@ struct field_modify_info modify_tcp[] = {
 	sflow_act[sidx].actions_n = actions_n;
 	sflow_act[sidx].transfer = dev_flow->transfer;
 	dev_flow->dv_handle->action_flags = action_flags;
+	/* The matcher size is fixed for now. */
+	sflow_act[sidx].value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+	/* Clear buffer in case of dirty content. */
+	memset(sflow_act[sidx].value.buf, 0,
+	       MLX5_ST_SZ_BYTES(fte_match_param));
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 		int item_type = items->type;
@@ -7723,7 +7735,7 @@ struct field_modify_info modify_tcp[] = {
 	}
 #ifdef RTE_LIBRTE_MLX5_DEBUG
 	MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
-					      dev_flow->dv_handle->value.buf));
+					      sflow_act[sidx].value.buf));
 #endif
 	dev_flow->dv_handle->layers = item_flags;
 	if (action_flags & MLX5_FLOW_ACTION_RSS)
@@ -7820,8 +7832,8 @@ struct field_modify_info modify_tcp[] = {
 		matcher_obj = dv_handle->matcher->matcher_object;
 		dv_handle->flow =
 			mlx5_glue->dv_create_flow(matcher_obj,
-						  (void *)&dv_handle->value,
-						  n, sflow_act[sidx].actions);
+						  &sflow_act[sidx].value, n,
+						  sflow_act[sidx].actions);
 		if (!dv_handle->flow) {
 			rte_flow_error_set(error, errno,
 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v3 0/4] net/mlx5: move to non-cached mode for flow rules
  2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
                     ` (5 preceding siblings ...)
  2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 6/6] net/mlx5: do not save device flow matcher value Bing Zhao
@ 2020-03-24 15:16   ` Bing Zhao
  2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 1/4] net/mlx5: change operations for non-cached flows Bing Zhao
                       ` (3 more replies)
  2020-03-24 15:33   ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
  7 siblings, 4 replies; 26+ messages in thread
From: Bing Zhao @ 2020-03-24 15:16 UTC (permalink / raw)
  To: orika, rasland, matan; +Cc: viacheslavo, dev

This patch set will remove the flow rules cache and move to the
non-cached mode for both DV and Verbs modes.

In the device closing stage, all the software resources for the
created flows will be freed and the corresponding hardware resources
will be released. This reduces the total memory cost and makes the
behavior of the mlx5 PMD comply fully with the ethdev API
expectations.

After closing a device, all the flow rules stored in the application
layer will no longer be valid. The application should synchronize
its database and must not try to destroy any rule on this device.
After a device restart, all the needed flow rules should be
reinserted via the create routine of the rte_flow lib.
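
From the application side the expected sequence is roughly the sketch
below (the rule parameters are placeholders; a real application would
replay the rules from its own database after restarting the port):

	#include <rte_ethdev.h>
	#include <rte_flow.h>

	/* rte_flow pointers obtained before the device was closed or
	 * restarted are stale, so every needed rule is re-created. */
	static struct rte_flow *
	replay_rule(uint16_t port_id, const struct rte_flow_attr *attr,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[])
	{
		struct rte_flow_error error;

		return rte_flow_create(port_id, attr, pattern, actions,
				       &error);
	}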

---
v2 Changes:
    Fix the compilation error with the MLX5 debug mode in the 4th commit
    of "net/mlx5: introduce handle structure for DV flows".
v3 Changes:
    Refactor the device flow related structures to support non-cached
    mode for both Verbs and DV flows.
---

Bing Zhao (4):
  net/mlx5: change operations for non-cached flows
  net/mlx5: reorganize mlx5 flow structures
  net/mlx5: separate the flow handle resource
  net/mlx5: check device stat before creating flow

 drivers/net/mlx5/mlx5.c            |  18 ++-
 drivers/net/mlx5/mlx5.h            |   9 +-
 drivers/net/mlx5/mlx5_flow.c       | 197 +++++++++++++++++------
 drivers/net/mlx5/mlx5_flow.h       | 179 ++++++++++++++-------
 drivers/net/mlx5/mlx5_flow_dv.c    | 311 ++++++++++++++++++++-----------------
 drivers/net/mlx5/mlx5_flow_verbs.c | 156 +++++++++++--------
 drivers/net/mlx5/mlx5_trigger.c    |  26 ++--
 7 files changed, 573 insertions(+), 323 deletions(-)

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v3 1/4] net/mlx5: change operations for non-cached flows
  2020-03-24 15:16   ` [dpdk-dev] [PATCH v3 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
@ 2020-03-24 15:16     ` Bing Zhao
  2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 2/4] net/mlx5: reorganize mlx5 flow structures Bing Zhao
                       ` (2 subsequent siblings)
  3 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-03-24 15:16 UTC (permalink / raw)
  To: orika, rasland, matan; +Cc: viacheslavo, dev

When stopping an mlx5 device, all the inserted flows will be flushed
since they are in non-cached mode, and no further action needs to be
taken for these flows in the device closing stage.
If the device restarts after being stopped, no flow in non-cached
mode will be re-inserted.
The flush operation through the rte_flow interface will remain the
same, and all the flows will still be flushed actively.
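
A self-contained analogue of the flush helper introduced below
(simplified; the real function also releases each flow's software and
hardware resources through flow_list_destroy()):

	#include <stdbool.h>
	#include <stdio.h>
	#include <sys/queue.h>

	struct rule {
		TAILQ_ENTRY(rule) next;
	};
	TAILQ_HEAD(rule_list, rule);

	static void
	flush_rules(struct rule_list *list, bool active)
	{
		unsigned int num_flushed = 0;

		while (!TAILQ_EMPTY(list)) {
			struct rule *r = TAILQ_FIRST(list);

			TAILQ_REMOVE(list, r, next);
			/* ... destroy r and free its resources ... */
			num_flushed++;
		}
		/* Report only when flushing actively, e.g. from the
		 * device stop path. */
		if (active)
			printf("%u rules flushed before stopping\n",
			       num_flushed);
	}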

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5.c         | 11 +++++++++-
 drivers/net/mlx5/mlx5.h         |  5 ++++-
 drivers/net/mlx5/mlx5_flow.c    | 48 ++++++++++++++++++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_trigger.c | 25 +++++++++++++--------
 4 files changed, 75 insertions(+), 14 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 94aaa60..0613f70 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1234,8 +1234,17 @@ struct mlx5_flow_id_pool *
 	/* In case mlx5_dev_stop() has not been called. */
 	mlx5_dev_interrupt_handler_uninstall(dev);
 	mlx5_dev_interrupt_handler_devx_uninstall(dev);
+	/*
+	 * If the default mreg copy action was removed at the stop stage,
+	 * the search will find nothing and nothing more will be done.
+	 */
+	mlx5_flow_stop_default(dev);
 	mlx5_traffic_disable(dev);
-	mlx5_flow_flush(dev, NULL);
+	/*
+	 * If all the flows are already flushed in the device stop stage,
+	 * then this call will return directly without any action.
+	 */
+	mlx5_flow_list_flush(dev, &priv->flows, true);
 	mlx5_flow_meter_flush(dev, NULL);
 	/* Prevent crashes when queues are still in use. */
 	dev->rx_pkt_burst = removed_rx_burst;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index d7c519b..98e5fa5 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -712,7 +712,8 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
 				  struct rte_flow_error *error);
 int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 		      struct rte_flow_error *error);
-void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list);
+void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list,
+			  bool active);
 int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
 int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 		    const struct rte_flow_action *action, void *data,
@@ -725,6 +726,8 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 			 void *arg);
 int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list);
 void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
+int mlx5_flow_start_default(struct rte_eth_dev *dev);
+void mlx5_flow_stop_default(struct rte_eth_dev *dev);
 int mlx5_flow_verify(struct rte_eth_dev *dev);
 int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);
 int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 2ef6558..81a85ec 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -8,6 +8,7 @@
 #include <stdalign.h>
 #include <stdint.h>
 #include <string.h>
+#include <stdbool.h>
 
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -4449,15 +4450,25 @@ struct rte_flow *
  *   Pointer to Ethernet device.
  * @param list
  *   Pointer to a TAILQ flow list.
+ * @param active
+ *   If flushing is called actively.
  */
 void
-mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
+mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list,
+		     bool active)
 {
+	uint32_t num_flushed = 0;
+
 	while (!TAILQ_EMPTY(list)) {
 		struct rte_flow *flow;
 
 		flow = TAILQ_FIRST(list);
 		flow_list_destroy(dev, list, flow);
+		num_flushed++;
+	}
+	if (active) {
+		DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
+			dev->data->port_id, num_flushed);
 	}
 }
 
@@ -4523,6 +4534,37 @@ struct rte_flow *
 }
 
 /**
+ * Stop all default actions for flows.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_flow_stop_default(struct rte_eth_dev *dev)
+{
+	flow_mreg_del_default_copy_action(dev);
+}
+
+/**
+ * Start all default actions for flows.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_start_default(struct rte_eth_dev *dev)
+{
+	struct rte_flow_error error;
+
+	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
+	return flow_mreg_add_default_copy_action(dev, &error);
+}
+
+/**
  * Verify the flow list is empty
  *
  * @param dev
@@ -4737,7 +4779,7 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	mlx5_flow_list_flush(dev, &priv->flows);
+	mlx5_flow_list_flush(dev, &priv->flows, false);
 	return 0;
 }
 
@@ -5179,7 +5221,7 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	mlx5_flow_list_flush(dev, &priv->flows);
+	mlx5_flow_list_flush(dev, &priv->flows, false);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 571b7a0..b686ee8 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -269,7 +269,6 @@
 int
 mlx5_dev_start(struct rte_eth_dev *dev)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
 	int ret;
 	int fine_inline;
 
@@ -318,14 +317,19 @@
 	mlx5_stats_init(dev);
 	ret = mlx5_traffic_enable(dev);
 	if (ret) {
-		DRV_LOG(DEBUG, "port %u failed to set defaults flows",
+		DRV_LOG(ERR, "port %u failed to set defaults flows",
 			dev->data->port_id);
 		goto error;
 	}
-	ret = mlx5_flow_start(dev, &priv->flows);
+	/*
+	 * In non-cached mode, only the default mreg copy action needs to be
+	 * started, since no flow created by the application exists anymore.
+	 * But it is worth wrapping the interface for further usage.
+	 */
+	ret = mlx5_flow_start_default(dev);
 	if (ret) {
-		DRV_LOG(DEBUG, "port %u failed to set flows",
-			dev->data->port_id);
+		DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
+			dev->data->port_id, strerror(rte_errno));
 		goto error;
 	}
 	rte_wmb();
@@ -339,7 +343,7 @@
 	ret = rte_errno; /* Save rte_errno before cleanup. */
 	/* Rollback. */
 	dev->data->dev_started = 0;
-	mlx5_flow_stop(dev, &priv->flows);
+	mlx5_flow_stop_default(dev);
 	mlx5_traffic_disable(dev);
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
@@ -369,8 +373,11 @@
 	mlx5_mp_req_stop_rxtx(dev);
 	usleep(1000 * priv->rxqs_n);
 	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
-	mlx5_flow_stop(dev, &priv->flows);
+	mlx5_flow_stop_default(dev);
+	/* Control flows for default traffic can be removed first. */
 	mlx5_traffic_disable(dev);
+	/* All RX queue flags will be cleared in the flush interface. */
+	mlx5_flow_list_flush(dev, &priv->flows, true);
 	mlx5_rx_intr_vec_disable(dev);
 	mlx5_dev_interrupt_handler_uninstall(dev);
 	mlx5_txq_stop(dev);
@@ -529,7 +536,7 @@
 	return 0;
 error:
 	ret = rte_errno; /* Save rte_errno before cleanup. */
-	mlx5_flow_list_flush(dev, &priv->ctrl_flows);
+	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
 	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -546,7 +553,7 @@
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	mlx5_flow_list_flush(dev, &priv->ctrl_flows);
+	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
 }
 
 /**
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v3 2/4] net/mlx5: reorganize mlx5 flow structures
  2020-03-24 15:16   ` [dpdk-dev] [PATCH v3 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
  2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 1/4] net/mlx5: change operations for non-cached flows Bing Zhao
@ 2020-03-24 15:16     ` Bing Zhao
  2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 3/4] net/mlx5: separate the flow handle resource Bing Zhao
  2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 4/4] net/mlx5: check device stat before creating flow Bing Zhao
  3 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-03-24 15:16 UTC (permalink / raw)
  To: orika, rasland, matan; +Cc: viacheslavo, dev

Common structures used for creating and destroying mlx5 flows are
reorganized in order to separate the parts needed only for
destroying from all the other items.
The "mlx5_flow" structure will contain the items common to DV and
Verbs flows, plus the items specific to DV / Verbs only. These items
will only be used when creating a flow.
At the end of "mlx5_flow", a nested structure "mlx5_flow_handle" is
located. It contains all the items used both for creating and
destroying a flow, again split into common items and DV / Verbs
specific items.
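
A distilled sketch of the resulting layout (the field lists are
abridged and the "_sketch" names are illustrative; see the diff below
for the full definitions):

	#include <stdint.h>

	/* Items needed both to create and to destroy a sub-flow. */
	struct handle_sketch {
		uint64_t layers;    /* present layers */
		uint64_t act_flags; /* detected actions */
		void *ib_flow;      /* released at destroy time */
	};

	/* Create-only items stay in the outer device flow. */
	struct dev_flow_sketch {
		uint64_t hash_fields;
		union { /* driver-specific create-only resources */
			void *dv;    /* stands in for mlx5_flow_resource_dv */
			void *verbs; /* stands in for mlx5_flow_resource_verbs */
		};
		struct handle_sketch handle; /* nested at the end */
	};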

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c       |  43 ++++----
 drivers/net/mlx5/mlx5_flow.h       | 108 ++++++++++----------
 drivers/net/mlx5/mlx5_flow_dv.c    | 197 +++++++++++++++++++------------------
 drivers/net/mlx5/mlx5_flow_verbs.c |  89 ++++++++---------
 4 files changed, 223 insertions(+), 214 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 81a85ec..230f071 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -720,9 +720,9 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->actions &
+	const int mark = !!(dev_flow->handle.act_flags &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+	const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
 
 	for (i = 0; i != flow->rss.queue_num; ++i) {
@@ -751,7 +751,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			/* Increase the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
 				if ((tunnels_info[j].tunnel &
-				     dev_flow->layers) ==
+				     dev_flow->handle.layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]++;
 					break;
@@ -793,9 +793,9 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->actions &
+	const int mark = !!(dev_flow->handle.act_flags &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+	const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
 
 	MLX5_ASSERT(dev->data->dev_started);
@@ -820,7 +820,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			/* Decrease the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
 				if ((tunnels_info[j].tunnel &
-				     dev_flow->layers) ==
+				     dev_flow->handle.layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]--;
 					break;
@@ -2312,8 +2312,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	struct mlx5_flow *dev_flow;
 
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		if (dev_flow->qrss_id)
-			flow_qrss_free_id(dev, dev_flow->qrss_id);
+		if (dev_flow->handle.qrss_id)
+			flow_qrss_free_id(dev, dev_flow->handle.qrss_id);
 }
 
 static int
@@ -2696,18 +2696,22 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	uint64_t layers = 0;
 
-	/* If no decap actions, use the layers directly. */
-	if (!(dev_flow->actions & MLX5_FLOW_ACTION_DECAP))
-		return dev_flow->layers;
+	/*
+	 * The layers bits could be cached in a local variable, but usually
+	 * the compiler will do this optimization for us anyway.
+	 * If no decap actions, use the layers directly.
+	 */
+	if (!(dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DECAP))
+		return dev_flow->handle.layers;
 	/* Convert L3 layers with decap action. */
-	if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+	if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
-	else if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+	else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
 	/* Convert L4 layers with decap action.  */
-	if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
+	if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
 		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
-	else if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
+	else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
 		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
 	return layers;
 }
@@ -3453,7 +3457,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	 * flow may need some user defined item layer flags.
 	 */
 	if (prefix_layers)
-		dev_flow->layers = prefix_layers;
+		dev_flow->handle.layers = prefix_layers;
 	if (sub_flow)
 		*sub_flow = dev_flow;
 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
@@ -3968,8 +3972,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			 * reallocation becomes possible (for example, for
 			 * other flows in other threads).
 			 */
-			dev_flow->qrss_id = qrss_id;
-			qrss_id = 0;
+			dev_flow->handle.qrss_id = qrss_id;
 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
 						   error);
 			if (ret < 0)
@@ -3984,6 +3987,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 					      external, error);
 		if (ret < 0)
 			goto exit;
+		/* The qrss ID is freed at exit only if creation failed. */
+		qrss_id = 0;
 		MLX5_ASSERT(dev_flow);
 	}
 
@@ -4080,7 +4085,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			ret = -rte_errno;
 			goto exit;
 		}
-		dev_flow->mtr_flow_id = mtr_tag_id;
+		dev_flow->handle.mtr_flow_id = mtr_tag_id;
 		/* Setting the sfx group atrr. */
 		sfx_attr.group = sfx_attr.transfer ?
 				(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 13c8589..f3aea53 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -464,25 +464,28 @@ struct mlx5_flow_tbl_data_entry {
 	/**< jump resource, at most one for each table created. */
 };
 
-/*
- * Max number of actions per DV flow.
- * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
- * In rdma-core file providers/mlx5/verbs.c
- */
-#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
+/* Verbs specification header. */
+struct ibv_spec_header {
+	enum ibv_flow_spec_type type;
+	uint16_t size;
+};
+
+struct mlx5_flow_rss {
+	uint32_t level;
+	uint32_t queue_num; /**< Number of entries in @p queue. */
+	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
+	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+};
 
-/* DV flows structure. */
-struct mlx5_flow_dv {
-	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
+/** Device flow handle structure for DV mode only. */
+struct mlx5_flow_handle_dv {
 	/* Flow DV api: */
 	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
-	struct mlx5_flow_dv_match_params value;
-	/**< Holds the value that the packet is compared to. */
 	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
 	/**< Pointer to encap/decap resource in cache. */
 	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
 	/**< Pointer to modify header resource in cache. */
-	struct ibv_flow *flow; /**< Installed flow. */
 	struct mlx5_flow_dv_jump_tbl_resource *jump;
 	/**< Pointer to the jump action resource. */
 	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
@@ -493,65 +496,64 @@ struct mlx5_flow_dv {
 	/**< Pointer to push VLAN action resource in cache. */
 	struct mlx5_flow_dv_tag_resource *tag_resource;
 	/**< pointer to the tag action. */
+};
+
+/** Device flow handle structure: used both for creating & destroying. */
+struct mlx5_flow_handle {
+	uint64_t layers;
+	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
+	uint64_t act_flags;
+	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
+	void *ib_flow; /**< Verbs flow pointer. */
+	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
+	struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
+	union {
+		uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
+		uint32_t mtr_flow_id; /**< Unique meter match flow id. */
+	};
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
-	/**< Action list. */
+	struct mlx5_flow_handle_dv dvh;
 #endif
-	int actions_n; /**< number of actions. */
 };
 
-/* Verbs specification header. */
-struct ibv_spec_header {
-	enum ibv_flow_spec_type type;
-	uint16_t size;
-};
+/*
+ * Max number of actions per DV flow.
+ * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
+ * in rdma-core file providers/mlx5/verbs.c.
+ */
+#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
 
-/** Handles information leading to a drop fate. */
-struct mlx5_flow_verbs {
-	LIST_ENTRY(mlx5_flow_verbs) next;
-	unsigned int size; /**< Size of the attribute. */
-	struct {
-		struct ibv_flow_attr *attr;
-		/**< Pointer to the Specification buffer. */
-		uint8_t *specs; /**< Pointer to the specifications. */
-	};
-	struct ibv_flow *flow; /**< Verbs flow pointer. */
-	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
-	struct mlx5_vf_vlan vf_vlan;
-	/**< Structure for VF VLAN workaround. */
+/** Device flow structure only for DV flow creation. */
+struct mlx5_flow_resource_dv {
+	uint32_t group; /**< The group index. */
+	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
+	int actions_n; /**< number of actions. */
+	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */
+	struct mlx5_flow_dv_match_params value;
+	/**< Holds the value that the packet is compared to. */
 };
 
-struct mlx5_flow_rss {
-	uint32_t level;
-	uint32_t queue_num; /**< Number of entries in @p queue. */
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
-	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
-	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+/** Device flow structure only for Verbs flow creation. */
+struct mlx5_flow_resource_verbs {
+	unsigned int size; /**< Size of the attribute. */
+	struct ibv_flow_attr *attr; /**< Pointer to the Specification buffer. */
+	uint8_t *specs; /**< Pointer to the specifications. */
 };
 
 /** Device flow structure. */
 struct mlx5_flow {
-	LIST_ENTRY(mlx5_flow) next;
+	LIST_ENTRY(mlx5_flow) next; /**< Pointer to next device flow. */
 	struct rte_flow *flow; /**< Pointer to the main flow. */
-	uint64_t layers;
-	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
-	uint64_t actions;
-	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
 	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+	bool external; /**< true if the flow is created external to PMD. */
 	uint8_t ingress; /**< 1 if the flow is ingress. */
-	uint32_t group; /**< The group index. */
-	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
 	union {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		struct mlx5_flow_dv dv;
+		struct mlx5_flow_resource_dv dv;
 #endif
-		struct mlx5_flow_verbs verbs;
-	};
-	union {
-		uint32_t qrss_id; /**< Uniqie Q/RSS suffix subflow tag. */
-		uint32_t mtr_flow_id; /**< Unique meter match flow id. */
+		struct mlx5_flow_resource_verbs verbs;
 	};
-	bool external; /**< true if the flow is created external to PMD. */
+	struct mlx5_flow_handle handle;
 };
 
 /* Flow meter state. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 2090631..d1eec96 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -92,20 +92,22 @@
 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
 		  struct mlx5_flow *dev_flow, bool tunnel_decap)
 {
+	uint64_t layers = dev_flow->handle.layers;
+
 	/*
 	 * If layers is already initialized, it means this dev_flow is the
 	 * suffix flow, the layers flags is set by the prefix flow. Need to
 	 * use the layer flags from prefix flow as the suffix flow may not
 	 * have the user defined items as the flow is split.
 	 */
-	if (dev_flow->layers) {
-		if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+	if (layers) {
+		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
 			attr->ipv4 = 1;
-		else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
+		else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
 			attr->ipv6 = 1;
-		if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
+		if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
 			attr->tcp = 1;
-		else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
+		else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
 			attr->udp = 1;
 		attr->valid = 1;
 		return;
@@ -2377,7 +2379,7 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
 	struct mlx5dv_dr_domain *domain;
 
-	resource->flags = dev_flow->group ? 0 : 1;
+	resource->flags = dev_flow->dv.group ? 0 : 1;
 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
 		domain = sh->fdb_domain;
 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
@@ -2397,7 +2399,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.encap_decap = cache_resource;
+			dev_flow->handle.dvh.encap_decap = cache_resource;
 			return 0;
 		}
 	}
@@ -2423,7 +2425,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
-	dev_flow->dv.encap_decap = cache_resource;
+	dev_flow->handle.dvh.encap_decap = cache_resource;
 	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2474,7 +2476,7 @@ struct field_modify_info modify_tcp[] = {
 			(void *)&tbl_data->jump, cnt);
 	}
 	rte_atomic32_inc(&tbl_data->jump.refcnt);
-	dev_flow->dv.jump = &tbl_data->jump;
+	dev_flow->handle.dvh.jump = &tbl_data->jump;
 	return 0;
 }
 
@@ -2512,7 +2514,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.port_id_action = cache_resource;
+			dev_flow->handle.dvh.port_id_action = cache_resource;
 			return 0;
 		}
 	}
@@ -2540,7 +2542,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
-	dev_flow->dv.port_id_action = cache_resource;
+	dev_flow->handle.dvh.port_id_action = cache_resource;
 	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2583,7 +2585,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.push_vlan_res = cache_resource;
+			dev_flow->handle.dvh.push_vlan_res = cache_resource;
 			return 0;
 		}
 	}
@@ -2612,7 +2614,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
-	dev_flow->dv.push_vlan_res = cache_resource;
+	dev_flow->handle.dvh.push_vlan_res = cache_resource;
 	DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -3699,8 +3701,8 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5dv_dr_domain *ns;
 	uint32_t actions_len;
 
-	resource->flags =
-		dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
+	resource->flags = dev_flow->dv.group ? 0 :
+			  MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
 	if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
 				    resource->flags))
 		return rte_flow_error_set(error, EOVERFLOW,
@@ -3725,7 +3727,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.modify_hdr = cache_resource;
+			dev_flow->handle.dvh.modify_hdr = cache_resource;
 			return 0;
 		}
 	}
@@ -3752,7 +3754,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
-	dev_flow->dv.modify_hdr = cache_resource;
+	dev_flow->handle.dvh.modify_hdr = cache_resource;
 	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -5236,7 +5238,7 @@ struct field_modify_info modify_tcp[] = {
 	}
 	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
 	dev_flow->ingress = attr->ingress;
-	dev_flow->transfer = attr->transfer;
+	dev_flow->dv.transfer = attr->transfer;
 	return dev_flow;
 }
 
@@ -5392,7 +5394,7 @@ struct field_modify_info modify_tcp[] = {
 		 * This is workaround, masks are not supported,
 		 * and pre-validated.
 		 */
-		dev_flow->dv.vf_vlan.tag =
+		dev_flow->handle.vf_vlan.tag =
 			rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
 	}
 	tci_m = rte_be_to_cpu_16(vlan_m->tci);
@@ -6893,7 +6895,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_matcher,
 				rte_atomic32_read(&cache_matcher->refcnt));
 			rte_atomic32_inc(&cache_matcher->refcnt);
-			dev_flow->dv.matcher = cache_matcher;
+			dev_flow->handle.dvh.matcher = cache_matcher;
 			/* old matcher should not make the table ref++. */
 			flow_dv_tbl_resource_release(dev, tbl);
 			return 0;
@@ -6930,7 +6932,7 @@ struct field_modify_info modify_tcp[] = {
 	/* only matcher ref++, table ref++ already done above in get API. */
 	rte_atomic32_inc(&cache_matcher->refcnt);
 	LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
-	dev_flow->dv.matcher = cache_matcher;
+	dev_flow->handle.dvh.matcher = cache_matcher;
 	DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
 		key->domain ? "FDB" : "NIC", key->table_id,
 		cache_matcher->priority,
@@ -6972,7 +6974,7 @@ struct field_modify_info modify_tcp[] = {
 		cache_resource = container_of
 			(entry, struct mlx5_flow_dv_tag_resource, entry);
 		rte_atomic32_inc(&cache_resource->refcnt);
-		dev_flow->dv.tag_resource = cache_resource;
+		dev_flow->handle.dvh.tag_resource = cache_resource;
 		DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
 			(void *)cache_resource,
 			rte_atomic32_read(&cache_resource->refcnt));
@@ -7001,7 +7003,7 @@ struct field_modify_info modify_tcp[] = {
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot insert tag");
 	}
-	dev_flow->dv.tag_resource = cache_resource;
+	dev_flow->handle.dvh.tag_resource = cache_resource;
 	DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -7146,7 +7148,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
 {
 	struct rte_flow *flow = dev_flow->flow;
-	uint64_t items = dev_flow->layers;
+	uint64_t items = dev_flow->handle.layers;
 	int rss_inner = 0;
 	uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
 
@@ -7271,7 +7273,7 @@ struct field_modify_info modify_tcp[] = {
 				       !!priv->fdb_def_rule, &table, error);
 	if (ret)
 		return ret;
-	dev_flow->group = table;
+	dev_flow->dv.group = table;
 	if (attr->transfer)
 		mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
 	if (priority == MLX5_FLOW_PRIO_RSVD)
@@ -7304,7 +7306,7 @@ struct field_modify_info modify_tcp[] = {
 			    (dev, &port_id_resource, dev_flow, error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.port_id_action->action;
+				dev_flow->handle.dvh.port_id_action->action;
 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
 			break;
 		case RTE_FLOW_ACTION_TYPE_FLAG:
@@ -7322,12 +7324,12 @@ struct field_modify_info modify_tcp[] = {
 				break;
 			}
 			tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
-			if (!dev_flow->dv.tag_resource)
+			if (!dev_flow->handle.dvh.tag_resource)
 				if (flow_dv_tag_resource_register
 				    (dev, tag_be, dev_flow, error))
 					return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.tag_resource->action;
+				dev_flow->handle.dvh.tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			action_flags |= MLX5_FLOW_ACTION_MARK;
@@ -7349,12 +7351,12 @@ struct field_modify_info modify_tcp[] = {
 			tag_be = mlx5_flow_mark_set
 			      (((const struct rte_flow_action_mark *)
 			       (actions->conf))->id);
-			if (!dev_flow->dv.tag_resource)
+			if (!dev_flow->handle.dvh.tag_resource)
 				if (flow_dv_tag_resource_register
 				    (dev, tag_be, dev_flow, error))
 					return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.tag_resource->action;
+				dev_flow->handle.dvh.tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_META:
 			if (flow_dv_convert_action_set_meta
@@ -7404,9 +7406,9 @@ struct field_modify_info modify_tcp[] = {
 				goto cnt_err;
 			}
 			flow->counter = flow_dv_counter_alloc(dev,
-							      count->shared,
-							      count->id,
-							      dev_flow->group);
+							count->shared,
+							count->id,
+							dev_flow->dv.group);
 			if (flow->counter == NULL)
 				goto cnt_err;
 			dev_flow->dv.actions[actions_n++] =
@@ -7452,7 +7454,7 @@ struct field_modify_info modify_tcp[] = {
 					    (dev, attr, &vlan, dev_flow, error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-					   dev_flow->dv.push_vlan_res->action;
+				dev_flow->handle.dvh.push_vlan_res->action;
 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
@@ -7479,7 +7481,7 @@ struct field_modify_info modify_tcp[] = {
 							   error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
@@ -7489,7 +7491,7 @@ struct field_modify_info modify_tcp[] = {
 							   error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
@@ -7499,7 +7501,7 @@ struct field_modify_info modify_tcp[] = {
 					(dev, actions, dev_flow, attr, error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			} else {
 				/* Handle encap without preceding decap. */
 				if (flow_dv_create_action_l2_encap
@@ -7507,7 +7509,7 @@ struct field_modify_info modify_tcp[] = {
 				     error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			}
 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
@@ -7519,7 +7521,7 @@ struct field_modify_info modify_tcp[] = {
 				    (dev, dev_flow, attr->transfer, error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			}
 			/* If decap is followed by encap, handle it at encap. */
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
@@ -7551,7 +7553,7 @@ struct field_modify_info modify_tcp[] = {
 						 "cannot create jump action.");
 			}
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.jump->action;
+				dev_flow->handle.dvh.jump->action;
 			action_flags |= MLX5_FLOW_ACTION_JUMP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
@@ -7684,7 +7686,7 @@ struct field_modify_info modify_tcp[] = {
 					(dev, mhdr_res, dev_flow, error))
 					return -rte_errno;
 				dev_flow->dv.actions[modify_action_position] =
-					dev_flow->dv.modify_hdr->verbs_action;
+				dev_flow->handle.dvh.modify_hdr->verbs_action;
 			}
 			break;
 		default:
@@ -7695,7 +7697,7 @@ struct field_modify_info modify_tcp[] = {
 			modify_action_position = actions_n++;
 	}
 	dev_flow->dv.actions_n = actions_n;
-	dev_flow->actions = action_flags;
+	dev_flow->handle.act_flags = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 		int item_type = items->type;
@@ -7728,7 +7730,7 @@ struct field_modify_info modify_tcp[] = {
 						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv4(match_mask, match_value,
 						    items, item_flags, tunnel,
-						    dev_flow->group);
+						    dev_flow->dv.group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
@@ -7751,7 +7753,7 @@ struct field_modify_info modify_tcp[] = {
 						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv6(match_mask, match_value,
 						    items, item_flags, tunnel,
-						    dev_flow->group);
+						    dev_flow->dv.group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
@@ -7900,7 +7902,7 @@ struct field_modify_info modify_tcp[] = {
 	 * Layers may be already initialized from prefix flow if this dev_flow
 	 * is the suffix flow.
 	 */
-	dev_flow->layers |= item_flags;
+	dev_flow->handle.layers |= item_flags;
 	if (action_flags & MLX5_FLOW_ACTION_RSS)
 		flow_dv_hashfields_set(dev_flow);
 	/* Register matcher. */
@@ -7911,7 +7913,7 @@ struct field_modify_info modify_tcp[] = {
 	/* reserved field no needs to be set to 0 here. */
 	tbl_key.domain = attr->transfer;
 	tbl_key.direction = attr->egress;
-	tbl_key.table_id = dev_flow->group;
+	tbl_key.table_id = dev_flow->dv.group;
 	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
 		return -rte_errno;
 	return 0;
@@ -7935,21 +7937,25 @@ struct field_modify_info modify_tcp[] = {
 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 		struct rte_flow_error *error)
 {
-	struct mlx5_flow_dv *dv;
+	struct mlx5_flow_resource_dv *dv;
+	struct mlx5_flow_handle *dh;
+	struct mlx5_flow_handle_dv *dv_h;
 	struct mlx5_flow *dev_flow;
 	struct mlx5_priv *priv = dev->data->dev_private;
 	int n;
 	int err;
 
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+		dh = &dev_flow->handle;
 		dv = &dev_flow->dv;
 		n = dv->actions_n;
-		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
-			if (dev_flow->transfer) {
+		dv_h = &dh->dvh;
+		if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
+			if (dv->transfer) {
 				dv->actions[n++] = priv->sh->esw_drop_action;
 			} else {
-				dv->hrxq = mlx5_hrxq_drop_new(dev);
-				if (!dv->hrxq) {
+				dh->hrxq = mlx5_hrxq_drop_new(dev);
+				if (!dh->hrxq) {
 					rte_flow_error_set
 						(error, errno,
 						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -7957,9 +7963,9 @@ struct field_modify_info modify_tcp[] = {
 						 "cannot get drop hash queue");
 					goto error;
 				}
-				dv->actions[n++] = dv->hrxq->action;
+				dv->actions[n++] = dh->hrxq->action;
 			}
-		} else if (dev_flow->actions &
+		} else if (dh->act_flags &
 			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
 			struct mlx5_hrxq *hrxq;
 
@@ -7976,7 +7982,7 @@ struct field_modify_info modify_tcp[] = {
 					 dev_flow->hash_fields,
 					 (*flow->rss.queue),
 					 flow->rss.queue_num,
-					 !!(dev_flow->layers &
+					 !!(dev_flow->handle.layers &
 					    MLX5_FLOW_LAYER_TUNNEL));
 			}
 			if (!hrxq) {
@@ -7986,14 +7992,14 @@ struct field_modify_info modify_tcp[] = {
 					 "cannot get hash queue");
 				goto error;
 			}
-			dv->hrxq = hrxq;
-			dv->actions[n++] = dv->hrxq->action;
+			dh->hrxq = hrxq;
+			dv->actions[n++] = dh->hrxq->action;
 		}
-		dv->flow =
-			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
+		dh->ib_flow =
+			mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
 						  (void *)&dv->value, n,
 						  dv->actions);
-		if (!dv->flow) {
+		if (!dh->ib_flow) {
 			rte_flow_error_set(error, errno,
 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					   NULL,
@@ -8001,32 +8007,30 @@ struct field_modify_info modify_tcp[] = {
 			goto error;
 		}
 		if (priv->vmwa_context &&
-		    dev_flow->dv.vf_vlan.tag &&
-		    !dev_flow->dv.vf_vlan.created) {
+		    dh->vf_vlan.tag && !dh->vf_vlan.created) {
 			/*
 			 * The rule contains the VLAN pattern.
 			 * For VF we are going to create VLAN
 			 * interface to make hypervisor set correct
 			 * e-Switch vport context.
 			 */
-			mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
+			mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
 		}
 	}
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		struct mlx5_flow_dv *dv = &dev_flow->dv;
-		if (dv->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+		struct mlx5_flow_handle *dh_tmp = &dev_flow->handle;
+		if (dh_tmp->hrxq) {
+			if (dh_tmp->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dv->hrxq);
-			dv->hrxq = NULL;
+				mlx5_hrxq_release(dev, dh_tmp->hrxq);
+			dh_tmp->hrxq = NULL;
 		}
-		if (dev_flow->dv.vf_vlan.tag &&
-		    dev_flow->dv.vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+		if (dh_tmp->vf_vlan.tag && dh_tmp->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh_tmp->vf_vlan);
 	}
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
@@ -8047,7 +8051,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_matcher_release(struct rte_eth_dev *dev,
 			struct mlx5_flow *flow)
 {
-	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+	struct mlx5_flow_dv_matcher *matcher = flow->handle.dvh.matcher;
 
 	MLX5_ASSERT(matcher->matcher_object);
 	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
@@ -8080,7 +8084,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
 {
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource =
-						flow->dv.encap_decap;
+						flow->handle.dvh.encap_decap;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
@@ -8113,7 +8117,8 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
 				  struct mlx5_flow *flow)
 {
-	struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
+	struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
+						flow->handle.dvh.jump;
 	struct mlx5_flow_tbl_data_entry *tbl_data =
 			container_of(cache_resource,
 				     struct mlx5_flow_tbl_data_entry, jump);
@@ -8147,7 +8152,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
 {
 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
-						flow->dv.modify_hdr;
+						flow->handle.dvh.modify_hdr;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
@@ -8178,7 +8183,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
 {
 	struct mlx5_flow_dv_port_id_action_resource *cache_resource =
-		flow->dv.port_id_action;
+						flow->handle.dvh.port_id_action;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
@@ -8209,7 +8214,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
 {
 	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
-		flow->dv.push_vlan_res;
+						flow->handle.dvh.push_vlan_res;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
@@ -8239,27 +8244,26 @@ struct field_modify_info modify_tcp[] = {
 static void
 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow_dv *dv;
+	struct mlx5_flow_handle *dh;
 	struct mlx5_flow *dev_flow;
 
 	if (!flow)
 		return;
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dv = &dev_flow->dv;
-		if (dv->flow) {
-			claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
-			dv->flow = NULL;
+		dh = &dev_flow->handle;
+		if (dh->ib_flow) {
+			claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
+			dh->ib_flow = NULL;
 		}
-		if (dv->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+		if (dh->hrxq) {
+			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dv->hrxq);
-			dv->hrxq = NULL;
+				mlx5_hrxq_release(dev, dh->hrxq);
+			dh->hrxq = NULL;
 		}
-		if (dev_flow->dv.vf_vlan.tag &&
-		    dev_flow->dv.vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+		if (dh->vf_vlan.tag && dh->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
 }
 
@@ -8291,20 +8295,21 @@ struct field_modify_info modify_tcp[] = {
 	while (!LIST_EMPTY(&flow->dev_flows)) {
 		dev_flow = LIST_FIRST(&flow->dev_flows);
 		LIST_REMOVE(dev_flow, next);
-		if (dev_flow->dv.matcher)
+		if (dev_flow->handle.dvh.matcher)
 			flow_dv_matcher_release(dev, dev_flow);
-		if (dev_flow->dv.encap_decap)
+		if (dev_flow->handle.dvh.encap_decap)
 			flow_dv_encap_decap_resource_release(dev_flow);
-		if (dev_flow->dv.modify_hdr)
+		if (dev_flow->handle.dvh.modify_hdr)
 			flow_dv_modify_hdr_resource_release(dev_flow);
-		if (dev_flow->dv.jump)
+		if (dev_flow->handle.dvh.jump)
 			flow_dv_jump_tbl_resource_release(dev, dev_flow);
-		if (dev_flow->dv.port_id_action)
+		if (dev_flow->handle.dvh.port_id_action)
 			flow_dv_port_id_action_resource_release(dev_flow);
-		if (dev_flow->dv.push_vlan_res)
+		if (dev_flow->handle.dvh.push_vlan_res)
 			flow_dv_push_vlan_action_resource_release(dev_flow);
-		if (dev_flow->dv.tag_resource)
-			flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
+		if (dev_flow->handle.dvh.tag_resource)
+			flow_dv_tag_release(dev,
+					dev_flow->handle.dvh.tag_resource);
 		rte_free(dev_flow);
 	}
 }
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 459e7b6..08185ec 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -253,7 +253,8 @@
  *   Size in bytes of the specification to copy.
  */
 static void
-flow_verbs_spec_add(struct mlx5_flow_verbs *verbs, void *src, unsigned int size)
+flow_verbs_spec_add(struct mlx5_flow_resource_verbs *verbs,
+		    void *src, unsigned int size)
 {
 	void *dst;
 
@@ -393,7 +394,7 @@
 	else
 		flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
 	if (!tunnel)
-		dev_flow->verbs.vf_vlan.tag =
+		dev_flow->handle.vf_vlan.tag =
 			rte_be_to_cpu_16(spec->tci) & 0x0fff;
 }
 
@@ -743,7 +744,7 @@
 			      const struct rte_flow_item *item __rte_unused,
 			      uint64_t item_flags)
 {
-	struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
+	struct mlx5_flow_resource_verbs *verbs = &dev_flow->verbs;
 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
 	struct ibv_flow_spec_tunnel tunnel = {
@@ -1418,7 +1419,7 @@
 	dev_flow->verbs.attr = (void *)(dev_flow + 1);
 	dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
 	dev_flow->ingress = attr->ingress;
-	dev_flow->transfer = attr->transfer;
+	/* No need to set the transfer attribute: not supported in Verbs mode. */
 	return dev_flow;
 }
 
@@ -1498,7 +1499,7 @@
 						  "action not supported");
 		}
 	}
-	dev_flow->actions = action_flags;
+	dev_flow->handle.act_flags = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 
@@ -1600,7 +1601,7 @@
 						  "item not supported");
 		}
 	}
-	dev_flow->layers = item_flags;
+	dev_flow->handle.layers = item_flags;
 	dev_flow->verbs.attr->priority =
 		mlx5_flow_adjust_priority(dev, priority, subpriority);
 	dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port;
@@ -1618,28 +1619,26 @@
 static void
 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow_verbs *verbs;
+	struct mlx5_flow_handle *dh;
 	struct mlx5_flow *dev_flow;
 
 	if (!flow)
 		return;
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		verbs = &dev_flow->verbs;
-		if (verbs->flow) {
-			claim_zero(mlx5_glue->destroy_flow(verbs->flow));
-			verbs->flow = NULL;
+		dh = &dev_flow->handle;
+		if (dh->ib_flow) {
+			claim_zero(mlx5_glue->destroy_flow(dh->ib_flow));
+			dh->ib_flow = NULL;
 		}
-		if (verbs->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+		if (dh->hrxq) {
+			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, verbs->hrxq);
-			verbs->hrxq = NULL;
-		}
-		if (dev_flow->verbs.vf_vlan.tag &&
-		    dev_flow->verbs.vf_vlan.created) {
-			mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
+				mlx5_hrxq_release(dev, dh->hrxq);
+			dh->hrxq = NULL;
 		}
+		if (dh->vf_vlan.tag && dh->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
 }
 
@@ -1688,15 +1687,15 @@
 		 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_verbs *verbs;
+	struct mlx5_flow_handle *dh;
 	struct mlx5_flow *dev_flow;
 	int err;
 
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		verbs = &dev_flow->verbs;
-		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
-			verbs->hrxq = mlx5_hrxq_drop_new(dev);
-			if (!verbs->hrxq) {
+		dh = &dev_flow->handle;
+		if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP) {
+			dh->hrxq = mlx5_hrxq_drop_new(dev);
+			if (!dh->hrxq) {
 				rte_flow_error_set
 					(error, errno,
 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -1714,12 +1713,12 @@
 					     flow->rss.queue_num);
 			if (!hrxq)
 				hrxq = mlx5_hrxq_new(dev, flow->rss.key,
-						     MLX5_RSS_HASH_KEY_LEN,
-						     dev_flow->hash_fields,
-						     (*flow->rss.queue),
-						     flow->rss.queue_num,
-						     !!(dev_flow->layers &
-						       MLX5_FLOW_LAYER_TUNNEL));
+						MLX5_RSS_HASH_KEY_LEN,
+						dev_flow->hash_fields,
+						(*flow->rss.queue),
+						flow->rss.queue_num,
+						!!(dev_flow->handle.layers &
+						MLX5_FLOW_LAYER_TUNNEL));
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,
@@ -1727,11 +1726,11 @@
 					 "cannot get hash queue");
 				goto error;
 			}
-			verbs->hrxq = hrxq;
+			dh->hrxq = hrxq;
 		}
-		verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
-						     verbs->attr);
-		if (!verbs->flow) {
+		dh->ib_flow = mlx5_glue->create_flow(dh->hrxq->qp,
+						     dev_flow->verbs.attr);
+		if (!dh->ib_flow) {
 			rte_flow_error_set(error, errno,
 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					   NULL,
@@ -1739,33 +1738,31 @@
 			goto error;
 		}
 		if (priv->vmwa_context &&
-		    dev_flow->verbs.vf_vlan.tag &&
-		    !dev_flow->verbs.vf_vlan.created) {
+		    dev_flow->handle.vf_vlan.tag &&
+		    !dev_flow->handle.vf_vlan.created) {
 			/*
 			 * The rule contains the VLAN pattern.
 			 * For VF we are going to create VLAN
 			 * interface to make hypervisor set correct
 			 * e-Switch vport context.
 			 */
-			mlx5_vlan_vmwa_acquire(dev, &dev_flow->verbs.vf_vlan);
+			mlx5_vlan_vmwa_acquire(dev, &dev_flow->handle.vf_vlan);
 		}
 	}
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		verbs = &dev_flow->verbs;
-		if (verbs->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+		dh = &dev_flow->handle;
+		if (dh->hrxq) {
+			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, verbs->hrxq);
-			verbs->hrxq = NULL;
-		}
-		if (dev_flow->verbs.vf_vlan.tag &&
-		    dev_flow->verbs.vf_vlan.created) {
-			mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
+				mlx5_hrxq_release(dev, dh->hrxq);
+			dh->hrxq = NULL;
 		}
+		if (dh->vf_vlan.tag && dh->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v3 3/4] net/mlx5: separate the flow handle resource
  2020-03-24 15:16   ` [dpdk-dev] [PATCH v3 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
  2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 1/4] net/mlx5: change operations for non-cached flows Bing Zhao
  2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 2/4] net/mlx5: reorganize mlx5 flow structures Bing Zhao
@ 2020-03-24 15:16     ` Bing Zhao
  2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 4/4] net/mlx5: check device stat before creating flow Bing Zhao
  3 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-03-24 15:16 UTC (permalink / raw)
  To: orika, rasland, matan; +Cc: viacheslavo, dev

Only the members of the flow handle structure are needed when
destroying a flow. The other members of the mlx5 device flow resource
are used only during flow creation and can be reused for different
flows.
So only the device flow handle structure needs to be saved for further
usage. It can be separated from the whole mlx5 device flow and stored
in a list for each rte flow.
The other members are pre-allocated in an array, and an index is used
to apply each device flow to the hardware.
The flow handle sizes of Verbs and DV mode differ, and the exact size
can be calculated before allocating a Verbs handle. The total memory
consumption will then be lower for Verbs when no inbox driver is
being used.
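
For illustration only (not part of this patch), the split can be
pictured with the trimmed, hypothetical structures below (LIST_ENTRY
comes from <sys/queue.h>; the field subsets are illustrative, not the
real definitions):

	/* Kept per rte_flow on a list: everything destroy needs. */
	struct flow_handle_sketch {
		LIST_ENTRY(flow_handle_sketch) next;
		uint64_t layers;    /* MLX5_FLOW_LAYER_* bits. */
		uint64_t act_flags; /* MLX5_FLOW_ACTION_* bits. */
		void *ib_flow;      /* Verbs/DV flow object. */
	};

	/* Reused for every creation from a pre-allocated array. */
	struct flow_workspace_sketch {
		int actions_n;      /* Only meaningful while creating. */
		void *actions[8];   /* Action list, create stage only. */
	};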

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5.c            |   7 ++
 drivers/net/mlx5/mlx5.h            |   4 +
 drivers/net/mlx5/mlx5_flow.c       | 127 +++++++++++++-------
 drivers/net/mlx5/mlx5_flow.h       |  89 ++++++++++++--
 drivers/net/mlx5/mlx5_flow_dv.c    | 230 +++++++++++++++++++++----------------
 drivers/net/mlx5/mlx5_flow_verbs.c | 139 ++++++++++++----------
 drivers/net/mlx5/mlx5_trigger.c    |   1 +
 7 files changed, 381 insertions(+), 216 deletions(-)
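
As a usage sketch only (assuming the names introduced by this patch:
priv->inter_flows, priv->flow_idx and MLX5_NUM_MAX_DEV_FLOWS; error
reporting trimmed), the prepare stage takes one pre-allocated
workspace slot per sub-flow and allocates only the handle:

	static struct mlx5_flow *
	flow_prepare_sketch(struct mlx5_priv *priv)
	{
		struct mlx5_flow *dev_flow;

		/* All pre-allocated intermediate slots are in use. */
		if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS)
			return NULL;
		/* flow_idx is reset to 0 before each flow creation cycle. */
		dev_flow = &((struct mlx5_flow *)
			     priv->inter_flows)[priv->flow_idx++];
		/* Only the handle is allocated per device flow; it is
		 * the single object kept for the later destroy stage.
		 */
		dev_flow->handle = rte_calloc(__func__, 1,
					      sizeof(struct mlx5_flow_handle),
					      0);
		return dev_flow->handle ? dev_flow : NULL;
	}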

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 0613f70..8dda0c3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1246,6 +1246,8 @@ struct mlx5_flow_id_pool *
 	 */
 	mlx5_flow_list_flush(dev, &priv->flows, true);
 	mlx5_flow_meter_flush(dev, NULL);
+	/* Free the intermediate buffers for flow creation. */
+	mlx5_flow_free_intermediate(dev);
 	/* Prevent crashes when queues are still in use. */
 	dev->rx_pkt_burst = removed_rx_burst;
 	dev->tx_pkt_burst = removed_tx_burst;
@@ -2768,6 +2770,11 @@ struct mlx5_flow_id_pool *
 			err = ENOTSUP;
 			goto error;
 	}
+	/*
+	 * Allocate the buffer for flow creation, just once.
+	 * The allocation must be done before any flow is created.
+	 */
+	mlx5_flow_alloc_intermediate(eth_dev);
 	/* Query availability of metadata reg_c's. */
 	err = mlx5_flow_discover_mreg_c(eth_dev);
 	if (err < 0) {
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 98e5fa5..2cc4c76 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -517,6 +517,8 @@ struct mlx5_priv {
 	struct mlx5_drop drop_queue; /* Flow drop queues. */
 	struct mlx5_flows flows; /* RTE Flow rules. */
 	struct mlx5_flows ctrl_flows; /* Control flow rules. */
+	void *inter_flows; /* Intermediate resources for flow creation. */
+	int flow_idx; /* Intermediate device flow index. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
 	LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
@@ -728,6 +730,8 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
 int mlx5_flow_start_default(struct rte_eth_dev *dev);
 void mlx5_flow_stop_default(struct rte_eth_dev *dev);
+void mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev);
+void mlx5_flow_free_intermediate(struct rte_eth_dev *dev);
 int mlx5_flow_verify(struct rte_eth_dev *dev);
 int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);
 int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 230f071..bf0728d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -712,17 +712,19 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- *   Pointer to device flow structure.
+ * @param[in] flow
+ *   Pointer to flow structure.
+ * @param[in] dev_handle
+ *   Pointer to device flow handle structure.
  */
 static void
-flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow,
+		       struct mlx5_flow_handle *dev_handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->handle.act_flags &
+	const int mark = !!(dev_handle->act_flags &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL);
+	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
 
 	for (i = 0; i != flow->rss.queue_num; ++i) {
@@ -751,7 +753,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			/* Increase the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
 				if ((tunnels_info[j].tunnel &
-				     dev_flow->handle.layers) ==
+				     dev_handle->layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]++;
 					break;
@@ -773,10 +775,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 static void
 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		flow_drv_rxq_flags_set(dev, dev_flow);
+	LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+		flow_drv_rxq_flags_set(dev, flow, dev_handle);
 }
 
 /**
@@ -785,17 +787,19 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param[in] dev_flow
- *   Pointer to the device flow.
+ * @param[in] flow
+ *   Pointer to flow structure.
+ * @param[in] dev_handle
+ *   Pointer to the device flow handle structure.
  */
 static void
-flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow,
+			struct mlx5_flow_handle *dev_handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->handle.act_flags &
+	const int mark = !!(dev_handle->act_flags &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL);
+	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
 
 	MLX5_ASSERT(dev->data->dev_started);
@@ -820,7 +824,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			/* Decrease the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
 				if ((tunnels_info[j].tunnel &
-				     dev_flow->handle.layers) ==
+				     dev_handle->layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]--;
 					break;
@@ -843,10 +847,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 static void
 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		flow_drv_rxq_flags_trim(dev, dev_flow);
+	LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+		flow_drv_rxq_flags_trim(dev, flow, dev_handle);
 }
 
 /**
@@ -2309,11 +2313,11 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
 			     struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		if (dev_flow->handle.qrss_id)
-			flow_qrss_free_id(dev, dev_flow->handle.qrss_id);
+	LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+		if (dev_handle->qrss_id)
+			flow_qrss_free_id(dev, dev_handle->qrss_id);
 }
 
 static int
@@ -2329,7 +2333,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 }
 
 static struct mlx5_flow *
-flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
+		  const struct rte_flow_attr *attr __rte_unused,
 		  const struct rte_flow_item items[] __rte_unused,
 		  const struct rte_flow_action actions[] __rte_unused,
 		  struct rte_flow_error *error)
@@ -2469,6 +2474,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *   setting backward reference to the flow should be done out of this function.
  *   layers field is not filled either.
  *
+ * @param[in] dev
+ *   Pointer to the dev structure.
  * @param[in] attr
  *   Pointer to the flow attributes.
  * @param[in] items
@@ -2482,7 +2489,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
  */
 static inline struct mlx5_flow *
-flow_drv_prepare(const struct rte_flow *flow,
+flow_drv_prepare(struct rte_eth_dev *dev,
+		 const struct rte_flow *flow,
 		 const struct rte_flow_attr *attr,
 		 const struct rte_flow_item items[],
 		 const struct rte_flow_action actions[],
@@ -2493,7 +2501,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 
 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
-	return fops->prepare(attr, items, actions, error);
+	return fops->prepare(dev, attr, items, actions, error);
 }
 
 /**
@@ -2701,17 +2709,17 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	 * help to do the optimization work for source code.
 	 * If no decap actions, use the layers directly.
 	 */
-	if (!(dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DECAP))
-		return dev_flow->handle.layers;
+	if (!(dev_flow->handle->act_flags & MLX5_FLOW_ACTION_DECAP))
+		return dev_flow->handle->layers;
 	/* Convert L3 layers with decap action. */
-	if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
-	else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
 	/* Convert L4 layers with decap action.  */
-	if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
+	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
 		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
-	else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
+	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
 		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
 	return layers;
 }
@@ -3412,7 +3420,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  * The last stage of splitting chain, just creates the subflow
  * without any modification.
  *
- * @param dev
+ * @param[in] dev
  *   Pointer to Ethernet device.
  * @param[in] flow
  *   Parent flow structure pointer.
@@ -3445,19 +3453,19 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	struct mlx5_flow *dev_flow;
 
-	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
+	dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, error);
 	if (!dev_flow)
 		return -rte_errno;
 	dev_flow->flow = flow;
 	dev_flow->external = external;
 	/* Subflow object was created, we must include one in the list. */
-	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+	LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
 	/*
 	 * If dev_flow is as one of the suffix flow, some actions in suffix
 	 * flow may need some user defined item layer flags.
 	 */
 	if (prefix_layers)
-		dev_flow->handle.layers = prefix_layers;
+		dev_flow->handle->layers = prefix_layers;
 	if (sub_flow)
 		*sub_flow = dev_flow;
 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
@@ -3972,7 +3980,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			 * reallocation becomes possible (for example, for
 			 * other flows in other threads).
 			 */
-			dev_flow->handle.qrss_id = qrss_id;
+			dev_flow->handle->qrss_id = qrss_id;
 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
 						   error);
 			if (ret < 0)
@@ -4085,7 +4093,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			ret = -rte_errno;
 			goto exit;
 		}
-		dev_flow->handle.mtr_flow_id = mtr_tag_id;
+		dev_flow->handle->mtr_flow_id = mtr_tag_id;
 		/* Setting the sfx group attr. */
 		sfx_attr.group = sfx_attr.transfer ?
 				(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
@@ -4256,7 +4264,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
 		flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
 	}
-	LIST_INIT(&flow->dev_flows);
+	LIST_INIT(&flow->dev_handles);
 	if (rss && rss->types) {
 		unsigned int graph_root;
 
@@ -4271,6 +4279,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		buf->entries = 1;
 		buf->entry[0].pattern = (void *)(uintptr_t)items;
 	}
+	/* Reset device flow index to 0. */
+	priv->flow_idx = 0;
 	for (i = 0; i < buf->entries; ++i) {
 		/*
 		 * The splitter may create multiple dev_flows,
@@ -4289,13 +4299,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
 		attr_tx.ingress = 0;
 		attr_tx.egress = 1;
-		dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items,
+		dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
 					    actions_hairpin_tx.actions, error);
 		if (!dev_flow)
 			goto error;
 		dev_flow->flow = flow;
 		dev_flow->external = 0;
-		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+		LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
 					 items_tx.items,
 					 actions_hairpin_tx.actions, error);
@@ -4543,8 +4553,6 @@ struct rte_flow *
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param list
- *   Pointer to a TAILQ flow list.
  */
 void
 mlx5_flow_stop_default(struct rte_eth_dev *dev)
@@ -4570,6 +4578,37 @@ struct rte_flow *
 }
 
 /**
+ * Allocate intermediate resources for flow creation.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (!priv->inter_flows)
+		priv->inter_flows = rte_calloc(__func__, MLX5_NUM_MAX_DEV_FLOWS,
+					       sizeof(struct mlx5_flow), 0);
+}
+
+/**
+ * Free intermediate resources for flows.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_flow_free_intermediate(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	rte_free(priv->inter_flows);
+	priv->inter_flows = NULL;
+}
+
+/**
  * Verify the flow list is empty
  *
  * @param dev
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index f3aea53..0f0e59d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -500,6 +500,8 @@ struct mlx5_flow_handle_dv {
 
 /** Device flow handle structure: used both for creating & destroying. */
 struct mlx5_flow_handle {
+	LIST_ENTRY(mlx5_flow_handle) next;
+	/**< Pointer to next device flow handle. */
 	uint64_t layers;
 	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
 	uint64_t act_flags;
@@ -517,6 +519,18 @@ struct mlx5_flow_handle {
 };
 
 /*
+ * Size of the Verbs device flow handle structure only. The DV-only
+ * structure is not used in Verbs mode, so no DV flow attributes are accessed.
+ * Macro offsetof() could also be used here.
+ */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#define MLX5_FLOW_HANDLE_VERBS_SIZE \
+	(sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv))
+#else
+#define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle))
+#endif
+
+/*
  * Max number of actions per DV flow.
  * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
  * in rdma-core file providers/mlx5/verbs.c.
@@ -524,7 +538,7 @@ struct mlx5_flow_handle {
 #define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
 
 /** Device flow structure only for DV flow creation. */
-struct mlx5_flow_resource_dv {
+struct mlx5_flow_dv_workspace {
 	uint32_t group; /**< The group index. */
 	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
 	int actions_n; /**< number of actions. */
@@ -533,27 +547,79 @@ struct mlx5_flow_resource_dv {
 	/**< Holds the value that the packet is compared to. */
 };
 
+/*
+ * Maximal Verbs flow specifications & actions size.
+ * Some elements are mutually exclusive, but enough space should be allocated.
+ * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers.
+ *               2. One tunnel header (exception: GRE + MPLS),
+ *                  SPEC length: GRE == tunnel.
+ * Actions: 1. 1 Mark OR Flag.
+ *          2. 1 Drop (if any).
+ *          3. No limitation for counters, but it makes no sense to support too
+ *             many counters in a single device flow.
+ */
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+#define MLX5_VERBS_MAX_SPEC_SIZE \
+		( \
+			(2 * (sizeof(struct ibv_flow_spec_eth) + \
+			      sizeof(struct ibv_flow_spec_ipv6) + \
+			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
+			sizeof(struct ibv_flow_spec_gre) + \
+			sizeof(struct ibv_flow_spec_mpls)) \
+		)
+#else
+#define MLX5_VERBS_MAX_SPEC_SIZE \
+		( \
+			(2 * (sizeof(struct ibv_flow_spec_eth) + \
+			      sizeof(struct ibv_flow_spec_ipv6) + \
+			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
+			sizeof(struct ibv_flow_spec_tunnel)) \
+		)
+#endif
+
+#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
+	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+#define MLX5_VERBS_MAX_ACT_SIZE \
+		( \
+			sizeof(struct ibv_flow_spec_action_tag) + \
+			sizeof(struct ibv_flow_spec_action_drop) + \
+			sizeof(struct ibv_flow_spec_counter_action) * 4 \
+		)
+#else
+#define MLX5_VERBS_MAX_ACT_SIZE \
+		( \
+			sizeof(struct ibv_flow_spec_action_tag) + \
+			sizeof(struct ibv_flow_spec_action_drop) \
+		)
+#endif
+
+#define MLX5_VERBS_MAX_SPEC_ACT_SIZE \
+		(MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE)
+
 /** Device flow structure only for Verbs flow creation. */
-struct mlx5_flow_resource_verbs {
+struct mlx5_flow_verbs_workspace {
 	unsigned int size; /**< Size of the attribute. */
-	struct ibv_flow_attr *attr; /**< Pointer to the Specification buffer. */
-	uint8_t *specs; /**< Pointer to the specifications. */
+	struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */
+	uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE];
+	/**< Specifications & actions buffer of verbs flow. */
 };
 
+/** Maximal number of device sub-flows supported. */
+#define MLX5_NUM_MAX_DEV_FLOWS 32
+
 /** Device flow structure. */
 struct mlx5_flow {
-	LIST_ENTRY(mlx5_flow) next; /**< Pointer to next device flow. */
 	struct rte_flow *flow; /**< Pointer to the main flow. */
 	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
 	bool external; /**< true if the flow is created external to PMD. */
 	uint8_t ingress; /**< 1 if the flow is ingress. */
 	union {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		struct mlx5_flow_resource_dv dv;
+		struct mlx5_flow_dv_workspace dv;
 #endif
-		struct mlx5_flow_resource_verbs verbs;
+		struct mlx5_flow_verbs_workspace verbs;
 	};
-	struct mlx5_flow_handle handle;
+	struct mlx5_flow_handle *handle;
 };
 
 /* Flow meter state. */
@@ -667,8 +733,8 @@ struct rte_flow {
 	struct mlx5_flow_mreg_copy_resource *mreg_copy;
 	/**< pointer to metadata register copy table resource. */
 	struct mlx5_flow_meter *meter; /**< Holds flow meter. */
-	LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
-	/**< Device flows that are part of the flow. */
+	LIST_HEAD(dev_handles, mlx5_flow_handle) dev_handles;
+	/**< Device flow handles that are part of the flow. */
 	struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
 	uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
 	uint32_t copy_applied:1; /**< The MARK copy Flow os applied. */
@@ -681,7 +747,8 @@ typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
 				    bool external,
 				    struct rte_flow_error *error);
 typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
-	(const struct rte_flow_attr *attr, const struct rte_flow_item items[],
+	(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+	 const struct rte_flow_item items[],
 	 const struct rte_flow_action actions[], struct rte_flow_error *error);
 typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
 				     struct mlx5_flow *dev_flow,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index d1eec96..d532ce0 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -92,7 +92,7 @@
 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
 		  struct mlx5_flow *dev_flow, bool tunnel_decap)
 {
-	uint64_t layers = dev_flow->handle.layers;
+	uint64_t layers = dev_flow->handle->layers;
 
 	/*
 	 * If layers is already initialized, it means this dev_flow is the
@@ -2399,7 +2399,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle.dvh.encap_decap = cache_resource;
+			dev_flow->handle->dvh.encap_decap = cache_resource;
 			return 0;
 		}
 	}
@@ -2425,7 +2425,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
-	dev_flow->handle.dvh.encap_decap = cache_resource;
+	dev_flow->handle->dvh.encap_decap = cache_resource;
 	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2476,7 +2476,7 @@ struct field_modify_info modify_tcp[] = {
 			(void *)&tbl_data->jump, cnt);
 	}
 	rte_atomic32_inc(&tbl_data->jump.refcnt);
-	dev_flow->handle.dvh.jump = &tbl_data->jump;
+	dev_flow->handle->dvh.jump = &tbl_data->jump;
 	return 0;
 }
 
@@ -2514,7 +2514,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle.dvh.port_id_action = cache_resource;
+			dev_flow->handle->dvh.port_id_action = cache_resource;
 			return 0;
 		}
 	}
@@ -2542,7 +2542,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
-	dev_flow->handle.dvh.port_id_action = cache_resource;
+	dev_flow->handle->dvh.port_id_action = cache_resource;
 	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2585,7 +2585,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle.dvh.push_vlan_res = cache_resource;
+			dev_flow->handle->dvh.push_vlan_res = cache_resource;
 			return 0;
 		}
 	}
@@ -2614,7 +2614,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
-	dev_flow->handle.dvh.push_vlan_res = cache_resource;
+	dev_flow->handle->dvh.push_vlan_res = cache_resource;
 	DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -3727,7 +3727,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle.dvh.modify_hdr = cache_resource;
+			dev_flow->handle->dvh.modify_hdr = cache_resource;
 			return 0;
 		}
 	}
@@ -3754,7 +3754,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
-	dev_flow->handle.dvh.modify_hdr = cache_resource;
+	dev_flow->handle->dvh.modify_hdr = cache_resource;
 	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -5207,6 +5207,8 @@ struct field_modify_info modify_tcp[] = {
  * Internal preparation function. Allocates the DV flow size,
  * this size is constant.
  *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
  * @param[in] attr
  *   Pointer to the flow attributes.
  * @param[in] items
@@ -5221,22 +5223,41 @@ struct field_modify_info modify_tcp[] = {
  *   otherwise NULL and rte_errno is set.
  */
 static struct mlx5_flow *
-flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_dv_prepare(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr __rte_unused,
 		const struct rte_flow_item items[] __rte_unused,
 		const struct rte_flow_action actions[] __rte_unused,
 		struct rte_flow_error *error)
 {
-	size_t size = sizeof(struct mlx5_flow);
+	size_t size = sizeof(struct mlx5_flow_handle);
 	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
+	struct mlx5_priv *priv = dev->data->dev_private;
 
-	dev_flow = rte_calloc(__func__, 1, size, 0);
-	if (!dev_flow) {
+	/* Sanity check to avoid corrupting the memory. */
+	if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+		rte_flow_error_set(error, ENOSPC,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "not free temporary device flow");
+		return NULL;
+	}
+	dev_handle = rte_calloc(__func__, 1, size, 0);
+	if (!dev_handle) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				   "not enough memory to create flow");
+				   "not enough memory to create flow handle");
 		return NULL;
 	}
+	/* No multi-thread support. */
+	dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+	dev_flow->handle = dev_handle;
 	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+	/*
+	 * The matching value needs to be cleared to 0 before using. In the
+	 * past, it was automatically cleared when using the rte_*alloc
+	 * API. The time consumption will be almost the same as before.
+	 */
+	memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
 	dev_flow->ingress = attr->ingress;
 	dev_flow->dv.transfer = attr->transfer;
 	return dev_flow;
@@ -5394,7 +5415,7 @@ struct field_modify_info modify_tcp[] = {
 		 * This is workaround, masks are not supported,
 		 * and pre-validated.
 		 */
-		dev_flow->handle.vf_vlan.tag =
+		dev_flow->handle->vf_vlan.tag =
 			rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
 	}
 	tci_m = rte_be_to_cpu_16(vlan_m->tci);
@@ -6895,7 +6916,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_matcher,
 				rte_atomic32_read(&cache_matcher->refcnt));
 			rte_atomic32_inc(&cache_matcher->refcnt);
-			dev_flow->handle.dvh.matcher = cache_matcher;
+			dev_flow->handle->dvh.matcher = cache_matcher;
 			/* old matcher should not make the table ref++. */
 			flow_dv_tbl_resource_release(dev, tbl);
 			return 0;
@@ -6932,7 +6953,7 @@ struct field_modify_info modify_tcp[] = {
 	/* only matcher ref++, table ref++ already done above in get API. */
 	rte_atomic32_inc(&cache_matcher->refcnt);
 	LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
-	dev_flow->handle.dvh.matcher = cache_matcher;
+	dev_flow->handle->dvh.matcher = cache_matcher;
 	DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
 		key->domain ? "FDB" : "NIC", key->table_id,
 		cache_matcher->priority,
@@ -6974,7 +6995,7 @@ struct field_modify_info modify_tcp[] = {
 		cache_resource = container_of
 			(entry, struct mlx5_flow_dv_tag_resource, entry);
 		rte_atomic32_inc(&cache_resource->refcnt);
-		dev_flow->handle.dvh.tag_resource = cache_resource;
+		dev_flow->handle->dvh.tag_resource = cache_resource;
 		DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
 			(void *)cache_resource,
 			rte_atomic32_read(&cache_resource->refcnt));
@@ -7003,7 +7024,7 @@ struct field_modify_info modify_tcp[] = {
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot insert tag");
 	}
-	dev_flow->handle.dvh.tag_resource = cache_resource;
+	dev_flow->handle->dvh.tag_resource = cache_resource;
 	DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -7148,7 +7169,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
 {
 	struct rte_flow *flow = dev_flow->flow;
-	uint64_t items = dev_flow->handle.layers;
+	uint64_t items = dev_flow->handle->layers;
 	int rss_inner = 0;
 	uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
 
@@ -7238,6 +7259,7 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *dev_conf = &priv->config;
 	struct rte_flow *flow = dev_flow->flow;
+	struct mlx5_flow_handle *handle = dev_flow->handle;
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
 	uint64_t action_flags = 0;
@@ -7306,7 +7328,7 @@ struct field_modify_info modify_tcp[] = {
 			    (dev, &port_id_resource, dev_flow, error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.port_id_action->action;
+					handle->dvh.port_id_action->action;
 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
 			break;
 		case RTE_FLOW_ACTION_TYPE_FLAG:
@@ -7324,12 +7346,17 @@ struct field_modify_info modify_tcp[] = {
 				break;
 			}
 			tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
-			if (!dev_flow->handle.dvh.tag_resource)
-				if (flow_dv_tag_resource_register
-				    (dev, tag_be, dev_flow, error))
-					return -rte_errno;
+			/*
+			 * Only one FLAG or MARK is supported per device flow
+			 * right now. So the pointer to the tag resource must be
+			 * zero before the register process.
+			 */
+			MLX5_ASSERT(!handle->dvh.tag_resource);
+			if (flow_dv_tag_resource_register(dev, tag_be,
+							  dev_flow, error))
+				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.tag_resource->action;
+					handle->dvh.tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			action_flags |= MLX5_FLOW_ACTION_MARK;
@@ -7351,12 +7378,12 @@ struct field_modify_info modify_tcp[] = {
 			tag_be = mlx5_flow_mark_set
 			      (((const struct rte_flow_action_mark *)
 			       (actions->conf))->id);
-			if (!dev_flow->handle.dvh.tag_resource)
-				if (flow_dv_tag_resource_register
-				    (dev, tag_be, dev_flow, error))
-					return -rte_errno;
+			MLX5_ASSERT(!handle->dvh.tag_resource);
+			if (flow_dv_tag_resource_register(dev, tag_be,
+							  dev_flow, error))
+				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.tag_resource->action;
+					handle->dvh.tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_META:
 			if (flow_dv_convert_action_set_meta
@@ -7454,7 +7481,7 @@ struct field_modify_info modify_tcp[] = {
 					    (dev, attr, &vlan, dev_flow, error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.push_vlan_res->action;
+					handle->dvh.push_vlan_res->action;
 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
@@ -7481,7 +7508,7 @@ struct field_modify_info modify_tcp[] = {
 							   error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
@@ -7491,7 +7518,7 @@ struct field_modify_info modify_tcp[] = {
 							   error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
@@ -7501,7 +7528,7 @@ struct field_modify_info modify_tcp[] = {
 					(dev, actions, dev_flow, attr, error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			} else {
 				/* Handle encap without preceding decap. */
 				if (flow_dv_create_action_l2_encap
@@ -7509,7 +7536,7 @@ struct field_modify_info modify_tcp[] = {
 				     error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			}
 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
@@ -7521,7 +7548,7 @@ struct field_modify_info modify_tcp[] = {
 				    (dev, dev_flow, attr->transfer, error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			}
 			/* If decap is followed by encap, handle it at encap. */
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
@@ -7553,7 +7580,7 @@ struct field_modify_info modify_tcp[] = {
 						 "cannot create jump action.");
 			}
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.jump->action;
+					handle->dvh.jump->action;
 			action_flags |= MLX5_FLOW_ACTION_JUMP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
@@ -7686,7 +7713,7 @@ struct field_modify_info modify_tcp[] = {
 					(dev, mhdr_res, dev_flow, error))
 					return -rte_errno;
 				dev_flow->dv.actions[modify_action_position] =
-				dev_flow->handle.dvh.modify_hdr->verbs_action;
+					handle->dvh.modify_hdr->verbs_action;
 			}
 			break;
 		default:
@@ -7697,7 +7724,7 @@ struct field_modify_info modify_tcp[] = {
 			modify_action_position = actions_n++;
 	}
 	dev_flow->dv.actions_n = actions_n;
-	dev_flow->handle.act_flags = action_flags;
+	handle->act_flags = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 		int item_type = items->type;
@@ -7902,7 +7929,7 @@ struct field_modify_info modify_tcp[] = {
 	 * Layers may be already initialized from prefix flow if this dev_flow
 	 * is the suffix flow.
 	 */
-	dev_flow->handle.layers |= item_flags;
+	handle->layers |= item_flags;
 	if (action_flags & MLX5_FLOW_ACTION_RSS)
 		flow_dv_hashfields_set(dev_flow);
 	/* Register matcher. */
@@ -7937,19 +7964,21 @@ struct field_modify_info modify_tcp[] = {
 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 		struct rte_flow_error *error)
 {
-	struct mlx5_flow_resource_dv *dv;
+	struct mlx5_flow_dv_workspace *dv;
 	struct mlx5_flow_handle *dh;
 	struct mlx5_flow_handle_dv *dv_h;
 	struct mlx5_flow *dev_flow;
 	struct mlx5_priv *priv = dev->data->dev_private;
 	int n;
 	int err;
+	int idx;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
+	for (idx = priv->flow_idx - 1; idx >= 0; idx--) {
+		dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
 		dv = &dev_flow->dv;
-		n = dv->actions_n;
+		dh = dev_flow->handle;
 		dv_h = &dh->dvh;
+		n = dv->actions_n;
 		if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
 			if (dv->transfer) {
 				dv->actions[n++] = priv->sh->esw_drop_action;
@@ -7982,7 +8011,7 @@ struct field_modify_info modify_tcp[] = {
 					 dev_flow->hash_fields,
 					 (*flow->rss.queue),
 					 flow->rss.queue_num,
-					 !!(dev_flow->handle.layers &
+					 !!(dh->layers &
 					    MLX5_FLOW_LAYER_TUNNEL));
 			}
 			if (!hrxq) {
@@ -8020,17 +8049,16 @@ struct field_modify_info modify_tcp[] = {
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		struct mlx5_flow_handle *dh_tmp = &dev_flow->handle;
-		if (dh_tmp->hrxq) {
-			if (dh_tmp->act_flags & MLX5_FLOW_ACTION_DROP)
+	LIST_FOREACH(dh, &flow->dev_handles, next) {
+		if (dh->hrxq) {
+			if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dh_tmp->hrxq);
-			dh_tmp->hrxq = NULL;
+				mlx5_hrxq_release(dev, dh->hrxq);
+			dh->hrxq = NULL;
 		}
-		if (dh_tmp->vf_vlan.tag && dh_tmp->vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dh_tmp->vf_vlan);
+		if (dh->vf_vlan.tag && dh->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
@@ -8041,17 +8069,17 @@ struct field_modify_info modify_tcp[] = {
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
 flow_dv_matcher_release(struct rte_eth_dev *dev,
-			struct mlx5_flow *flow)
+			struct mlx5_flow_handle *handle)
 {
-	struct mlx5_flow_dv_matcher *matcher = flow->handle.dvh.matcher;
+	struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
 
 	MLX5_ASSERT(matcher->matcher_object);
 	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
@@ -8074,17 +8102,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release an encap/decap resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
+flow_dv_encap_decap_resource_release(struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource =
-						flow->handle.dvh.encap_decap;
+						handle->dvh.encap_decap;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
@@ -8107,18 +8135,18 @@ struct field_modify_info modify_tcp[] = {
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
-				  struct mlx5_flow *flow)
+				  struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
-						flow->handle.dvh.jump;
+							handle->dvh.jump;
 	struct mlx5_flow_tbl_data_entry *tbl_data =
 			container_of(cache_resource,
 				     struct mlx5_flow_tbl_data_entry, jump);
@@ -8142,17 +8170,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release a modify-header resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
+flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
-						flow->handle.dvh.modify_hdr;
+							handle->dvh.modify_hdr;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
@@ -8173,17 +8201,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release port ID action resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
+flow_dv_port_id_action_resource_release(struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_port_id_action_resource *cache_resource =
-						flow->handle.dvh.port_id_action;
+						handle->dvh.port_id_action;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
@@ -8204,17 +8232,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release push vlan action resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
+flow_dv_push_vlan_action_resource_release(struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
-						flow->handle.dvh.push_vlan_res;
+						handle->dvh.push_vlan_res;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
@@ -8245,18 +8273,16 @@ struct field_modify_info modify_tcp[] = {
 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct mlx5_flow_handle *dh;
-	struct mlx5_flow *dev_flow;
 
 	if (!flow)
 		return;
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
+	LIST_FOREACH(dh, &flow->dev_handles, next) {
 		if (dh->ib_flow) {
 			claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
 			dh->ib_flow = NULL;
 		}
 		if (dh->hrxq) {
-			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
+			if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
 				mlx5_hrxq_release(dev, dh->hrxq);
@@ -8279,7 +8305,7 @@ struct field_modify_info modify_tcp[] = {
 static void
 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
 
 	if (!flow)
 		return;
@@ -8292,25 +8318,25 @@ struct field_modify_info modify_tcp[] = {
 		mlx5_flow_meter_detach(flow->meter);
 		flow->meter = NULL;
 	}
-	while (!LIST_EMPTY(&flow->dev_flows)) {
-		dev_flow = LIST_FIRST(&flow->dev_flows);
-		LIST_REMOVE(dev_flow, next);
-		if (dev_flow->handle.dvh.matcher)
-			flow_dv_matcher_release(dev, dev_flow);
-		if (dev_flow->handle.dvh.encap_decap)
-			flow_dv_encap_decap_resource_release(dev_flow);
-		if (dev_flow->handle.dvh.modify_hdr)
-			flow_dv_modify_hdr_resource_release(dev_flow);
-		if (dev_flow->handle.dvh.jump)
-			flow_dv_jump_tbl_resource_release(dev, dev_flow);
-		if (dev_flow->handle.dvh.port_id_action)
-			flow_dv_port_id_action_resource_release(dev_flow);
-		if (dev_flow->handle.dvh.push_vlan_res)
-			flow_dv_push_vlan_action_resource_release(dev_flow);
-		if (dev_flow->handle.dvh.tag_resource)
+	while (!LIST_EMPTY(&flow->dev_handles)) {
+		dev_handle = LIST_FIRST(&flow->dev_handles);
+		LIST_REMOVE(dev_handle, next);
+		if (dev_handle->dvh.matcher)
+			flow_dv_matcher_release(dev, dev_handle);
+		if (dev_handle->dvh.encap_decap)
+			flow_dv_encap_decap_resource_release(dev_handle);
+		if (dev_handle->dvh.modify_hdr)
+			flow_dv_modify_hdr_resource_release(dev_handle);
+		if (dev_handle->dvh.jump)
+			flow_dv_jump_tbl_resource_release(dev, dev_handle);
+		if (dev_handle->dvh.port_id_action)
+			flow_dv_port_id_action_resource_release(dev_handle);
+		if (dev_handle->dvh.push_vlan_res)
+			flow_dv_push_vlan_action_resource_release(dev_handle);
+		if (dev_handle->dvh.tag_resource)
 			flow_dv_tag_release(dev,
-					dev_flow->handle.dvh.tag_resource);
-		rte_free(dev_flow);
+					    dev_handle->dvh.tag_resource);
+		rte_free(dev_handle);
 	}
 }
 
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 08185ec..ccd3395 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -253,7 +253,7 @@
  *   Size in bytes of the specification to copy.
  */
 static void
-flow_verbs_spec_add(struct mlx5_flow_resource_verbs *verbs,
+flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
 		    void *src, unsigned int size)
 {
 	void *dst;
@@ -263,7 +263,7 @@
 	MLX5_ASSERT(verbs->specs);
 	dst = (void *)(verbs->specs + verbs->size);
 	memcpy(dst, src, size);
-	++verbs->attr->num_of_specs;
+	++verbs->attr.num_of_specs;
 	verbs->size += size;
 }
 
@@ -392,9 +392,9 @@
 	if (!(item_flags & l2m))
 		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
 	else
-		flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
+		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
 	if (!tunnel)
-		dev_flow->handle.vf_vlan.tag =
+		dev_flow->handle->vf_vlan.tag =
 			rte_be_to_cpu_16(spec->tci) & 0x0fff;
 }
 
@@ -744,7 +744,7 @@
 			      const struct rte_flow_item *item __rte_unused,
 			      uint64_t item_flags)
 {
-	struct mlx5_flow_resource_verbs *verbs = &dev_flow->verbs;
+	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
 	struct ibv_flow_spec_tunnel tunnel = {
@@ -774,11 +774,11 @@
 	}
 #endif
 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
-		flow_verbs_item_gre_ip_protocol_update(verbs->attr,
+		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
 						       IBV_FLOW_SPEC_IPV4_EXT,
 						       IPPROTO_GRE);
 	else
-		flow_verbs_item_gre_ip_protocol_update(verbs->attr,
+		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
 						       IBV_FLOW_SPEC_IPV6,
 						       IPPROTO_GRE);
 	flow_verbs_spec_add(verbs, &tunnel, size);
@@ -1385,6 +1385,8 @@
  * The required size is calculate based on the actions and items. This function
  * also returns the detected actions and items for later use.
  *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
  * @param[in] attr
  *   Pointer to the flow attributes.
  * @param[in] items
@@ -1399,25 +1401,45 @@
  *   is set.
  */
 static struct mlx5_flow *
-flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_verbs_prepare(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr __rte_unused,
 		   const struct rte_flow_item items[],
 		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	size_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
+	size_t size = 0;
 	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
+	struct mlx5_priv *priv = dev->data->dev_private;
 
 	size += flow_verbs_get_actions_size(actions);
 	size += flow_verbs_get_items_size(items);
-	dev_flow = rte_calloc(__func__, 1, size, 0);
-	if (!dev_flow) {
+	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
+		rte_flow_error_set(error, E2BIG,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Verbs spec/action size too large");
+		return NULL;
+	}
+	/* Sanity check to avoid corrupting the memory. */
+	if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+		rte_flow_error_set(error, ENOSPC,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "not free temporary device flow");
+		return NULL;
+	}
+	dev_handle = rte_calloc(__func__, 1, MLX5_FLOW_HANDLE_VERBS_SIZE, 0);
+	if (!dev_handle) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				   "not enough memory to create flow");
+				   "not enough memory to create flow handle");
 		return NULL;
 	}
-	dev_flow->verbs.attr = (void *)(dev_flow + 1);
-	dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
+	/* No multi-thread support. */
+	dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+	dev_flow->handle = dev_handle;
+	/* Memcpy is used, only size needs to be cleared to 0. */
+	dev_flow->verbs.size = 0;
+	dev_flow->verbs.attr.num_of_specs = 0;
 	dev_flow->ingress = attr->ingress;
	/* No need to set the transfer attribute: not supported in Verbs mode. */
 	return dev_flow;
@@ -1499,7 +1521,7 @@
 						  "action not supported");
 		}
 	}
-	dev_flow->handle.act_flags = action_flags;
+	dev_flow->handle->act_flags = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 
@@ -1601,10 +1623,11 @@
 						  "item not supported");
 		}
 	}
-	dev_flow->handle.layers = item_flags;
-	dev_flow->verbs.attr->priority =
+	dev_flow->handle->layers = item_flags;
+	/* Other members of attr will be ignored. */
+	dev_flow->verbs.attr.priority =
 		mlx5_flow_adjust_priority(dev, priority, subpriority);
-	dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port;
+	dev_flow->verbs.attr.port = (uint8_t)priv->ibv_port;
 	return 0;
 }
 
@@ -1619,26 +1642,24 @@
 static void
 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow_handle *dh;
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *handle;
 
 	if (!flow)
 		return;
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
-		if (dh->ib_flow) {
-			claim_zero(mlx5_glue->destroy_flow(dh->ib_flow));
-			dh->ib_flow = NULL;
+	LIST_FOREACH(handle, &flow->dev_handles, next) {
+		if (handle->ib_flow) {
+			claim_zero(mlx5_glue->destroy_flow(handle->ib_flow));
+			handle->ib_flow = NULL;
 		}
-		if (dh->hrxq) {
-			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
+		if (handle->hrxq) {
+			if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dh->hrxq);
-			dh->hrxq = NULL;
+				mlx5_hrxq_release(dev, handle->hrxq);
+			handle->hrxq = NULL;
 		}
-		if (dh->vf_vlan.tag && dh->vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+		if (handle->vf_vlan.tag && handle->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
 	}
 }
 
@@ -1653,15 +1674,15 @@
 static void
 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *handle;
 
 	if (!flow)
 		return;
 	flow_verbs_remove(dev, flow);
-	while (!LIST_EMPTY(&flow->dev_flows)) {
-		dev_flow = LIST_FIRST(&flow->dev_flows);
-		LIST_REMOVE(dev_flow, next);
-		rte_free(dev_flow);
+	while (!LIST_EMPTY(&flow->dev_handles)) {
+		handle = LIST_FIRST(&flow->dev_handles);
+		LIST_REMOVE(handle, next);
+		rte_free(handle);
 	}
 	if (flow->counter) {
 		flow_verbs_counter_release(dev, flow->counter);
@@ -1687,15 +1708,17 @@
 		 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_handle *dh;
+	struct mlx5_flow_handle *handle;
 	struct mlx5_flow *dev_flow;
 	int err;
-
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
-		if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP) {
-			dh->hrxq = mlx5_hrxq_drop_new(dev);
-			if (!dh->hrxq) {
+	int idx;
+
+	for (idx = priv->flow_idx - 1; idx >= 0; idx--) {
+		dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
+		handle = dev_flow->handle;
+		if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
+			handle->hrxq = mlx5_hrxq_drop_new(dev);
+			if (!handle->hrxq) {
 				rte_flow_error_set
 					(error, errno,
 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -1717,7 +1740,7 @@
 						dev_flow->hash_fields,
 						(*flow->rss.queue),
 						flow->rss.queue_num,
-						!!(dev_flow->handle.layers &
+						!!(handle->layers &
 						MLX5_FLOW_LAYER_TUNNEL));
 			if (!hrxq) {
 				rte_flow_error_set
@@ -1726,11 +1749,11 @@
 					 "cannot get hash queue");
 				goto error;
 			}
-			dh->hrxq = hrxq;
+			handle->hrxq = hrxq;
 		}
-		dh->ib_flow = mlx5_glue->create_flow(dh->hrxq->qp,
-						     dev_flow->verbs.attr);
-		if (!dh->ib_flow) {
+		handle->ib_flow = mlx5_glue->create_flow(handle->hrxq->qp,
+						     &dev_flow->verbs.attr);
+		if (!handle->ib_flow) {
 			rte_flow_error_set(error, errno,
 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					   NULL,
@@ -1738,31 +1761,29 @@
 			goto error;
 		}
 		if (priv->vmwa_context &&
-		    dev_flow->handle.vf_vlan.tag &&
-		    !dev_flow->handle.vf_vlan.created) {
+		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
 			/*
 			 * The rule contains the VLAN pattern.
 			 * For VF we are going to create VLAN
 			 * interface to make hypervisor set correct
 			 * e-Switch vport context.
 			 */
-			mlx5_vlan_vmwa_acquire(dev, &dev_flow->handle.vf_vlan);
+			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
 		}
 	}
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
-		if (dh->hrxq) {
-			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
+	LIST_FOREACH(handle, &flow->dev_handles, next) {
+		if (handle->hrxq) {
+			if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dh->hrxq);
-			dh->hrxq = NULL;
+				mlx5_hrxq_release(dev, handle->hrxq);
+			handle->hrxq = NULL;
 		}
-		if (dh->vf_vlan.tag && dh->vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+		if (handle->vf_vlan.tag && handle->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
 	}
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index b686ee8..7bcfe5e 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -307,6 +307,7 @@
 		mlx5_txq_stop(dev);
 		return -rte_errno;
 	}
+	/* Set started flag here for the following steps like control flow. */
 	dev->data->dev_started = 1;
 	ret = mlx5_rx_intr_vec_enable(dev);
 	if (ret) {
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v3 4/4] net/mlx5: check device stat before creating flow
  2020-03-24 15:16   ` [dpdk-dev] [PATCH v3 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
                       ` (2 preceding siblings ...)
  2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 3/4] net/mlx5: separate the flow handle resource Bing Zhao
@ 2020-03-24 15:16     ` Bing Zhao
  3 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-03-24 15:16 UTC (permalink / raw)
  To: orika, rasland, matan; +Cc: viacheslavo, dev

Flows of a mlx5 device are categorized into two types:
  1. Default flows created by the PMD itself to enable the traffic
     and give the packets their default behavior. These flows are
     transparent to the upper-layer application.
  2. Flows created by the application based on its own needs.
In the old cached mode, an application flow could be created before
the device was started. When the device started, all the cached
flows were applied to the hardware and took effect at the same time.
In non-cached mode, no flow is ever cached when stopping a device,
so it makes no sense to insert any flow before the device is
started. Default flows owned by the PMD are not affected by this
check.
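
For clarity, below is a minimal application-side sketch of the
resulting ordering requirement. The attribute/pattern/actions
contents are illustrative only and not part of this patch:

	struct rte_flow_error error;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow *flow;

	/* Port not started yet: the creation is rejected now. */
	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
	/* flow == NULL here and rte_errno is set to ENODEV. */

	if (rte_eth_dev_start(port_id) == 0)
		/* After start, the rule is applied to hardware. */
		flow = rte_flow_create(port_id, &attr, pattern, actions,
				       &error);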

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index bf0728d..18c881b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4328,7 +4328,11 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		if (ret)
 			goto error;
 	}
-	if (dev->data->dev_started) {
+	/*
+	 * If the flow is external (from application) OR device is started, then
+	 * the flow will be applied immediately.
+	 */
+	if (external || dev->data->dev_started) {
 		ret = flow_drv_apply(dev, flow, error);
 		if (ret < 0)
 			goto error;
@@ -4420,6 +4424,17 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
+	/*
+	 * If the device is not started yet, it is not allowed to create a
+	 * flow from the application. PMD default flows and traffic control
+	 * flows are not affected.
+	 */
+	if (unlikely(!dev->data->dev_started)) {
+		rte_errno = ENODEV;
+		DRV_LOG(DEBUG, "port %u is not started when "
+			"inserting a flow", dev->data->port_id);
+		return NULL;
+	}
 	return flow_list_create(dev, &priv->flows,
 				attr, items, actions, true, error);
 }
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules
  2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
                     ` (6 preceding siblings ...)
  2020-03-24 15:16   ` [dpdk-dev] [PATCH v3 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
@ 2020-03-24 15:33   ` Bing Zhao
  2020-03-24 15:33     ` [dpdk-dev] [PATCH v4 1/4] net/mlx5: change operations for non-cached flows Bing Zhao
                       ` (5 more replies)
  7 siblings, 6 replies; 26+ messages in thread
From: Bing Zhao @ 2020-03-24 15:33 UTC (permalink / raw)
  To: orika, rasland, matan; +Cc: viacheslavo, dev

This patch set removes the flow rules cache and moves to the
non-cached mode for both DV and Verbs flows.

In the device closing stage, all the software resources of the
created flows will be freed and the corresponding hardware resources
will be released. The total memory cost is then reduced and the
behavior of the mlx5 PMD complies fully with the ethdev API
expectations.

After closing a device, all the flow rules stored in the application
layer are no longer valid. The application should synchronize its
database and must not try to destroy any rule on this device.
After a device restart, all the needed flow rules should be
reinserted via the create routine of the rte_flow library.
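
A rough sketch of the expected application-side handling around a
restart is shown below. The rule database "db" is a hypothetical
application-level structure, not a PMD or rte_flow interface:

	rte_eth_dev_stop(port_id);
	/*
	 * All rte_flow pointers previously returned for this port are
	 * stale now; drop them from the application database instead
	 * of calling rte_flow_destroy() on them.
	 */
	for (i = 0; i < db->n_rules; i++)
		db->flows[i] = NULL;
	rte_eth_dev_start(port_id);
	/* Re-insert every needed rule via the create routine. */
	for (i = 0; i < db->n_rules; i++)
		db->flows[i] = rte_flow_create(port_id, &db->attr[i],
					       db->pattern[i],
					       db->actions[i], &error);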

---
v2 Changes:
    Fix the compiling error with MLX5 Debug mode in the 4th commit
    of "net/mlx5: introduce handle structure for DV flows".
v3 Changes:
    Refactor the device flow related structures to support non-cached
    mode for both Verbs and DV flows.
v4 Changes:
    Fix the code style warning for stdbool type and a typo in the
    code line comments.
---

Bing Zhao (4):
  net/mlx5: change operations for non-cached flows
  net/mlx5: reorganize mlx5 flow structures
  net/mlx5: separate the flow handle resource
  net/mlx5: check device stat before creating flow

 drivers/net/mlx5/mlx5.c            |  18 ++-
 drivers/net/mlx5/mlx5.h            |   9 +-
 drivers/net/mlx5/mlx5_flow.c       | 197 +++++++++++++++++------
 drivers/net/mlx5/mlx5_flow.h       | 179 ++++++++++++++-------
 drivers/net/mlx5/mlx5_flow_dv.c    | 311 ++++++++++++++++++++-----------------
 drivers/net/mlx5/mlx5_flow_verbs.c | 156 +++++++++++--------
 drivers/net/mlx5/mlx5_trigger.c    |  26 ++--
 7 files changed, 573 insertions(+), 323 deletions(-)

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v4 1/4] net/mlx5: change operations for non-cached flows
  2020-03-24 15:33   ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
@ 2020-03-24 15:33     ` Bing Zhao
  2020-03-24 15:33     ` [dpdk-dev] [PATCH v4 2/4] net/mlx5: reorganize mlx5 flow structures Bing Zhao
                       ` (4 subsequent siblings)
  5 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-03-24 15:33 UTC (permalink / raw)
  To: orika, rasland, matan; +Cc: viacheslavo, dev

When stopping a mlx5 device, all the inserted flows are flushed
since they are in non-cached mode, and nothing more needs to be
done for them in the device closing stage.
If the device restarts after being stopped, no non-cached flow is
re-inserted by the PMD.
The flush operation through the rte_flow interface remains the
same, and all the flows are flushed actively.
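
The resulting stop-time sequence, condensed from the mlx5_trigger.c
hunk below (unrelated steps and error handling omitted):

	/* Remove the default mreg copy action. */
	mlx5_flow_stop_default(dev);
	/* Control flows for default traffic are removed first. */
	mlx5_traffic_disable(dev);
	/* Active flush; RX queue flags are cleared in this interface. */
	mlx5_flow_list_flush(dev, &priv->flows, true);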

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5.c         | 11 +++++++++-
 drivers/net/mlx5/mlx5.h         |  5 ++++-
 drivers/net/mlx5/mlx5_flow.c    | 48 ++++++++++++++++++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_trigger.c | 25 +++++++++++++--------
 4 files changed, 75 insertions(+), 14 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 94aaa60..0613f70 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1234,8 +1234,17 @@ struct mlx5_flow_id_pool *
 	/* In case mlx5_dev_stop() has not been called. */
 	mlx5_dev_interrupt_handler_uninstall(dev);
 	mlx5_dev_interrupt_handler_devx_uninstall(dev);
+	/*
+	 * If the default mreg copy action was removed at the stop stage,
+	 * the search will find nothing and nothing more will be done.
+	 */
+	mlx5_flow_stop_default(dev);
 	mlx5_traffic_disable(dev);
-	mlx5_flow_flush(dev, NULL);
+	/*
+	 * If all the flows are already flushed in the device stop stage,
+	 * then this will return directly without any action.
+	 */
+	mlx5_flow_list_flush(dev, &priv->flows, true);
 	mlx5_flow_meter_flush(dev, NULL);
 	/* Prevent crashes when queues are still in use. */
 	dev->rx_pkt_burst = removed_rx_burst;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index d7c519b..98e5fa5 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -712,7 +712,8 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
 				  struct rte_flow_error *error);
 int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 		      struct rte_flow_error *error);
-void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list);
+void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list,
+			  bool active);
 int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
 int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 		    const struct rte_flow_action *action, void *data,
@@ -725,6 +726,8 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 			 void *arg);
 int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list);
 void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
+int mlx5_flow_start_default(struct rte_eth_dev *dev);
+void mlx5_flow_stop_default(struct rte_eth_dev *dev);
 int mlx5_flow_verify(struct rte_eth_dev *dev);
 int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);
 int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 2ef6558..aad9689 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -8,6 +8,7 @@
 #include <stdalign.h>
 #include <stdint.h>
 #include <string.h>
+#include <stdbool.h>
 
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -4449,15 +4450,25 @@ struct rte_flow *
  *   Pointer to Ethernet device.
  * @param list
  *   Pointer to a TAILQ flow list.
+ * @param active
+ *   If flushing is called actively.
  */
 void
-mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
+mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list,
+		     bool active)
 {
+	uint32_t num_flushed = 0;
+
 	while (!TAILQ_EMPTY(list)) {
 		struct rte_flow *flow;
 
 		flow = TAILQ_FIRST(list);
 		flow_list_destroy(dev, list, flow);
+		num_flushed++;
+	}
+	if (active) {
+		DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
+			dev->data->port_id, num_flushed);
 	}
 }
 
@@ -4523,6 +4534,37 @@ struct rte_flow *
 }
 
 /**
+ * Stop all default actions for flows.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param list
+ *   Pointer to a TAILQ flow list.
+ */
+void
+mlx5_flow_stop_default(struct rte_eth_dev *dev)
+{
+	flow_mreg_del_default_copy_action(dev);
+}
+
+/**
+ * Start all default actions for flows.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_start_default(struct rte_eth_dev *dev)
+{
+	struct rte_flow_error error;
+
+	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
+	return flow_mreg_add_default_copy_action(dev, &error);
+}
+
+/**
  * Verify the flow list is empty
  *
  * @param dev
@@ -4737,7 +4779,7 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	mlx5_flow_list_flush(dev, &priv->flows);
+	mlx5_flow_list_flush(dev, &priv->flows, false);
 	return 0;
 }
 
@@ -5179,7 +5221,7 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	mlx5_flow_list_flush(dev, &priv->flows);
+	mlx5_flow_list_flush(dev, &priv->flows, false);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 571b7a0..0801cb6 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -269,7 +269,6 @@
 int
 mlx5_dev_start(struct rte_eth_dev *dev)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
 	int ret;
 	int fine_inline;
 
@@ -318,14 +317,19 @@
 	mlx5_stats_init(dev);
 	ret = mlx5_traffic_enable(dev);
 	if (ret) {
-		DRV_LOG(DEBUG, "port %u failed to set defaults flows",
+		DRV_LOG(ERR, "port %u failed to set defaults flows",
 			dev->data->port_id);
 		goto error;
 	}
-	ret = mlx5_flow_start(dev, &priv->flows);
+	/*
+	 * In non-cached mode, only the default mreg copy action needs to
+	 * be started, since no flow created by the application exists
+	 * anymore. But it is worth wrapping the interface for future use.
+	 */
+	ret = mlx5_flow_start_default(dev);
 	if (ret) {
-		DRV_LOG(DEBUG, "port %u failed to set flows",
-			dev->data->port_id);
+		DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
+			dev->data->port_id, strerror(rte_errno));
 		goto error;
 	}
 	rte_wmb();
@@ -339,7 +343,7 @@
 	ret = rte_errno; /* Save rte_errno before cleanup. */
 	/* Rollback. */
 	dev->data->dev_started = 0;
-	mlx5_flow_stop(dev, &priv->flows);
+	mlx5_flow_stop_default(dev);
 	mlx5_traffic_disable(dev);
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
@@ -369,8 +373,11 @@
 	mlx5_mp_req_stop_rxtx(dev);
 	usleep(1000 * priv->rxqs_n);
 	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
-	mlx5_flow_stop(dev, &priv->flows);
+	mlx5_flow_stop_default(dev);
+	/* Control flows for default traffic can be removed first. */
 	mlx5_traffic_disable(dev);
+	/* All RX queue flags will be cleared in the flush interface. */
+	mlx5_flow_list_flush(dev, &priv->flows, true);
 	mlx5_rx_intr_vec_disable(dev);
 	mlx5_dev_interrupt_handler_uninstall(dev);
 	mlx5_txq_stop(dev);
@@ -529,7 +536,7 @@
 	return 0;
 error:
 	ret = rte_errno; /* Save rte_errno before cleanup. */
-	mlx5_flow_list_flush(dev, &priv->ctrl_flows);
+	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
 	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -546,7 +553,7 @@
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	mlx5_flow_list_flush(dev, &priv->ctrl_flows);
+	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
 }
 
 /**
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v4 2/4] net/mlx5: reorganize mlx5 flow structures
  2020-03-24 15:33   ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
  2020-03-24 15:33     ` [dpdk-dev] [PATCH v4 1/4] net/mlx5: change operations for non-cached flows Bing Zhao
@ 2020-03-24 15:33     ` Bing Zhao
  2020-03-24 15:33     ` [dpdk-dev] [PATCH v4 3/4] net/mlx5: separate the flow handle resource Bing Zhao
                       ` (3 subsequent siblings)
  5 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-03-24 15:33 UTC (permalink / raw)
  To: orika, rasland, matan; +Cc: viacheslavo, dev

The common structures used for mlx5 flow creation and destruction
are reorganized in order to separate the parts needed only for
destruction from all the other items.
"mlx5_flow" contains the items common to DV and Verbs flows plus
the DV-only / Verbs-only specific items. These items are used only
when creating a flow.
At the end of "mlx5_flow", a nested structure "mlx5_flow_handle"
is located. It contains all the items used both for creating and
destroying a flow, again split into common items and DV / Verbs
specific items.
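
A condensed view of the resulting layout, with most fields elided
(see the mlx5_flow.h hunk below for the full definitions):

	/* Used both when creating and destroying a flow. */
	struct mlx5_flow_handle {
		uint64_t layers;		/* common part */
		uint64_t act_flags;
		void *ib_flow;
		struct mlx5_hrxq *hrxq;
	#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		struct mlx5_flow_handle_dv dvh;	/* DV-specific part */
	#endif
	};

	/* The rest of "mlx5_flow" is used only when creating a flow. */
	struct mlx5_flow {
		struct rte_flow *flow;		/* the main flow */
		union {
			struct mlx5_flow_resource_dv dv;
			struct mlx5_flow_resource_verbs verbs;
		};
		struct mlx5_flow_handle handle;	/* nested at the end */
	};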

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c       |  43 ++++----
 drivers/net/mlx5/mlx5_flow.h       | 108 ++++++++++----------
 drivers/net/mlx5/mlx5_flow_dv.c    | 197 +++++++++++++++++++------------------
 drivers/net/mlx5/mlx5_flow_verbs.c |  89 ++++++++---------
 4 files changed, 223 insertions(+), 214 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index aad9689..b2de4e6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -720,9 +720,9 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->actions &
+	const int mark = !!(dev_flow->handle.act_flags &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+	const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
 
 	for (i = 0; i != flow->rss.queue_num; ++i) {
@@ -751,7 +751,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			/* Increase the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
 				if ((tunnels_info[j].tunnel &
-				     dev_flow->layers) ==
+				     dev_flow->handle.layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]++;
 					break;
@@ -793,9 +793,9 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->actions &
+	const int mark = !!(dev_flow->handle.act_flags &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+	const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
 
 	MLX5_ASSERT(dev->data->dev_started);
@@ -820,7 +820,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			/* Decrease the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
 				if ((tunnels_info[j].tunnel &
-				     dev_flow->layers) ==
+				     dev_flow->handle.layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]--;
 					break;
@@ -2312,8 +2312,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	struct mlx5_flow *dev_flow;
 
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		if (dev_flow->qrss_id)
-			flow_qrss_free_id(dev, dev_flow->qrss_id);
+		if (dev_flow->handle.qrss_id)
+			flow_qrss_free_id(dev, dev_flow->handle.qrss_id);
 }
 
 static int
@@ -2696,18 +2696,22 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	uint64_t layers = 0;
 
-	/* If no decap actions, use the layers directly. */
-	if (!(dev_flow->actions & MLX5_FLOW_ACTION_DECAP))
-		return dev_flow->layers;
+	/*
+	 * The layers bits could be cached in a local variable, but usually
+	 * the compiler will do this optimization on the source code.
+	 * If no decap actions, use the layers directly.
+	 */
+	if (!(dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DECAP))
+		return dev_flow->handle.layers;
 	/* Convert L3 layers with decap action. */
-	if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+	if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
-	else if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+	else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
 	/* Convert L4 layers with decap action.  */
-	if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
+	if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
 		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
-	else if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
+	else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
 		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
 	return layers;
 }
@@ -3453,7 +3457,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	 * flow may need some user defined item layer flags.
 	 */
 	if (prefix_layers)
-		dev_flow->layers = prefix_layers;
+		dev_flow->handle.layers = prefix_layers;
 	if (sub_flow)
 		*sub_flow = dev_flow;
 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
@@ -3968,8 +3972,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			 * reallocation becomes possible (for example, for
 			 * other flows in other threads).
 			 */
-			dev_flow->qrss_id = qrss_id;
-			qrss_id = 0;
+			dev_flow->handle.qrss_id = qrss_id;
 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
 						   error);
 			if (ret < 0)
@@ -3984,6 +3987,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 					      external, error);
 		if (ret < 0)
 			goto exit;
+		/* qrss ID should be freed if failed. */
+		qrss_id = 0;
 		MLX5_ASSERT(dev_flow);
 	}
 
@@ -4080,7 +4085,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			ret = -rte_errno;
 			goto exit;
 		}
-		dev_flow->mtr_flow_id = mtr_tag_id;
+		dev_flow->handle.mtr_flow_id = mtr_tag_id;
 		/* Setting the sfx group atrr. */
 		sfx_attr.group = sfx_attr.transfer ?
 				(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 13c8589..f3aea53 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -464,25 +464,28 @@ struct mlx5_flow_tbl_data_entry {
 	/**< jump resource, at most one for each table created. */
 };
 
-/*
- * Max number of actions per DV flow.
- * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
- * In rdma-core file providers/mlx5/verbs.c
- */
-#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
+/* Verbs specification header. */
+struct ibv_spec_header {
+	enum ibv_flow_spec_type type;
+	uint16_t size;
+};
+
+struct mlx5_flow_rss {
+	uint32_t level;
+	uint32_t queue_num; /**< Number of entries in @p queue. */
+	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
+	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+};
 
-/* DV flows structure. */
-struct mlx5_flow_dv {
-	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
+/** Device flow handle structure for DV mode only. */
+struct mlx5_flow_handle_dv {
 	/* Flow DV api: */
 	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
-	struct mlx5_flow_dv_match_params value;
-	/**< Holds the value that the packet is compared to. */
 	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
 	/**< Pointer to encap/decap resource in cache. */
 	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
 	/**< Pointer to modify header resource in cache. */
-	struct ibv_flow *flow; /**< Installed flow. */
 	struct mlx5_flow_dv_jump_tbl_resource *jump;
 	/**< Pointer to the jump action resource. */
 	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
@@ -493,65 +496,64 @@ struct mlx5_flow_dv {
 	/**< Pointer to push VLAN action resource in cache. */
 	struct mlx5_flow_dv_tag_resource *tag_resource;
 	/**< pointer to the tag action. */
+};
+
+/** Device flow handle structure: used both for creating & destroying. */
+struct mlx5_flow_handle {
+	uint64_t layers;
+	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
+	uint64_t act_flags;
+	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
+	void *ib_flow; /**< Verbs flow pointer. */
+	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
+	struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
+	union {
+		uint32_t qrss_id; /**< Uniqie Q/RSS suffix subflow tag. */
+		uint32_t mtr_flow_id; /**< Unique meter match flow id. */
+	};
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
-	/**< Action list. */
+	struct mlx5_flow_handle_dv dvh;
 #endif
-	int actions_n; /**< number of actions. */
 };
 
-/* Verbs specification header. */
-struct ibv_spec_header {
-	enum ibv_flow_spec_type type;
-	uint16_t size;
-};
+/*
+ * Max number of actions per DV flow.
+ * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
+ * in rdma-core file providers/mlx5/verbs.c.
+ */
+#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
 
-/** Handles information leading to a drop fate. */
-struct mlx5_flow_verbs {
-	LIST_ENTRY(mlx5_flow_verbs) next;
-	unsigned int size; /**< Size of the attribute. */
-	struct {
-		struct ibv_flow_attr *attr;
-		/**< Pointer to the Specification buffer. */
-		uint8_t *specs; /**< Pointer to the specifications. */
-	};
-	struct ibv_flow *flow; /**< Verbs flow pointer. */
-	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
-	struct mlx5_vf_vlan vf_vlan;
-	/**< Structure for VF VLAN workaround. */
+/** Device flow structure only for DV flow creation. */
+struct mlx5_flow_resource_dv {
+	uint32_t group; /**< The group index. */
+	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
+	int actions_n; /**< number of actions. */
+	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */
+	struct mlx5_flow_dv_match_params value;
+	/**< Holds the value that the packet is compared to. */
 };
 
-struct mlx5_flow_rss {
-	uint32_t level;
-	uint32_t queue_num; /**< Number of entries in @p queue. */
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
-	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
-	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+/** Device flow structure only for Verbs flow creation. */
+struct mlx5_flow_resource_verbs {
+	unsigned int size; /**< Size of the attribute. */
+	struct ibv_flow_attr *attr; /**< Pointer to the Specification buffer. */
+	uint8_t *specs; /**< Pointer to the specifications. */
 };
 
 /** Device flow structure. */
 struct mlx5_flow {
-	LIST_ENTRY(mlx5_flow) next;
+	LIST_ENTRY(mlx5_flow) next; /**< Pointer to next device flow. */
 	struct rte_flow *flow; /**< Pointer to the main flow. */
-	uint64_t layers;
-	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
-	uint64_t actions;
-	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
 	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+	bool external; /**< true if the flow is created external to PMD. */
 	uint8_t ingress; /**< 1 if the flow is ingress. */
-	uint32_t group; /**< The group index. */
-	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
 	union {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		struct mlx5_flow_dv dv;
+		struct mlx5_flow_resource_dv dv;
 #endif
-		struct mlx5_flow_verbs verbs;
-	};
-	union {
-		uint32_t qrss_id; /**< Uniqie Q/RSS suffix subflow tag. */
-		uint32_t mtr_flow_id; /**< Unique meter match flow id. */
+		struct mlx5_flow_resource_verbs verbs;
 	};
-	bool external; /**< true if the flow is created external to PMD. */
+	struct mlx5_flow_handle handle;
 };
 
 /* Flow meter state. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 2090631..d1eec96 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -92,20 +92,22 @@
 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
 		  struct mlx5_flow *dev_flow, bool tunnel_decap)
 {
+	uint64_t layers = dev_flow->handle.layers;
+
 	/*
 	 * If layers is already initialized, it means this dev_flow is the
 	 * suffix flow, the layers flags is set by the prefix flow. Need to
 	 * use the layer flags from prefix flow as the suffix flow may not
 	 * have the user defined items as the flow is split.
 	 */
-	if (dev_flow->layers) {
-		if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+	if (layers) {
+		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
 			attr->ipv4 = 1;
-		else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
+		else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
 			attr->ipv6 = 1;
-		if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
+		if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
 			attr->tcp = 1;
-		else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
+		else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
 			attr->udp = 1;
 		attr->valid = 1;
 		return;
@@ -2377,7 +2379,7 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
 	struct mlx5dv_dr_domain *domain;
 
-	resource->flags = dev_flow->group ? 0 : 1;
+	resource->flags = dev_flow->dv.group ? 0 : 1;
 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
 		domain = sh->fdb_domain;
 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
@@ -2397,7 +2399,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.encap_decap = cache_resource;
+			dev_flow->handle.dvh.encap_decap = cache_resource;
 			return 0;
 		}
 	}
@@ -2423,7 +2425,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
-	dev_flow->dv.encap_decap = cache_resource;
+	dev_flow->handle.dvh.encap_decap = cache_resource;
 	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2474,7 +2476,7 @@ struct field_modify_info modify_tcp[] = {
 			(void *)&tbl_data->jump, cnt);
 	}
 	rte_atomic32_inc(&tbl_data->jump.refcnt);
-	dev_flow->dv.jump = &tbl_data->jump;
+	dev_flow->handle.dvh.jump = &tbl_data->jump;
 	return 0;
 }
 
@@ -2512,7 +2514,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.port_id_action = cache_resource;
+			dev_flow->handle.dvh.port_id_action = cache_resource;
 			return 0;
 		}
 	}
@@ -2540,7 +2542,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
-	dev_flow->dv.port_id_action = cache_resource;
+	dev_flow->handle.dvh.port_id_action = cache_resource;
 	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2583,7 +2585,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.push_vlan_res = cache_resource;
+			dev_flow->handle.dvh.push_vlan_res = cache_resource;
 			return 0;
 		}
 	}
@@ -2612,7 +2614,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
-	dev_flow->dv.push_vlan_res = cache_resource;
+	dev_flow->handle.dvh.push_vlan_res = cache_resource;
 	DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -3699,8 +3701,8 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5dv_dr_domain *ns;
 	uint32_t actions_len;
 
-	resource->flags =
-		dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
+	resource->flags = dev_flow->dv.group ? 0 :
+			  MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
 	if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
 				    resource->flags))
 		return rte_flow_error_set(error, EOVERFLOW,
@@ -3725,7 +3727,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.modify_hdr = cache_resource;
+			dev_flow->handle.dvh.modify_hdr = cache_resource;
 			return 0;
 		}
 	}
@@ -3752,7 +3754,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
-	dev_flow->dv.modify_hdr = cache_resource;
+	dev_flow->handle.dvh.modify_hdr = cache_resource;
 	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -5236,7 +5238,7 @@ struct field_modify_info modify_tcp[] = {
 	}
 	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
 	dev_flow->ingress = attr->ingress;
-	dev_flow->transfer = attr->transfer;
+	dev_flow->dv.transfer = attr->transfer;
 	return dev_flow;
 }
 
@@ -5392,7 +5394,7 @@ struct field_modify_info modify_tcp[] = {
 		 * This is workaround, masks are not supported,
 		 * and pre-validated.
 		 */
-		dev_flow->dv.vf_vlan.tag =
+		dev_flow->handle.vf_vlan.tag =
 			rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
 	}
 	tci_m = rte_be_to_cpu_16(vlan_m->tci);
@@ -6893,7 +6895,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_matcher,
 				rte_atomic32_read(&cache_matcher->refcnt));
 			rte_atomic32_inc(&cache_matcher->refcnt);
-			dev_flow->dv.matcher = cache_matcher;
+			dev_flow->handle.dvh.matcher = cache_matcher;
 			/* old matcher should not make the table ref++. */
 			flow_dv_tbl_resource_release(dev, tbl);
 			return 0;
@@ -6930,7 +6932,7 @@ struct field_modify_info modify_tcp[] = {
 	/* only matcher ref++, table ref++ already done above in get API. */
 	rte_atomic32_inc(&cache_matcher->refcnt);
 	LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
-	dev_flow->dv.matcher = cache_matcher;
+	dev_flow->handle.dvh.matcher = cache_matcher;
 	DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
 		key->domain ? "FDB" : "NIC", key->table_id,
 		cache_matcher->priority,
@@ -6972,7 +6974,7 @@ struct field_modify_info modify_tcp[] = {
 		cache_resource = container_of
 			(entry, struct mlx5_flow_dv_tag_resource, entry);
 		rte_atomic32_inc(&cache_resource->refcnt);
-		dev_flow->dv.tag_resource = cache_resource;
+		dev_flow->handle.dvh.tag_resource = cache_resource;
 		DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
 			(void *)cache_resource,
 			rte_atomic32_read(&cache_resource->refcnt));
@@ -7001,7 +7003,7 @@ struct field_modify_info modify_tcp[] = {
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot insert tag");
 	}
-	dev_flow->dv.tag_resource = cache_resource;
+	dev_flow->handle.dvh.tag_resource = cache_resource;
 	DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -7146,7 +7148,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
 {
 	struct rte_flow *flow = dev_flow->flow;
-	uint64_t items = dev_flow->layers;
+	uint64_t items = dev_flow->handle.layers;
 	int rss_inner = 0;
 	uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
 
@@ -7271,7 +7273,7 @@ struct field_modify_info modify_tcp[] = {
 				       !!priv->fdb_def_rule, &table, error);
 	if (ret)
 		return ret;
-	dev_flow->group = table;
+	dev_flow->dv.group = table;
 	if (attr->transfer)
 		mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
 	if (priority == MLX5_FLOW_PRIO_RSVD)
@@ -7304,7 +7306,7 @@ struct field_modify_info modify_tcp[] = {
 			    (dev, &port_id_resource, dev_flow, error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.port_id_action->action;
+				dev_flow->handle.dvh.port_id_action->action;
 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
 			break;
 		case RTE_FLOW_ACTION_TYPE_FLAG:
@@ -7322,12 +7324,12 @@ struct field_modify_info modify_tcp[] = {
 				break;
 			}
 			tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
-			if (!dev_flow->dv.tag_resource)
+			if (!dev_flow->handle.dvh.tag_resource)
 				if (flow_dv_tag_resource_register
 				    (dev, tag_be, dev_flow, error))
 					return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.tag_resource->action;
+				dev_flow->handle.dvh.tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			action_flags |= MLX5_FLOW_ACTION_MARK;
@@ -7349,12 +7351,12 @@ struct field_modify_info modify_tcp[] = {
 			tag_be = mlx5_flow_mark_set
 			      (((const struct rte_flow_action_mark *)
 			       (actions->conf))->id);
-			if (!dev_flow->dv.tag_resource)
+			if (!dev_flow->handle.dvh.tag_resource)
 				if (flow_dv_tag_resource_register
 				    (dev, tag_be, dev_flow, error))
 					return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.tag_resource->action;
+				dev_flow->handle.dvh.tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_META:
 			if (flow_dv_convert_action_set_meta
@@ -7404,9 +7406,9 @@ struct field_modify_info modify_tcp[] = {
 				goto cnt_err;
 			}
 			flow->counter = flow_dv_counter_alloc(dev,
-							      count->shared,
-							      count->id,
-							      dev_flow->group);
+							count->shared,
+							count->id,
+							dev_flow->dv.group);
 			if (flow->counter == NULL)
 				goto cnt_err;
 			dev_flow->dv.actions[actions_n++] =
@@ -7452,7 +7454,7 @@ struct field_modify_info modify_tcp[] = {
 					    (dev, attr, &vlan, dev_flow, error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-					   dev_flow->dv.push_vlan_res->action;
+				dev_flow->handle.dvh.push_vlan_res->action;
 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
@@ -7479,7 +7481,7 @@ struct field_modify_info modify_tcp[] = {
 							   error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
@@ -7489,7 +7491,7 @@ struct field_modify_info modify_tcp[] = {
 							   error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
@@ -7499,7 +7501,7 @@ struct field_modify_info modify_tcp[] = {
 					(dev, actions, dev_flow, attr, error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			} else {
 				/* Handle encap without preceding decap. */
 				if (flow_dv_create_action_l2_encap
@@ -7507,7 +7509,7 @@ struct field_modify_info modify_tcp[] = {
 				     error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			}
 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
@@ -7519,7 +7521,7 @@ struct field_modify_info modify_tcp[] = {
 				    (dev, dev_flow, attr->transfer, error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			}
 			/* If decap is followed by encap, handle it at encap. */
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
@@ -7551,7 +7553,7 @@ struct field_modify_info modify_tcp[] = {
 						 "cannot create jump action.");
 			}
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.jump->action;
+				dev_flow->handle.dvh.jump->action;
 			action_flags |= MLX5_FLOW_ACTION_JUMP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
@@ -7684,7 +7686,7 @@ struct field_modify_info modify_tcp[] = {
 					(dev, mhdr_res, dev_flow, error))
 					return -rte_errno;
 				dev_flow->dv.actions[modify_action_position] =
-					dev_flow->dv.modify_hdr->verbs_action;
+				dev_flow->handle.dvh.modify_hdr->verbs_action;
 			}
 			break;
 		default:
@@ -7695,7 +7697,7 @@ struct field_modify_info modify_tcp[] = {
 			modify_action_position = actions_n++;
 	}
 	dev_flow->dv.actions_n = actions_n;
-	dev_flow->actions = action_flags;
+	dev_flow->handle.act_flags = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 		int item_type = items->type;
@@ -7728,7 +7730,7 @@ struct field_modify_info modify_tcp[] = {
 						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv4(match_mask, match_value,
 						    items, item_flags, tunnel,
-						    dev_flow->group);
+						    dev_flow->dv.group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
@@ -7751,7 +7753,7 @@ struct field_modify_info modify_tcp[] = {
 						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv6(match_mask, match_value,
 						    items, item_flags, tunnel,
-						    dev_flow->group);
+						    dev_flow->dv.group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
@@ -7900,7 +7902,7 @@ struct field_modify_info modify_tcp[] = {
 	 * Layers may be already initialized from prefix flow if this dev_flow
 	 * is the suffix flow.
 	 */
-	dev_flow->layers |= item_flags;
+	dev_flow->handle.layers |= item_flags;
 	if (action_flags & MLX5_FLOW_ACTION_RSS)
 		flow_dv_hashfields_set(dev_flow);
 	/* Register matcher. */
@@ -7911,7 +7913,7 @@ struct field_modify_info modify_tcp[] = {
 	/* reserved field no needs to be set to 0 here. */
 	tbl_key.domain = attr->transfer;
 	tbl_key.direction = attr->egress;
-	tbl_key.table_id = dev_flow->group;
+	tbl_key.table_id = dev_flow->dv.group;
 	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
 		return -rte_errno;
 	return 0;
@@ -7935,21 +7937,25 @@ struct field_modify_info modify_tcp[] = {
 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 		struct rte_flow_error *error)
 {
-	struct mlx5_flow_dv *dv;
+	struct mlx5_flow_resource_dv *dv;
+	struct mlx5_flow_handle *dh;
+	struct mlx5_flow_handle_dv *dv_h;
 	struct mlx5_flow *dev_flow;
 	struct mlx5_priv *priv = dev->data->dev_private;
 	int n;
 	int err;
 
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+		dh = &dev_flow->handle;
 		dv = &dev_flow->dv;
 		n = dv->actions_n;
-		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
-			if (dev_flow->transfer) {
+		dv_h = &dh->dvh;
+		if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
+			if (dv->transfer) {
 				dv->actions[n++] = priv->sh->esw_drop_action;
 			} else {
-				dv->hrxq = mlx5_hrxq_drop_new(dev);
-				if (!dv->hrxq) {
+				dh->hrxq = mlx5_hrxq_drop_new(dev);
+				if (!dh->hrxq) {
 					rte_flow_error_set
 						(error, errno,
 						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -7957,9 +7963,9 @@ struct field_modify_info modify_tcp[] = {
 						 "cannot get drop hash queue");
 					goto error;
 				}
-				dv->actions[n++] = dv->hrxq->action;
+				dv->actions[n++] = dh->hrxq->action;
 			}
-		} else if (dev_flow->actions &
+		} else if (dh->act_flags &
 			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
 			struct mlx5_hrxq *hrxq;
 
@@ -7976,7 +7982,7 @@ struct field_modify_info modify_tcp[] = {
 					 dev_flow->hash_fields,
 					 (*flow->rss.queue),
 					 flow->rss.queue_num,
-					 !!(dev_flow->layers &
+					 !!(dev_flow->handle.layers &
 					    MLX5_FLOW_LAYER_TUNNEL));
 			}
 			if (!hrxq) {
@@ -7986,14 +7992,14 @@ struct field_modify_info modify_tcp[] = {
 					 "cannot get hash queue");
 				goto error;
 			}
-			dv->hrxq = hrxq;
-			dv->actions[n++] = dv->hrxq->action;
+			dh->hrxq = hrxq;
+			dv->actions[n++] = dh->hrxq->action;
 		}
-		dv->flow =
-			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
+		dh->ib_flow =
+			mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
 						  (void *)&dv->value, n,
 						  dv->actions);
-		if (!dv->flow) {
+		if (!dh->ib_flow) {
 			rte_flow_error_set(error, errno,
 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					   NULL,
@@ -8001,32 +8007,30 @@ struct field_modify_info modify_tcp[] = {
 			goto error;
 		}
 		if (priv->vmwa_context &&
-		    dev_flow->dv.vf_vlan.tag &&
-		    !dev_flow->dv.vf_vlan.created) {
+		    dh->vf_vlan.tag && !dh->vf_vlan.created) {
 			/*
 			 * The rule contains the VLAN pattern.
 			 * For VF we are going to create VLAN
 			 * interface to make hypervisor set correct
 			 * e-Switch vport context.
 			 */
-			mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
+			mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
 		}
 	}
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		struct mlx5_flow_dv *dv = &dev_flow->dv;
-		if (dv->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+		struct mlx5_flow_handle *dh_tmp = &dev_flow->handle;
+		if (dh_tmp->hrxq) {
+			if (dh_tmp->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dv->hrxq);
-			dv->hrxq = NULL;
+				mlx5_hrxq_release(dev, dh_tmp->hrxq);
+			dh_tmp->hrxq = NULL;
 		}
-		if (dev_flow->dv.vf_vlan.tag &&
-		    dev_flow->dv.vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+		if (dh_tmp->vf_vlan.tag && dh_tmp->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh_tmp->vf_vlan);
 	}
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
@@ -8047,7 +8051,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_matcher_release(struct rte_eth_dev *dev,
 			struct mlx5_flow *flow)
 {
-	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+	struct mlx5_flow_dv_matcher *matcher = flow->handle.dvh.matcher;
 
 	MLX5_ASSERT(matcher->matcher_object);
 	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
@@ -8080,7 +8084,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
 {
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource =
-						flow->dv.encap_decap;
+						flow->handle.dvh.encap_decap;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
@@ -8113,7 +8117,8 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
 				  struct mlx5_flow *flow)
 {
-	struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
+	struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
+						flow->handle.dvh.jump;
 	struct mlx5_flow_tbl_data_entry *tbl_data =
 			container_of(cache_resource,
 				     struct mlx5_flow_tbl_data_entry, jump);
@@ -8147,7 +8152,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
 {
 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
-						flow->dv.modify_hdr;
+						flow->handle.dvh.modify_hdr;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
@@ -8178,7 +8183,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
 {
 	struct mlx5_flow_dv_port_id_action_resource *cache_resource =
-		flow->dv.port_id_action;
+						flow->handle.dvh.port_id_action;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
@@ -8209,7 +8214,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
 {
 	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
-		flow->dv.push_vlan_res;
+						flow->handle.dvh.push_vlan_res;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
@@ -8239,27 +8244,26 @@ struct field_modify_info modify_tcp[] = {
 static void
 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow_dv *dv;
+	struct mlx5_flow_handle *dh;
 	struct mlx5_flow *dev_flow;
 
 	if (!flow)
 		return;
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dv = &dev_flow->dv;
-		if (dv->flow) {
-			claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
-			dv->flow = NULL;
+		dh = &dev_flow->handle;
+		if (dh->ib_flow) {
+			claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
+			dh->ib_flow = NULL;
 		}
-		if (dv->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+		if (dh->hrxq) {
+			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dv->hrxq);
-			dv->hrxq = NULL;
+				mlx5_hrxq_release(dev, dh->hrxq);
+			dh->hrxq = NULL;
 		}
-		if (dev_flow->dv.vf_vlan.tag &&
-		    dev_flow->dv.vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+		if (dh->vf_vlan.tag && dh->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
 }
 
@@ -8291,20 +8295,21 @@ struct field_modify_info modify_tcp[] = {
 	while (!LIST_EMPTY(&flow->dev_flows)) {
 		dev_flow = LIST_FIRST(&flow->dev_flows);
 		LIST_REMOVE(dev_flow, next);
-		if (dev_flow->dv.matcher)
+		if (dev_flow->handle.dvh.matcher)
 			flow_dv_matcher_release(dev, dev_flow);
-		if (dev_flow->dv.encap_decap)
+		if (dev_flow->handle.dvh.encap_decap)
 			flow_dv_encap_decap_resource_release(dev_flow);
-		if (dev_flow->dv.modify_hdr)
+		if (dev_flow->handle.dvh.modify_hdr)
 			flow_dv_modify_hdr_resource_release(dev_flow);
-		if (dev_flow->dv.jump)
+		if (dev_flow->handle.dvh.jump)
 			flow_dv_jump_tbl_resource_release(dev, dev_flow);
-		if (dev_flow->dv.port_id_action)
+		if (dev_flow->handle.dvh.port_id_action)
 			flow_dv_port_id_action_resource_release(dev_flow);
-		if (dev_flow->dv.push_vlan_res)
+		if (dev_flow->handle.dvh.push_vlan_res)
 			flow_dv_push_vlan_action_resource_release(dev_flow);
-		if (dev_flow->dv.tag_resource)
-			flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
+		if (dev_flow->handle.dvh.tag_resource)
+			flow_dv_tag_release(dev,
+					dev_flow->handle.dvh.tag_resource);
 		rte_free(dev_flow);
 	}
 }
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 459e7b6..08185ec 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -253,7 +253,8 @@
  *   Size in bytes of the specification to copy.
  */
 static void
-flow_verbs_spec_add(struct mlx5_flow_verbs *verbs, void *src, unsigned int size)
+flow_verbs_spec_add(struct mlx5_flow_resource_verbs *verbs,
+		    void *src, unsigned int size)
 {
 	void *dst;
 
@@ -393,7 +394,7 @@
 	else
 		flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
 	if (!tunnel)
-		dev_flow->verbs.vf_vlan.tag =
+		dev_flow->handle.vf_vlan.tag =
 			rte_be_to_cpu_16(spec->tci) & 0x0fff;
 }
 
@@ -743,7 +744,7 @@
 			      const struct rte_flow_item *item __rte_unused,
 			      uint64_t item_flags)
 {
-	struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
+	struct mlx5_flow_resource_verbs *verbs = &dev_flow->verbs;
 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
 	struct ibv_flow_spec_tunnel tunnel = {
@@ -1418,7 +1419,7 @@
 	dev_flow->verbs.attr = (void *)(dev_flow + 1);
 	dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
 	dev_flow->ingress = attr->ingress;
-	dev_flow->transfer = attr->transfer;
+	/* No transfer attribute to set: not supported in Verbs mode. */
 	return dev_flow;
 }
 
@@ -1498,7 +1499,7 @@
 						  "action not supported");
 		}
 	}
-	dev_flow->actions = action_flags;
+	dev_flow->handle.act_flags = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 
@@ -1600,7 +1601,7 @@
 						  "item not supported");
 		}
 	}
-	dev_flow->layers = item_flags;
+	dev_flow->handle.layers = item_flags;
 	dev_flow->verbs.attr->priority =
 		mlx5_flow_adjust_priority(dev, priority, subpriority);
 	dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port;
@@ -1618,28 +1619,26 @@
 static void
 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow_verbs *verbs;
+	struct mlx5_flow_handle *dh;
 	struct mlx5_flow *dev_flow;
 
 	if (!flow)
 		return;
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		verbs = &dev_flow->verbs;
-		if (verbs->flow) {
-			claim_zero(mlx5_glue->destroy_flow(verbs->flow));
-			verbs->flow = NULL;
+		dh = &dev_flow->handle;
+		if (dh->ib_flow) {
+			claim_zero(mlx5_glue->destroy_flow(dh->ib_flow));
+			dh->ib_flow = NULL;
 		}
-		if (verbs->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+		if (dh->hrxq) {
+			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, verbs->hrxq);
-			verbs->hrxq = NULL;
-		}
-		if (dev_flow->verbs.vf_vlan.tag &&
-		    dev_flow->verbs.vf_vlan.created) {
-			mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
+				mlx5_hrxq_release(dev, dh->hrxq);
+			dh->hrxq = NULL;
 		}
+		if (dh->vf_vlan.tag && dh->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
 }
 
@@ -1688,15 +1687,15 @@
 		 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_verbs *verbs;
+	struct mlx5_flow_handle *dh;
 	struct mlx5_flow *dev_flow;
 	int err;
 
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		verbs = &dev_flow->verbs;
-		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
-			verbs->hrxq = mlx5_hrxq_drop_new(dev);
-			if (!verbs->hrxq) {
+		dh = &dev_flow->handle;
+		if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP) {
+			dh->hrxq = mlx5_hrxq_drop_new(dev);
+			if (!dh->hrxq) {
 				rte_flow_error_set
 					(error, errno,
 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -1714,12 +1713,12 @@
 					     flow->rss.queue_num);
 			if (!hrxq)
 				hrxq = mlx5_hrxq_new(dev, flow->rss.key,
-						     MLX5_RSS_HASH_KEY_LEN,
-						     dev_flow->hash_fields,
-						     (*flow->rss.queue),
-						     flow->rss.queue_num,
-						     !!(dev_flow->layers &
-						       MLX5_FLOW_LAYER_TUNNEL));
+						MLX5_RSS_HASH_KEY_LEN,
+						dev_flow->hash_fields,
+						(*flow->rss.queue),
+						flow->rss.queue_num,
+						!!(dev_flow->handle.layers &
+						MLX5_FLOW_LAYER_TUNNEL));
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,
@@ -1727,11 +1726,11 @@
 					 "cannot get hash queue");
 				goto error;
 			}
-			verbs->hrxq = hrxq;
+			dh->hrxq = hrxq;
 		}
-		verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
-						     verbs->attr);
-		if (!verbs->flow) {
+		dh->ib_flow = mlx5_glue->create_flow(dh->hrxq->qp,
+						     dev_flow->verbs.attr);
+		if (!dh->ib_flow) {
 			rte_flow_error_set(error, errno,
 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					   NULL,
@@ -1739,33 +1738,31 @@
 			goto error;
 		}
 		if (priv->vmwa_context &&
-		    dev_flow->verbs.vf_vlan.tag &&
-		    !dev_flow->verbs.vf_vlan.created) {
+		    dev_flow->handle.vf_vlan.tag &&
+		    !dev_flow->handle.vf_vlan.created) {
 			/*
 			 * The rule contains the VLAN pattern.
 			 * For VF we are going to create VLAN
 			 * interface to make hypervisor set correct
 			 * e-Switch vport context.
 			 */
-			mlx5_vlan_vmwa_acquire(dev, &dev_flow->verbs.vf_vlan);
+			mlx5_vlan_vmwa_acquire(dev, &dev_flow->handle.vf_vlan);
 		}
 	}
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		verbs = &dev_flow->verbs;
-		if (verbs->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+		dh = &dev_flow->handle;
+		if (dh->hrxq) {
+			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, verbs->hrxq);
-			verbs->hrxq = NULL;
-		}
-		if (dev_flow->verbs.vf_vlan.tag &&
-		    dev_flow->verbs.vf_vlan.created) {
-			mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
+				mlx5_hrxq_release(dev, dh->hrxq);
+			dh->hrxq = NULL;
 		}
+		if (dh->vf_vlan.tag && dh->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread
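
For reference, the patch above consolidates all of the destroy-time
state of a device flow into a single handle structure. A minimal
sketch of the result (field names follow the usage in the diff; exact
types, the DV/Verbs union, and conditional compilation are simplified,
and the mlx5_vf_vlan layout is an approximation):

#include <stdint.h>

/* Opaque resource types; the real definitions live in the mlx5 headers. */
struct mlx5_flow_dv_matcher;
struct mlx5_flow_dv_encap_decap_resource;
struct mlx5_flow_dv_modify_hdr_resource;
struct mlx5_hrxq;

/* Approximation of the driver's VF VLAN workaround state. */
struct mlx5_vf_vlan {
	uint32_t tag:12;    /* VLAN tag value. */
	uint32_t created:1; /* VLAN interface was created. */
};

/* DV-only resources referenced by a flow handle. */
struct mlx5_flow_handle_dv {
	struct mlx5_flow_dv_matcher *matcher;
	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
	/* ...jump, port_id_action, push_vlan_res, tag_resource... */
};

/* Everything needed to destroy one device flow. */
struct mlx5_flow_handle {
	uint64_t layers;                /* MLX5_FLOW_LAYER_* bit-fields. */
	uint64_t act_flags;             /* MLX5_FLOW_ACTION_* bit-fields. */
	void *ib_flow;                  /* Verbs/DV flow object. */
	struct mlx5_hrxq *hrxq;         /* Hash Rx queue, if any. */
	struct mlx5_vf_vlan vf_vlan;    /* VF VLAN workaround state. */
	struct mlx5_flow_handle_dv dvh; /* DV-only cached resources. */
};

With this grouping, both __flow_dv_remove() and flow_verbs_remove()
only need to walk the handles, which is what the following patch in
the series exploits.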

* [dpdk-dev] [PATCH v4 3/4] net/mlx5: separate the flow handle resource
  2020-03-24 15:33   ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
  2020-03-24 15:33     ` [dpdk-dev] [PATCH v4 1/4] net/mlx5: change operations for non-cached flows Bing Zhao
  2020-03-24 15:33     ` [dpdk-dev] [PATCH v4 2/4] net/mlx5: reorganize mlx5 flow structures Bing Zhao
@ 2020-03-24 15:33     ` Bing Zhao
  2020-03-24 15:34     ` [dpdk-dev] [PATCH v4 4/4] net/mlx5: check device stat before creating flow Bing Zhao
                       ` (2 subsequent siblings)
  5 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-03-24 15:33 UTC (permalink / raw)
  To: orika, rasland, matan; +Cc: viacheslavo, dev

Only the members of the flow handle structure are needed when
destroying a flow. The other members of the mlx5 device flow resource
are used only during flow creation and can be reused for different
flows.
So only the device flow handle structure needs to be saved for later
use. It can be separated from the whole mlx5 device flow and stored
in a list attached to each rte flow.
The other members are pre-allocated as an array, and an index is used
to apply each device flow to the hardware.
The flow handle sizes of Verbs and DV mode differ, so the size can be
computed before allocating a Verbs handle. The total memory
consumption is then lower for Verbs when no inbox driver is used.

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5.c            |   7 ++
 drivers/net/mlx5/mlx5.h            |   4 +
 drivers/net/mlx5/mlx5_flow.c       | 127 +++++++++++++-------
 drivers/net/mlx5/mlx5_flow.h       |  89 ++++++++++++--
 drivers/net/mlx5/mlx5_flow_dv.c    | 230 +++++++++++++++++++++----------------
 drivers/net/mlx5/mlx5_flow_verbs.c | 139 ++++++++++++----------
 drivers/net/mlx5/mlx5_trigger.c    |   1 +
 7 files changed, 381 insertions(+), 216 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 0613f70..8dda0c3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1246,6 +1246,8 @@ struct mlx5_flow_id_pool *
 	 */
 	mlx5_flow_list_flush(dev, &priv->flows, true);
 	mlx5_flow_meter_flush(dev, NULL);
+	/* Free the intermediate buffers for flow creation. */
+	mlx5_flow_free_intermediate(dev);
 	/* Prevent crashes when queues are still in use. */
 	dev->rx_pkt_burst = removed_rx_burst;
 	dev->tx_pkt_burst = removed_tx_burst;
@@ -2768,6 +2770,11 @@ struct mlx5_flow_id_pool *
 			err = ENOTSUP;
 			goto error;
 	}
+	/*
+	 * Allocate the buffer for flow creation, just once.
+	 * The allocation must be done before any flow is created.
+	 */
+	mlx5_flow_alloc_intermediate(eth_dev);
 	/* Query availability of metadata reg_c's. */
 	err = mlx5_flow_discover_mreg_c(eth_dev);
 	if (err < 0) {
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 98e5fa5..2cc4c76 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -517,6 +517,8 @@ struct mlx5_priv {
 	struct mlx5_drop drop_queue; /* Flow drop queues. */
 	struct mlx5_flows flows; /* RTE Flow rules. */
 	struct mlx5_flows ctrl_flows; /* Control flow rules. */
+	void *inter_flows; /* Intermediate resources for flow creation. */
+	int flow_idx; /* Intermediate device flow index. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
 	LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
@@ -728,6 +730,8 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
 int mlx5_flow_start_default(struct rte_eth_dev *dev);
 void mlx5_flow_stop_default(struct rte_eth_dev *dev);
+void mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev);
+void mlx5_flow_free_intermediate(struct rte_eth_dev *dev);
 int mlx5_flow_verify(struct rte_eth_dev *dev);
 int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);
 int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index b2de4e6..f2d3730 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -712,17 +712,19 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- *   Pointer to device flow structure.
+ * @param[in] flow
+ *   Pointer to flow structure.
+ * @param[in] dev_handle
+ *   Pointer to device flow handle structure.
  */
 static void
-flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow,
+		       struct mlx5_flow_handle *dev_handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->handle.act_flags &
+	const int mark = !!(dev_handle->act_flags &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL);
+	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
 
 	for (i = 0; i != flow->rss.queue_num; ++i) {
@@ -751,7 +753,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			/* Increase the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
 				if ((tunnels_info[j].tunnel &
-				     dev_flow->handle.layers) ==
+				     dev_handle->layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]++;
 					break;
@@ -773,10 +775,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 static void
 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		flow_drv_rxq_flags_set(dev, dev_flow);
+	LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+		flow_drv_rxq_flags_set(dev, flow, dev_handle);
 }
 
 /**
@@ -785,17 +787,19 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param[in] dev_flow
- *   Pointer to the device flow.
+ * @param[in] flow
+ *   Pointer to flow structure.
+ * @param[in] dev_handle
+ *   Pointer to the device flow handle structure.
  */
 static void
-flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow,
+			struct mlx5_flow_handle *dev_handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->handle.act_flags &
+	const int mark = !!(dev_handle->act_flags &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL);
+	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
 
 	MLX5_ASSERT(dev->data->dev_started);
@@ -820,7 +824,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			/* Decrease the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
 				if ((tunnels_info[j].tunnel &
-				     dev_flow->handle.layers) ==
+				     dev_handle->layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]--;
 					break;
@@ -843,10 +847,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 static void
 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		flow_drv_rxq_flags_trim(dev, dev_flow);
+	LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+		flow_drv_rxq_flags_trim(dev, flow, dev_handle);
 }
 
 /**
@@ -2309,11 +2313,11 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
 			     struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		if (dev_flow->handle.qrss_id)
-			flow_qrss_free_id(dev, dev_flow->handle.qrss_id);
+	LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+		if (dev_handle->qrss_id)
+			flow_qrss_free_id(dev, dev_handle->qrss_id);
 }
 
 static int
@@ -2329,7 +2333,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 }
 
 static struct mlx5_flow *
-flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
+		  const struct rte_flow_attr *attr __rte_unused,
 		  const struct rte_flow_item items[] __rte_unused,
 		  const struct rte_flow_action actions[] __rte_unused,
 		  struct rte_flow_error *error)
@@ -2469,6 +2474,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *   setting backward reference to the flow should be done out of this function.
  *   layers field is not filled either.
  *
+ * @param[in] dev
+ *   Pointer to the dev structure.
  * @param[in] attr
  *   Pointer to the flow attributes.
  * @param[in] items
@@ -2482,7 +2489,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
  */
 static inline struct mlx5_flow *
-flow_drv_prepare(const struct rte_flow *flow,
+flow_drv_prepare(struct rte_eth_dev *dev,
+		 const struct rte_flow *flow,
 		 const struct rte_flow_attr *attr,
 		 const struct rte_flow_item items[],
 		 const struct rte_flow_action actions[],
@@ -2493,7 +2501,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 
 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
-	return fops->prepare(attr, items, actions, error);
+	return fops->prepare(dev, attr, items, actions, error);
 }
 
 /**
@@ -2701,17 +2709,17 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	 * help to do the optimization work for source code.
 	 * If no decap actions, use the layers directly.
 	 */
-	if (!(dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DECAP))
-		return dev_flow->handle.layers;
+	if (!(dev_flow->handle->act_flags & MLX5_FLOW_ACTION_DECAP))
+		return dev_flow->handle->layers;
 	/* Convert L3 layers with decap action. */
-	if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
-	else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
 	/* Convert L4 layers with decap action.  */
-	if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
+	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
 		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
-	else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
+	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
 		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
 	return layers;
 }
@@ -3412,7 +3420,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  * The last stage of splitting chain, just creates the subflow
  * without any modification.
  *
- * @param dev
+ * @param[in] dev
  *   Pointer to Ethernet device.
  * @param[in] flow
  *   Parent flow structure pointer.
@@ -3445,19 +3453,19 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	struct mlx5_flow *dev_flow;
 
-	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
+	dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, error);
 	if (!dev_flow)
 		return -rte_errno;
 	dev_flow->flow = flow;
 	dev_flow->external = external;
 	/* Subflow object was created, we must include one in the list. */
-	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+	LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
 	/*
 	 * If dev_flow is as one of the suffix flow, some actions in suffix
 	 * flow may need some user defined item layer flags.
 	 */
 	if (prefix_layers)
-		dev_flow->handle.layers = prefix_layers;
+		dev_flow->handle->layers = prefix_layers;
 	if (sub_flow)
 		*sub_flow = dev_flow;
 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
@@ -3972,7 +3980,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			 * reallocation becomes possible (for example, for
 			 * other flows in other threads).
 			 */
-			dev_flow->handle.qrss_id = qrss_id;
+			dev_flow->handle->qrss_id = qrss_id;
 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
 						   error);
 			if (ret < 0)
@@ -4085,7 +4093,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			ret = -rte_errno;
 			goto exit;
 		}
-		dev_flow->handle.mtr_flow_id = mtr_tag_id;
+		dev_flow->handle->mtr_flow_id = mtr_tag_id;
 		/* Setting the sfx group attr. */
 		sfx_attr.group = sfx_attr.transfer ?
 				(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
@@ -4256,7 +4264,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
 		flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
 	}
-	LIST_INIT(&flow->dev_flows);
+	LIST_INIT(&flow->dev_handles);
 	if (rss && rss->types) {
 		unsigned int graph_root;
 
@@ -4271,6 +4279,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		buf->entries = 1;
 		buf->entry[0].pattern = (void *)(uintptr_t)items;
 	}
+	/* Reset device flow index to 0. */
+	priv->flow_idx = 0;
 	for (i = 0; i < buf->entries; ++i) {
 		/*
 		 * The splitter may create multiple dev_flows,
@@ -4289,13 +4299,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
 		attr_tx.ingress = 0;
 		attr_tx.egress = 1;
-		dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items,
+		dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
 					    actions_hairpin_tx.actions, error);
 		if (!dev_flow)
 			goto error;
 		dev_flow->flow = flow;
 		dev_flow->external = 0;
-		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+		LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
 					 items_tx.items,
 					 actions_hairpin_tx.actions, error);
@@ -4543,8 +4553,6 @@ struct rte_flow *
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param list
- *   Pointer to a TAILQ flow list.
  */
 void
 mlx5_flow_stop_default(struct rte_eth_dev *dev)
@@ -4570,6 +4578,37 @@ struct rte_flow *
 }
 
 /**
+ * Allocate intermediate resources for flow creation.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (!priv->inter_flows)
+		priv->inter_flows = rte_calloc(__func__, MLX5_NUM_MAX_DEV_FLOWS,
+					       sizeof(struct mlx5_flow), 0);
+}
+
+/**
+ * Free intermediate resources for flows.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_flow_free_intermediate(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	rte_free(priv->inter_flows);
+	priv->inter_flows = NULL;
+}
+
+/**
  * Verify the flow list is empty
  *
  * @param dev
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index f3aea53..0f0e59d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -500,6 +500,8 @@ struct mlx5_flow_handle_dv {
 
 /** Device flow handle structure: used both for creating & destroying. */
 struct mlx5_flow_handle {
+	LIST_ENTRY(mlx5_flow_handle) next;
+	/**< Pointer to next device flow handle. */
 	uint64_t layers;
 	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
 	uint64_t act_flags;
@@ -517,6 +519,18 @@ struct mlx5_flow_handle {
 };
 
 /*
+ * Size for Verbs device flow handle structure only. Do not use the DV only
+ * structure in Verbs. No DV flow attributes will be accessed.
+ * Macro offsetof() could also be used (see the sketch after this diff).
+ */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#define MLX5_FLOW_HANDLE_VERBS_SIZE \
+	(sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv))
+#else
+#define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle))
+#endif
+
+/*
  * Max number of actions per DV flow.
  * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
  * in rdma-core file providers/mlx5/verbs.c.
@@ -524,7 +538,7 @@ struct mlx5_flow_handle {
 #define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
 
 /** Device flow structure only for DV flow creation. */
-struct mlx5_flow_resource_dv {
+struct mlx5_flow_dv_workspace {
 	uint32_t group; /**< The group index. */
 	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
 	int actions_n; /**< number of actions. */
@@ -533,27 +547,79 @@ struct mlx5_flow_resource_dv {
 	/**< Holds the value that the packet is compared to. */
 };
 
+/*
+ * Maximal Verbs flow specifications & actions size.
+ * Some elements are mutually exclusive, but enough space should be allocated.
+ * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers.
+ *               2. One tunnel header (exception: GRE + MPLS),
+ *                  SPEC length: GRE == tunnel.
+ * Actions: 1. 1 Mark OR Flag.
+ *          2. 1 Drop (if any).
+ *          3. No limitation for counters, but it makes no sense to support too
+ *             many counters in a single device flow.
+ */
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+#define MLX5_VERBS_MAX_SPEC_SIZE \
+		( \
+			(2 * (sizeof(struct ibv_flow_spec_eth) + \
+			      sizeof(struct ibv_flow_spec_ipv6) + \
+			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
+			sizeof(struct ibv_flow_spec_gre) + \
+			sizeof(struct ibv_flow_spec_mpls)) \
+		)
+#else
+#define MLX5_VERBS_MAX_SPEC_SIZE \
+		( \
+			(2 * (sizeof(struct ibv_flow_spec_eth) + \
+			      sizeof(struct ibv_flow_spec_ipv6) + \
+			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
+			sizeof(struct ibv_flow_spec_tunnel)) \
+		)
+#endif
+
+#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
+	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+#define MLX5_VERBS_MAX_ACT_SIZE \
+		( \
+			sizeof(struct ibv_flow_spec_action_tag) + \
+			sizeof(struct ibv_flow_spec_action_drop) + \
+			sizeof(struct ibv_flow_spec_counter_action) * 4 \
+		)
+#else
+#define MLX5_VERBS_MAX_ACT_SIZE \
+		( \
+			sizeof(struct ibv_flow_spec_action_tag) + \
+			sizeof(struct ibv_flow_spec_action_drop) \
+		)
+#endif
+
+#define MLX5_VERBS_MAX_SPEC_ACT_SIZE \
+		(MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE)
+
 /** Device flow structure only for Verbs flow creation. */
-struct mlx5_flow_resource_verbs {
+struct mlx5_flow_verbs_workspace {
 	unsigned int size; /**< Size of the attribute. */
-	struct ibv_flow_attr *attr; /**< Pointer to the Specification buffer. */
-	uint8_t *specs; /**< Pointer to the specifications. */
+	struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */
+	uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE];
+	/**< Specifications & actions buffer of verbs flow. */
 };
 
+/** Maximal number of device sub-flows supported. */
+#define MLX5_NUM_MAX_DEV_FLOWS 32
+
 /** Device flow structure. */
 struct mlx5_flow {
-	LIST_ENTRY(mlx5_flow) next; /**< Pointer to next device flow. */
 	struct rte_flow *flow; /**< Pointer to the main flow. */
 	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
 	bool external; /**< true if the flow is created external to PMD. */
 	uint8_t ingress; /**< 1 if the flow is ingress. */
 	union {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		struct mlx5_flow_resource_dv dv;
+		struct mlx5_flow_dv_workspace dv;
 #endif
-		struct mlx5_flow_resource_verbs verbs;
+		struct mlx5_flow_verbs_workspace verbs;
 	};
-	struct mlx5_flow_handle handle;
+	struct mlx5_flow_handle *handle;
 };
 
 /* Flow meter state. */
@@ -667,8 +733,8 @@ struct rte_flow {
 	struct mlx5_flow_mreg_copy_resource *mreg_copy;
 	/**< pointer to metadata register copy table resource. */
 	struct mlx5_flow_meter *meter; /**< Holds flow meter. */
-	LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
-	/**< Device flows that are part of the flow. */
+	LIST_HEAD(dev_handles, mlx5_flow_handle) dev_handles;
+	/**< Device flow handles that are part of the flow. */
 	struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
 	uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
 	uint32_t copy_applied:1; /**< The MARK copy Flow os applied. */
@@ -681,7 +747,8 @@ typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
 				    bool external,
 				    struct rte_flow_error *error);
 typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
-	(const struct rte_flow_attr *attr, const struct rte_flow_item items[],
+	(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+	 const struct rte_flow_item items[],
 	 const struct rte_flow_action actions[], struct rte_flow_error *error);
 typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
 				     struct mlx5_flow *dev_flow,
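
As the comment in mlx5_flow.h above notes, offsetof() is an
equivalent way to express the Verbs-only handle size, provided the
DV-only member dvh stays the trailing field of struct
mlx5_flow_handle (an assumption the sizeof() subtraction already
makes). A hypothetical spelling:

#include <stddef.h> /* offsetof() */

/*
 * Hypothetical alternative to the sizeof() subtraction; valid only
 * while 'dvh' remains the last member of struct mlx5_flow_handle.
 */
#define MLX5_FLOW_HANDLE_VERBS_SIZE \
	(offsetof(struct mlx5_flow_handle, dvh))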
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index d1eec96..d532ce0 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -92,7 +92,7 @@
 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
 		  struct mlx5_flow *dev_flow, bool tunnel_decap)
 {
-	uint64_t layers = dev_flow->handle.layers;
+	uint64_t layers = dev_flow->handle->layers;
 
 	/*
 	 * If layers is already initialized, it means this dev_flow is the
@@ -2399,7 +2399,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle.dvh.encap_decap = cache_resource;
+			dev_flow->handle->dvh.encap_decap = cache_resource;
 			return 0;
 		}
 	}
@@ -2425,7 +2425,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
-	dev_flow->handle.dvh.encap_decap = cache_resource;
+	dev_flow->handle->dvh.encap_decap = cache_resource;
 	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2476,7 +2476,7 @@ struct field_modify_info modify_tcp[] = {
 			(void *)&tbl_data->jump, cnt);
 	}
 	rte_atomic32_inc(&tbl_data->jump.refcnt);
-	dev_flow->handle.dvh.jump = &tbl_data->jump;
+	dev_flow->handle->dvh.jump = &tbl_data->jump;
 	return 0;
 }
 
@@ -2514,7 +2514,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle.dvh.port_id_action = cache_resource;
+			dev_flow->handle->dvh.port_id_action = cache_resource;
 			return 0;
 		}
 	}
@@ -2542,7 +2542,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
-	dev_flow->handle.dvh.port_id_action = cache_resource;
+	dev_flow->handle->dvh.port_id_action = cache_resource;
 	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2585,7 +2585,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle.dvh.push_vlan_res = cache_resource;
+			dev_flow->handle->dvh.push_vlan_res = cache_resource;
 			return 0;
 		}
 	}
@@ -2614,7 +2614,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
-	dev_flow->handle.dvh.push_vlan_res = cache_resource;
+	dev_flow->handle->dvh.push_vlan_res = cache_resource;
 	DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -3727,7 +3727,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle.dvh.modify_hdr = cache_resource;
+			dev_flow->handle->dvh.modify_hdr = cache_resource;
 			return 0;
 		}
 	}
@@ -3754,7 +3754,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
-	dev_flow->handle.dvh.modify_hdr = cache_resource;
+	dev_flow->handle->dvh.modify_hdr = cache_resource;
 	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -5207,6 +5207,8 @@ struct field_modify_info modify_tcp[] = {
  * Internal preparation function. Allocates the DV flow size,
  * this size is constant.
  *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
  * @param[in] attr
  *   Pointer to the flow attributes.
  * @param[in] items
@@ -5221,22 +5223,41 @@ struct field_modify_info modify_tcp[] = {
  *   otherwise NULL and rte_errno is set.
  */
 static struct mlx5_flow *
-flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_dv_prepare(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr __rte_unused,
 		const struct rte_flow_item items[] __rte_unused,
 		const struct rte_flow_action actions[] __rte_unused,
 		struct rte_flow_error *error)
 {
-	size_t size = sizeof(struct mlx5_flow);
+	size_t size = sizeof(struct mlx5_flow_handle);
 	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
+	struct mlx5_priv *priv = dev->data->dev_private;
 
-	dev_flow = rte_calloc(__func__, 1, size, 0);
-	if (!dev_flow) {
+	/* Guard against overflowing the pre-allocated device flow array. */
+	if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+		rte_flow_error_set(error, ENOSPC,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "no free temporary device flow");
+		return NULL;
+	}
+	dev_handle = rte_calloc(__func__, 1, size, 0);
+	if (!dev_handle) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				   "not enough memory to create flow");
+				   "not enough memory to create flow handle");
 		return NULL;
 	}
+	/* No multi-thread support. */
+	dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+	dev_flow->handle = dev_handle;
 	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+	/*
+	 * The matching value needs to be cleared to 0 before using. In the
+	 * past, it was cleared automatically by the rte_*alloc API. The
+	 * time consumption will be almost the same as before.
+	 */
+	memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
 	dev_flow->ingress = attr->ingress;
 	dev_flow->dv.transfer = attr->transfer;
 	return dev_flow;
@@ -5394,7 +5415,7 @@ struct field_modify_info modify_tcp[] = {
 		 * This is workaround, masks are not supported,
 		 * and pre-validated.
 		 */
-		dev_flow->handle.vf_vlan.tag =
+		dev_flow->handle->vf_vlan.tag =
 			rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
 	}
 	tci_m = rte_be_to_cpu_16(vlan_m->tci);
@@ -6895,7 +6916,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_matcher,
 				rte_atomic32_read(&cache_matcher->refcnt));
 			rte_atomic32_inc(&cache_matcher->refcnt);
-			dev_flow->handle.dvh.matcher = cache_matcher;
+			dev_flow->handle->dvh.matcher = cache_matcher;
 			/* old matcher should not make the table ref++. */
 			flow_dv_tbl_resource_release(dev, tbl);
 			return 0;
@@ -6932,7 +6953,7 @@ struct field_modify_info modify_tcp[] = {
 	/* only matcher ref++, table ref++ already done above in get API. */
 	rte_atomic32_inc(&cache_matcher->refcnt);
 	LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
-	dev_flow->handle.dvh.matcher = cache_matcher;
+	dev_flow->handle->dvh.matcher = cache_matcher;
 	DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
 		key->domain ? "FDB" : "NIC", key->table_id,
 		cache_matcher->priority,
@@ -6974,7 +6995,7 @@ struct field_modify_info modify_tcp[] = {
 		cache_resource = container_of
 			(entry, struct mlx5_flow_dv_tag_resource, entry);
 		rte_atomic32_inc(&cache_resource->refcnt);
-		dev_flow->handle.dvh.tag_resource = cache_resource;
+		dev_flow->handle->dvh.tag_resource = cache_resource;
 		DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
 			(void *)cache_resource,
 			rte_atomic32_read(&cache_resource->refcnt));
@@ -7003,7 +7024,7 @@ struct field_modify_info modify_tcp[] = {
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot insert tag");
 	}
-	dev_flow->handle.dvh.tag_resource = cache_resource;
+	dev_flow->handle->dvh.tag_resource = cache_resource;
 	DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -7148,7 +7169,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
 {
 	struct rte_flow *flow = dev_flow->flow;
-	uint64_t items = dev_flow->handle.layers;
+	uint64_t items = dev_flow->handle->layers;
 	int rss_inner = 0;
 	uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
 
@@ -7238,6 +7259,7 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *dev_conf = &priv->config;
 	struct rte_flow *flow = dev_flow->flow;
+	struct mlx5_flow_handle *handle = dev_flow->handle;
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
 	uint64_t action_flags = 0;
@@ -7306,7 +7328,7 @@ struct field_modify_info modify_tcp[] = {
 			    (dev, &port_id_resource, dev_flow, error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.port_id_action->action;
+					handle->dvh.port_id_action->action;
 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
 			break;
 		case RTE_FLOW_ACTION_TYPE_FLAG:
@@ -7324,12 +7346,17 @@ struct field_modify_info modify_tcp[] = {
 				break;
 			}
 			tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
-			if (!dev_flow->handle.dvh.tag_resource)
-				if (flow_dv_tag_resource_register
-				    (dev, tag_be, dev_flow, error))
-					return -rte_errno;
+			/*
+			 * Only one FLAG or MARK is supported per device flow
+			 * right now, so the pointer to the tag resource must
+			 * be NULL before the registration.
+			 */
+			MLX5_ASSERT(!handle->dvh.tag_resource);
+			if (flow_dv_tag_resource_register(dev, tag_be,
+							  dev_flow, error))
+				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.tag_resource->action;
+					handle->dvh.tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			action_flags |= MLX5_FLOW_ACTION_MARK;
@@ -7351,12 +7378,12 @@ struct field_modify_info modify_tcp[] = {
 			tag_be = mlx5_flow_mark_set
 			      (((const struct rte_flow_action_mark *)
 			       (actions->conf))->id);
-			if (!dev_flow->handle.dvh.tag_resource)
-				if (flow_dv_tag_resource_register
-				    (dev, tag_be, dev_flow, error))
-					return -rte_errno;
+			MLX5_ASSERT(!handle->dvh.tag_resource);
+			if (flow_dv_tag_resource_register(dev, tag_be,
+							  dev_flow, error))
+				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.tag_resource->action;
+					handle->dvh.tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_META:
 			if (flow_dv_convert_action_set_meta
@@ -7454,7 +7481,7 @@ struct field_modify_info modify_tcp[] = {
 					    (dev, attr, &vlan, dev_flow, error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.push_vlan_res->action;
+					handle->dvh.push_vlan_res->action;
 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
@@ -7481,7 +7508,7 @@ struct field_modify_info modify_tcp[] = {
 							   error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
@@ -7491,7 +7518,7 @@ struct field_modify_info modify_tcp[] = {
 							   error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
@@ -7501,7 +7528,7 @@ struct field_modify_info modify_tcp[] = {
 					(dev, actions, dev_flow, attr, error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			} else {
 				/* Handle encap without preceding decap. */
 				if (flow_dv_create_action_l2_encap
@@ -7509,7 +7536,7 @@ struct field_modify_info modify_tcp[] = {
 				     error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			}
 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
@@ -7521,7 +7548,7 @@ struct field_modify_info modify_tcp[] = {
 				    (dev, dev_flow, attr->transfer, error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			}
 			/* If decap is followed by encap, handle it at encap. */
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
@@ -7553,7 +7580,7 @@ struct field_modify_info modify_tcp[] = {
 						 "cannot create jump action.");
 			}
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.jump->action;
+					handle->dvh.jump->action;
 			action_flags |= MLX5_FLOW_ACTION_JUMP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
@@ -7686,7 +7713,7 @@ struct field_modify_info modify_tcp[] = {
 					(dev, mhdr_res, dev_flow, error))
 					return -rte_errno;
 				dev_flow->dv.actions[modify_action_position] =
-				dev_flow->handle.dvh.modify_hdr->verbs_action;
+					handle->dvh.modify_hdr->verbs_action;
 			}
 			break;
 		default:
@@ -7697,7 +7724,7 @@ struct field_modify_info modify_tcp[] = {
 			modify_action_position = actions_n++;
 	}
 	dev_flow->dv.actions_n = actions_n;
-	dev_flow->handle.act_flags = action_flags;
+	handle->act_flags = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 		int item_type = items->type;
@@ -7902,7 +7929,7 @@ struct field_modify_info modify_tcp[] = {
 	 * Layers may be already initialized from prefix flow if this dev_flow
 	 * is the suffix flow.
 	 */
-	dev_flow->handle.layers |= item_flags;
+	handle->layers |= item_flags;
 	if (action_flags & MLX5_FLOW_ACTION_RSS)
 		flow_dv_hashfields_set(dev_flow);
 	/* Register matcher. */
@@ -7937,19 +7964,21 @@ struct field_modify_info modify_tcp[] = {
 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 		struct rte_flow_error *error)
 {
-	struct mlx5_flow_resource_dv *dv;
+	struct mlx5_flow_dv_workspace *dv;
 	struct mlx5_flow_handle *dh;
 	struct mlx5_flow_handle_dv *dv_h;
 	struct mlx5_flow *dev_flow;
 	struct mlx5_priv *priv = dev->data->dev_private;
 	int n;
 	int err;
+	int idx;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
+	for (idx = priv->flow_idx - 1; idx >= 0; idx--) {
+		dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
 		dv = &dev_flow->dv;
-		n = dv->actions_n;
+		dh = dev_flow->handle;
 		dv_h = &dh->dvh;
+		n = dv->actions_n;
 		if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
 			if (dv->transfer) {
 				dv->actions[n++] = priv->sh->esw_drop_action;
@@ -7982,7 +8011,7 @@ struct field_modify_info modify_tcp[] = {
 					 dev_flow->hash_fields,
 					 (*flow->rss.queue),
 					 flow->rss.queue_num,
-					 !!(dev_flow->handle.layers &
+					 !!(dh->layers &
 					    MLX5_FLOW_LAYER_TUNNEL));
 			}
 			if (!hrxq) {
@@ -8020,17 +8049,16 @@ struct field_modify_info modify_tcp[] = {
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		struct mlx5_flow_handle *dh_tmp = &dev_flow->handle;
-		if (dh_tmp->hrxq) {
-			if (dh_tmp->act_flags & MLX5_FLOW_ACTION_DROP)
+	LIST_FOREACH(dh, &flow->dev_handles, next) {
+		if (dh->hrxq) {
+			if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dh_tmp->hrxq);
-			dh_tmp->hrxq = NULL;
+				mlx5_hrxq_release(dev, dh->hrxq);
+			dh->hrxq = NULL;
 		}
-		if (dh_tmp->vf_vlan.tag && dh_tmp->vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dh_tmp->vf_vlan);
+		if (dh->vf_vlan.tag && dh->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
@@ -8041,17 +8069,17 @@ struct field_modify_info modify_tcp[] = {
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
 flow_dv_matcher_release(struct rte_eth_dev *dev,
-			struct mlx5_flow *flow)
+			struct mlx5_flow_handle *handle)
 {
-	struct mlx5_flow_dv_matcher *matcher = flow->handle.dvh.matcher;
+	struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
 
 	MLX5_ASSERT(matcher->matcher_object);
 	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
@@ -8074,17 +8102,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release an encap/decap resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
+flow_dv_encap_decap_resource_release(struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource =
-						flow->handle.dvh.encap_decap;
+						handle->dvh.encap_decap;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
@@ -8107,18 +8135,18 @@ struct field_modify_info modify_tcp[] = {
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
-				  struct mlx5_flow *flow)
+				  struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
-						flow->handle.dvh.jump;
+							handle->dvh.jump;
 	struct mlx5_flow_tbl_data_entry *tbl_data =
 			container_of(cache_resource,
 				     struct mlx5_flow_tbl_data_entry, jump);
@@ -8142,17 +8170,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release a modify-header resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
+flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
-						flow->handle.dvh.modify_hdr;
+							handle->dvh.modify_hdr;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
@@ -8173,17 +8201,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release port ID action resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
+flow_dv_port_id_action_resource_release(struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_port_id_action_resource *cache_resource =
-						flow->handle.dvh.port_id_action;
+						handle->dvh.port_id_action;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
@@ -8204,17 +8232,17 @@ struct field_modify_info modify_tcp[] = {
 /**
  * Release push vlan action resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
+flow_dv_push_vlan_action_resource_release(struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
-						flow->handle.dvh.push_vlan_res;
+						handle->dvh.push_vlan_res;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
@@ -8245,18 +8273,16 @@ struct field_modify_info modify_tcp[] = {
 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct mlx5_flow_handle *dh;
-	struct mlx5_flow *dev_flow;
 
 	if (!flow)
 		return;
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
+	LIST_FOREACH(dh, &flow->dev_handles, next) {
 		if (dh->ib_flow) {
 			claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
 			dh->ib_flow = NULL;
 		}
 		if (dh->hrxq) {
-			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
+			if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
 				mlx5_hrxq_release(dev, dh->hrxq);
@@ -8279,7 +8305,7 @@ struct field_modify_info modify_tcp[] = {
 static void
 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
 
 	if (!flow)
 		return;
@@ -8292,25 +8318,25 @@ struct field_modify_info modify_tcp[] = {
 		mlx5_flow_meter_detach(flow->meter);
 		flow->meter = NULL;
 	}
-	while (!LIST_EMPTY(&flow->dev_flows)) {
-		dev_flow = LIST_FIRST(&flow->dev_flows);
-		LIST_REMOVE(dev_flow, next);
-		if (dev_flow->handle.dvh.matcher)
-			flow_dv_matcher_release(dev, dev_flow);
-		if (dev_flow->handle.dvh.encap_decap)
-			flow_dv_encap_decap_resource_release(dev_flow);
-		if (dev_flow->handle.dvh.modify_hdr)
-			flow_dv_modify_hdr_resource_release(dev_flow);
-		if (dev_flow->handle.dvh.jump)
-			flow_dv_jump_tbl_resource_release(dev, dev_flow);
-		if (dev_flow->handle.dvh.port_id_action)
-			flow_dv_port_id_action_resource_release(dev_flow);
-		if (dev_flow->handle.dvh.push_vlan_res)
-			flow_dv_push_vlan_action_resource_release(dev_flow);
-		if (dev_flow->handle.dvh.tag_resource)
+	while (!LIST_EMPTY(&flow->dev_handles)) {
+		dev_handle = LIST_FIRST(&flow->dev_handles);
+		LIST_REMOVE(dev_handle, next);
+		if (dev_handle->dvh.matcher)
+			flow_dv_matcher_release(dev, dev_handle);
+		if (dev_handle->dvh.encap_decap)
+			flow_dv_encap_decap_resource_release(dev_handle);
+		if (dev_handle->dvh.modify_hdr)
+			flow_dv_modify_hdr_resource_release(dev_handle);
+		if (dev_handle->dvh.jump)
+			flow_dv_jump_tbl_resource_release(dev, dev_handle);
+		if (dev_handle->dvh.port_id_action)
+			flow_dv_port_id_action_resource_release(dev_handle);
+		if (dev_handle->dvh.push_vlan_res)
+			flow_dv_push_vlan_action_resource_release(dev_handle);
+		if (dev_handle->dvh.tag_resource)
 			flow_dv_tag_release(dev,
-					dev_flow->handle.dvh.tag_resource);
-		rte_free(dev_flow);
+					    dev_handle->dvh.tag_resource);
+		rte_free(dev_handle);
 	}
 }
 
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 08185ec..ccd3395 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -253,7 +253,7 @@
  *   Size in bytes of the specification to copy.
  */
 static void
-flow_verbs_spec_add(struct mlx5_flow_resource_verbs *verbs,
+flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
 		    void *src, unsigned int size)
 {
 	void *dst;
@@ -263,7 +263,7 @@
 	MLX5_ASSERT(verbs->specs);
 	dst = (void *)(verbs->specs + verbs->size);
 	memcpy(dst, src, size);
-	++verbs->attr->num_of_specs;
+	++verbs->attr.num_of_specs;
 	verbs->size += size;
 }
 
@@ -392,9 +392,9 @@
 	if (!(item_flags & l2m))
 		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
 	else
-		flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
+		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
 	if (!tunnel)
-		dev_flow->handle.vf_vlan.tag =
+		dev_flow->handle->vf_vlan.tag =
 			rte_be_to_cpu_16(spec->tci) & 0x0fff;
 }
 
@@ -744,7 +744,7 @@
 			      const struct rte_flow_item *item __rte_unused,
 			      uint64_t item_flags)
 {
-	struct mlx5_flow_resource_verbs *verbs = &dev_flow->verbs;
+	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
 	struct ibv_flow_spec_tunnel tunnel = {
@@ -774,11 +774,11 @@
 	}
 #endif
 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
-		flow_verbs_item_gre_ip_protocol_update(verbs->attr,
+		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
 						       IBV_FLOW_SPEC_IPV4_EXT,
 						       IPPROTO_GRE);
 	else
-		flow_verbs_item_gre_ip_protocol_update(verbs->attr,
+		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
 						       IBV_FLOW_SPEC_IPV6,
 						       IPPROTO_GRE);
 	flow_verbs_spec_add(verbs, &tunnel, size);
@@ -1385,6 +1385,8 @@
  * The required size is calculated based on the actions and items. This function
  * also returns the detected actions and items for later use.
  *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
  * @param[in] attr
  *   Pointer to the flow attributes.
  * @param[in] items
@@ -1399,25 +1401,45 @@
  *   is set.
  */
 static struct mlx5_flow *
-flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_verbs_prepare(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr __rte_unused,
 		   const struct rte_flow_item items[],
 		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	size_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
+	size_t size = 0;
 	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
+	struct mlx5_priv *priv = dev->data->dev_private;
 
 	size += flow_verbs_get_actions_size(actions);
 	size += flow_verbs_get_items_size(items);
-	dev_flow = rte_calloc(__func__, 1, size, 0);
-	if (!dev_flow) {
+	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
+		rte_flow_error_set(error, E2BIG,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Verbs spec/action size too large");
+		return NULL;
+	}
+	/* Guard against overflowing the temporary device flow array. */
+	if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+		rte_flow_error_set(error, ENOSPC,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "no free temporary device flow");
+		return NULL;
+	}
+	dev_handle = rte_calloc(__func__, 1, MLX5_FLOW_HANDLE_VERBS_SIZE, 0);
+	if (!dev_handle) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				   "not enough memory to create flow");
+				   "not enough memory to create flow handle");
 		return NULL;
 	}
-	dev_flow->verbs.attr = (void *)(dev_flow + 1);
-	dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
+	/* Multi-threading is not supported. */
+	dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+	dev_flow->handle = dev_handle;
+	/* Memcpy is used; only the size and spec count need clearing to 0. */
+	dev_flow->verbs.size = 0;
+	dev_flow->verbs.attr.num_of_specs = 0;
 	dev_flow->ingress = attr->ingress;
 	/* Need to set transfer attribute: not supported in Verbs mode. */
 	return dev_flow;
@@ -1499,7 +1521,7 @@
 						  "action not supported");
 		}
 	}
-	dev_flow->handle.act_flags = action_flags;
+	dev_flow->handle->act_flags = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 
@@ -1601,10 +1623,11 @@
 						  "item not supported");
 		}
 	}
-	dev_flow->handle.layers = item_flags;
-	dev_flow->verbs.attr->priority =
+	dev_flow->handle->layers = item_flags;
+	/* Other members of attr will be ignored. */
+	dev_flow->verbs.attr.priority =
 		mlx5_flow_adjust_priority(dev, priority, subpriority);
-	dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port;
+	dev_flow->verbs.attr.port = (uint8_t)priv->ibv_port;
 	return 0;
 }
 
@@ -1619,26 +1642,24 @@
 static void
 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow_handle *dh;
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *handle;
 
 	if (!flow)
 		return;
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
-		if (dh->ib_flow) {
-			claim_zero(mlx5_glue->destroy_flow(dh->ib_flow));
-			dh->ib_flow = NULL;
+	LIST_FOREACH(handle, &flow->dev_handles, next) {
+		if (handle->ib_flow) {
+			claim_zero(mlx5_glue->destroy_flow(handle->ib_flow));
+			handle->ib_flow = NULL;
 		}
-		if (dh->hrxq) {
-			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
+		if (handle->hrxq) {
+			if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dh->hrxq);
-			dh->hrxq = NULL;
+				mlx5_hrxq_release(dev, handle->hrxq);
+			handle->hrxq = NULL;
 		}
-		if (dh->vf_vlan.tag && dh->vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+		if (handle->vf_vlan.tag && handle->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
 	}
 }
 
@@ -1653,15 +1674,15 @@
 static void
 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *handle;
 
 	if (!flow)
 		return;
 	flow_verbs_remove(dev, flow);
-	while (!LIST_EMPTY(&flow->dev_flows)) {
-		dev_flow = LIST_FIRST(&flow->dev_flows);
-		LIST_REMOVE(dev_flow, next);
-		rte_free(dev_flow);
+	while (!LIST_EMPTY(&flow->dev_handles)) {
+		handle = LIST_FIRST(&flow->dev_handles);
+		LIST_REMOVE(handle, next);
+		rte_free(handle);
 	}
 	if (flow->counter) {
 		flow_verbs_counter_release(dev, flow->counter);
@@ -1687,15 +1708,17 @@
 		 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_handle *dh;
+	struct mlx5_flow_handle *handle;
 	struct mlx5_flow *dev_flow;
 	int err;
-
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
-		if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP) {
-			dh->hrxq = mlx5_hrxq_drop_new(dev);
-			if (!dh->hrxq) {
+	int idx;
+
+	for (idx = priv->flow_idx - 1; idx >= 0; idx--) {
+		dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
+		handle = dev_flow->handle;
+		if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
+			handle->hrxq = mlx5_hrxq_drop_new(dev);
+			if (!handle->hrxq) {
 				rte_flow_error_set
 					(error, errno,
 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -1717,7 +1740,7 @@
 						dev_flow->hash_fields,
 						(*flow->rss.queue),
 						flow->rss.queue_num,
-						!!(dev_flow->handle.layers &
+						!!(handle->layers &
 						MLX5_FLOW_LAYER_TUNNEL));
 			if (!hrxq) {
 				rte_flow_error_set
@@ -1726,11 +1749,11 @@
 					 "cannot get hash queue");
 				goto error;
 			}
-			dh->hrxq = hrxq;
+			handle->hrxq = hrxq;
 		}
-		dh->ib_flow = mlx5_glue->create_flow(dh->hrxq->qp,
-						     dev_flow->verbs.attr);
-		if (!dh->ib_flow) {
+		handle->ib_flow = mlx5_glue->create_flow(handle->hrxq->qp,
+						     &dev_flow->verbs.attr);
+		if (!handle->ib_flow) {
 			rte_flow_error_set(error, errno,
 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					   NULL,
@@ -1738,31 +1761,29 @@
 			goto error;
 		}
 		if (priv->vmwa_context &&
-		    dev_flow->handle.vf_vlan.tag &&
-		    !dev_flow->handle.vf_vlan.created) {
+		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
 			/*
 			 * The rule contains the VLAN pattern.
 			 * For VF we are going to create VLAN
 			 * interface to make hypervisor set correct
 			 * e-Switch vport context.
 			 */
-			mlx5_vlan_vmwa_acquire(dev, &dev_flow->handle.vf_vlan);
+			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
 		}
 	}
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
-		if (dh->hrxq) {
-			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
+	LIST_FOREACH(handle, &flow->dev_handles, next) {
+		if (handle->hrxq) {
+			if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dh->hrxq);
-			dh->hrxq = NULL;
+				mlx5_hrxq_release(dev, handle->hrxq);
+			handle->hrxq = NULL;
 		}
-		if (dh->vf_vlan.tag && dh->vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+		if (handle->vf_vlan.tag && handle->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
 	}
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 0801cb6..438b705 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -307,6 +307,7 @@
 		mlx5_txq_stop(dev);
 		return -rte_errno;
 	}
+	/* Set the started flag here for following steps like control flows. */
 	dev->data->dev_started = 1;
 	ret = mlx5_rx_intr_vec_enable(dev);
 	if (ret) {
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [dpdk-dev] [PATCH v4 4/4] net/mlx5: check device stat before creating flow
  2020-03-24 15:33   ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
                       ` (2 preceding siblings ...)
  2020-03-24 15:33     ` [dpdk-dev] [PATCH v4 3/4] net/mlx5: separate the flow handle resource Bing Zhao
@ 2020-03-24 15:34     ` Bing Zhao
  2020-03-25  9:13     ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Matan Azrad
  2020-03-29 15:50     ` Raslan Darawsheh
  5 siblings, 0 replies; 26+ messages in thread
From: Bing Zhao @ 2020-03-24 15:34 UTC (permalink / raw)
  To: orika, rasland, matan; +Cc: viacheslavo, dev

By default, the flows of a mlx5 device are categorized into two types:
  1. Default flows, which the PMD itself creates to enable traffic
     and to apply default behaviors to packets. These flows are
     transparent to the upper-layer application.
  2. Other flows, which the application creates based on its own
     needs.
In the old cached mode, the application was allowed to create a flow
before the device was started. When the device started, all such
flows were applied to the hardware and took effect, and the cached
flows were applied at the same time.
In non-cached mode, flows are never cached when a device is stopped,
so it makes no sense to insert any flow into the device before it is
started. Default flows owned by the PMD are not affected in this
case.
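
Below is a minimal, illustrative sketch of the application-side
sequence this change implies. It is not part of the patch; port_id,
the attr/pattern/actions rule definition, and the caller's error
handling are assumed to exist:

#include <rte_ethdev.h>
#include <rte_flow.h>

/*
 * Hypothetical helper, for illustration only: start the port, then
 * install one rule. The attr/pattern/actions definitions are supplied
 * by the caller and are placeholders here.
 */
static struct rte_flow *
start_then_create(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[])
{
	struct rte_flow_error err;

	/*
	 * The port must be started first: in non-cached mode,
	 * rte_flow_create() on a stopped mlx5 port fails and sets
	 * rte_errno to ENODEV.
	 */
	if (rte_eth_dev_start(port_id) != 0)
		return NULL;
	/*
	 * After any stop/start cycle, previously created rules are no
	 * longer valid, so the application must call rte_flow_create()
	 * again for every rule it still needs.
	 */
	return rte_flow_create(port_id, attr, pattern, actions, &err);
}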

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f2d3730..6438a14 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4328,7 +4328,11 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		if (ret)
 			goto error;
 	}
-	if (dev->data->dev_started) {
+	/*
+	 * If the flow is external (from the application) or the device is
+	 * started, then the flow will be applied immediately.
+	 */
+	if (external || dev->data->dev_started) {
 		ret = flow_drv_apply(dev, flow, error);
 		if (ret < 0)
 			goto error;
@@ -4420,6 +4424,17 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
+	/*
+	 * If the device is not started yet, it is not allowed to create a
+	 * flow from the application. PMD default flows and traffic control
+	 * flows are not affected.
+	 */
+	if (unlikely(!dev->data->dev_started)) {
+		rte_errno = ENODEV;
+		DRV_LOG(DEBUG, "port %u is not started when "
+			"inserting a flow", dev->data->port_id);
+		return NULL;
+	}
 	return flow_list_create(dev, &priv->flows,
 				attr, items, actions, true, error);
 }
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules
  2020-03-24 15:33   ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
                       ` (3 preceding siblings ...)
  2020-03-24 15:34     ` [dpdk-dev] [PATCH v4 4/4] net/mlx5: check device stat before creating flow Bing Zhao
@ 2020-03-25  9:13     ` Matan Azrad
  2020-03-29 15:50     ` Raslan Darawsheh
  5 siblings, 0 replies; 26+ messages in thread
From: Matan Azrad @ 2020-03-25  9:13 UTC (permalink / raw)
  To: Bing Zhao, Ori Kam, Raslan Darawsheh; +Cc: Slava Ovsiienko, dev



From: Bing Zhao
> This patch set will remove the flow rules cache and move to the non-cached
> mode for both DV and Verbs mode.
> 
> In the device closing stage, all the software resources for flows created will
> be freed and corresponding hardware resources will be released. Then the
> total cost of the memory will be reduced and the behavior of mlx5 PMD will
> comply fully with the ethdev API expectations.
> 
> After closing a device, all the flow rules stored in the application layer
> will no longer be valid. The application should synchronize its database and
> not try to destroy any rule on this device.
> And after a device restart, all the needed flow rules should be reinserted
> via the create routine in the rte_flow lib.
> 
> ---
> v2 Changes:
>     Fix the compiling error with MLX5 Debug mode in the 4th commit
>     of "net/mlx5: introduce handle structure for DV flows".
> v3 Changes:
>     Refactor the device flow related structures to support non-cached
>     mode for both Verbs and DV flows.
> v4 Changes:
>     Fix the code style warning for stdbool type and a typo in the
>     code line comments.
> ---
> 
> Bing Zhao (4):
>   net/mlx5: change operations for non-cached flows
>   net/mlx5: reorganize mlx5 flow structures
>   net/mlx5: separate the flow handle resource
>   net/mlx5: check device stat before creating flow
> 
>  drivers/net/mlx5/mlx5.c            |  18 ++-
>  drivers/net/mlx5/mlx5.h            |   9 +-
>  drivers/net/mlx5/mlx5_flow.c       | 197 +++++++++++++++++------
>  drivers/net/mlx5/mlx5_flow.h       | 179 ++++++++++++++-------
>  drivers/net/mlx5/mlx5_flow_dv.c    | 311 ++++++++++++++++++++----------
> -------
>  drivers/net/mlx5/mlx5_flow_verbs.c | 156 +++++++++++--------
>  drivers/net/mlx5/mlx5_trigger.c    |  26 ++--
>  7 files changed, 573 insertions(+), 323 deletions(-)

Series-acked-by: Matan Azrad <matan@mellanox.com>

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules
  2020-03-24 15:33   ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
                       ` (4 preceding siblings ...)
  2020-03-25  9:13     ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Matan Azrad
@ 2020-03-29 15:50     ` Raslan Darawsheh
  5 siblings, 0 replies; 26+ messages in thread
From: Raslan Darawsheh @ 2020-03-29 15:50 UTC (permalink / raw)
  To: Bing Zhao, Ori Kam, Matan Azrad; +Cc: Slava Ovsiienko, dev

Hi,

> -----Original Message-----
> From: Bing Zhao <bingz@mellanox.com>
> Sent: Tuesday, March 24, 2020 5:34 PM
> To: Ori Kam <orika@mellanox.com>; Raslan Darawsheh
> <rasland@mellanox.com>; Matan Azrad <matan@mellanox.com>
> Cc: Slava Ovsiienko <viacheslavo@mellanox.com>; dev@dpdk.org
> Subject: [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules
> 
> This patch set will remove the flow rules cache and move to the
> non-cached mode for both DV and Verbs mode.
> 
> In the device closing stage, all the software resources for flows
> created will be freed and corresponding hardware resources will be
> released. Then the total cost of the memory will be reduced and the
> behavior of mlx5 PMD will comply fully with the ethdev API
> expectations.
> 
> After closing a device, all the flow rules stored in the application
> layer will no longer be valid. The application should synchronize
> the database and not try to destroy any rule on this device.
> And after a device restart, all the needed flow rules should be
> reinserted via the create routine in the rte_flow lib.
> 
> ---
> v2 Changes:
>     Fix the compiling error with MLX5 Debug mode in the 4th commit
>     of "net/mlx5: introduce handle structure for DV flows".
> v3 Changes:
>     Refactor the device flow related structures to support non-cached
>     mode for both Verbs and DV flows.
> v4 Changes:
>     Fix the code style warning for stdbool type and a typo in the
>     code line comments.
> ---
> 
> Bing Zhao (4):
>   net/mlx5: change operations for non-cached flows
>   net/mlx5: reorganize mlx5 flow structures
>   net/mlx5: separate the flow handle resource
>   net/mlx5: check device stat before creating flow
> 
>  drivers/net/mlx5/mlx5.c            |  18 ++-
>  drivers/net/mlx5/mlx5.h            |   9 +-
>  drivers/net/mlx5/mlx5_flow.c       | 197 +++++++++++++++++------
>  drivers/net/mlx5/mlx5_flow.h       | 179 ++++++++++++++-------
>  drivers/net/mlx5/mlx5_flow_dv.c    | 311 ++++++++++++++++++++----------
> -------
>  drivers/net/mlx5/mlx5_flow_verbs.c | 156 +++++++++++--------
>  drivers/net/mlx5/mlx5_trigger.c    |  26 ++--
>  7 files changed, 573 insertions(+), 323 deletions(-)
> 
> --
> 1.8.3.1

Series applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh

^ permalink raw reply	[flat|nested] 26+ messages in thread

end of thread, other threads:[~2020-03-29 15:50 UTC | newest]

Thread overview: 26+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-02-03 13:32 [dpdk-dev] [PATCH 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 1/6] net/mlx5: introduce non-cached flows tailq list Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 2/6] net/mlx5: change operations of non-cached flows Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 3/6] net/mlx5: flow type check before creating Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 4/6] net/mlx5: introduce handle structure for DV flows Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 5/6] net/mlx5: remove the DV support macro checking Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 6/6] net/mlx5: do not save device flow matcher value Bing Zhao
2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 1/6] net/mlx5: introduce non-cached flows tailq list Bing Zhao
2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 2/6] net/mlx5: change operations of non-cached flows Bing Zhao
2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 3/6] net/mlx5: flow type check before creating Bing Zhao
2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 4/6] net/mlx5: introduce handle structure for DV flows Bing Zhao
2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 5/6] net/mlx5: remove the DV support macro checking Bing Zhao
2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 6/6] net/mlx5: do not save device flow matcher value Bing Zhao
2020-03-24 15:16   ` [dpdk-dev] [PATCH v3 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 1/4] net/mlx5: change operations for non-cached flows Bing Zhao
2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 2/4] net/mlx5: reorganize mlx5 flow structures Bing Zhao
2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 3/4] net/mlx5: separate the flow handle resource Bing Zhao
2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 4/4] net/mlx5: check device stat before creating flow Bing Zhao
2020-03-24 15:33   ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
2020-03-24 15:33     ` [dpdk-dev] [PATCH v4 1/4] net/mlx5: change operations for non-cached flows Bing Zhao
2020-03-24 15:33     ` [dpdk-dev] [PATCH v4 2/4] net/mlx5: reorganize mlx5 flow structures Bing Zhao
2020-03-24 15:33     ` [dpdk-dev] [PATCH v4 3/4] net/mlx5: separate the flow handle resource Bing Zhao
2020-03-24 15:34     ` [dpdk-dev] [PATCH v4 4/4] net/mlx5: check device stat before creating flow Bing Zhao
2020-03-25  9:13     ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Matan Azrad
2020-03-29 15:50     ` Raslan Darawsheh

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).