DPDK patches and discussions
From: Bing Zhao <bingz@mellanox.com>
To: orika@mellanox.com, rasland@mellanox.com, matan@mellanox.com
Cc: viacheslavo@mellanox.com, dev@dpdk.org
Subject: [dpdk-dev] [PATCH v3 2/4] net/mlx5: reorganize mlx5 flow structures
Date: Tue, 24 Mar 2020 15:16:18 +0000	[thread overview]
Message-ID: <1585062980-27196-3-git-send-email-bingz@mellanox.com> (raw)
In-Reply-To: <1585062980-27196-1-git-send-email-bingz@mellanox.com>

Common structures used for creating and destroying mlx5 flows are
reorganized in order to separate the fields needed only for destroying
a flow from all the other fields.
The "mlx5_flow" structure now contains the fields common to DV and
Verbs flows, plus the fields specific to DV / Verbs only. These fields
are used only when creating a flow.
At the end of "mlx5_flow", a nested structure "mlx5_flow_handle" is
located. It contains all the fields used both for creating and for
destroying a flow, again split into common fields and DV / Verbs
specific ones.
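
In outline, the resulting layout is as follows (a trimmed sketch of
the definitions added to mlx5_flow.h by this patch; most members and
their comments are abbreviated):

	/* Fields used both when creating and destroying a flow. */
	struct mlx5_flow_handle {
		uint64_t layers;        /* MLX5_FLOW_LAYER_* bits. */
		uint64_t act_flags;     /* MLX5_FLOW_ACTION_* bits. */
		void *ib_flow;          /* Verbs flow pointer. */
		struct mlx5_hrxq *hrxq; /* Hash Rx queue object. */
		struct mlx5_vf_vlan vf_vlan;
		union {
			uint32_t qrss_id;     /* Q/RSS suffix subflow tag. */
			uint32_t mtr_flow_id; /* Meter match flow id. */
		};
	#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		struct mlx5_flow_handle_dv dvh; /* DV destroy resources. */
	#endif
	};

	/* Device flow: creation-only fields, handle nested at the end. */
	struct mlx5_flow {
		LIST_ENTRY(mlx5_flow) next;
		struct rte_flow *flow;  /* Pointer to the main flow. */
		uint64_t hash_fields;
		bool external;
		uint8_t ingress;
		union {                 /* Creation-only resources. */
	#ifdef HAVE_IBV_FLOW_DV_SUPPORT
			struct mlx5_flow_resource_dv dv;
	#endif
			struct mlx5_flow_resource_verbs verbs;
		};
		struct mlx5_flow_handle handle;
	};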

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c       |  43 ++++----
 drivers/net/mlx5/mlx5_flow.h       | 108 ++++++++++----------
 drivers/net/mlx5/mlx5_flow_dv.c    | 197 +++++++++++++++++++------------------
 drivers/net/mlx5/mlx5_flow_verbs.c |  89 ++++++++---------
 4 files changed, 223 insertions(+), 214 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 81a85ec..230f071 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -720,9 +720,9 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->actions &
+	const int mark = !!(dev_flow->handle.act_flags &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+	const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
 
 	for (i = 0; i != flow->rss.queue_num; ++i) {
@@ -751,7 +751,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			/* Increase the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
 				if ((tunnels_info[j].tunnel &
-				     dev_flow->layers) ==
+				     dev_flow->handle.layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]++;
 					break;
@@ -793,9 +793,9 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->actions &
+	const int mark = !!(dev_flow->handle.act_flags &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+	const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
 
 	MLX5_ASSERT(dev->data->dev_started);
@@ -820,7 +820,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			/* Decrease the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
 				if ((tunnels_info[j].tunnel &
-				     dev_flow->layers) ==
+				     dev_flow->handle.layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]--;
 					break;
@@ -2312,8 +2312,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	struct mlx5_flow *dev_flow;
 
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		if (dev_flow->qrss_id)
-			flow_qrss_free_id(dev, dev_flow->qrss_id);
+		if (dev_flow->handle.qrss_id)
+			flow_qrss_free_id(dev, dev_flow->handle.qrss_id);
 }
 
 static int
@@ -2696,18 +2696,22 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	uint64_t layers = 0;
 
-	/* If no decap actions, use the layers directly. */
-	if (!(dev_flow->actions & MLX5_FLOW_ACTION_DECAP))
-		return dev_flow->layers;
+	/*
+	 * The layers bits could be cached in a local variable, but usually
+	 * the compiler will do that optimization on its own.
+	 * If no decap actions, use the layers directly.
+	 */
+	if (!(dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DECAP))
+		return dev_flow->handle.layers;
 	/* Convert L3 layers with decap action. */
-	if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+	if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
-	else if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+	else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
 	/* Convert L4 layers with decap action.  */
-	if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
+	if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
 		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
-	else if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
+	else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
 		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
 	return layers;
 }
@@ -3453,7 +3457,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	 * flow may need some user defined item layer flags.
 	 */
 	if (prefix_layers)
-		dev_flow->layers = prefix_layers;
+		dev_flow->handle.layers = prefix_layers;
 	if (sub_flow)
 		*sub_flow = dev_flow;
 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
@@ -3968,8 +3972,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			 * reallocation becomes possible (for example, for
 			 * other flows in other threads).
 			 */
-			dev_flow->qrss_id = qrss_id;
-			qrss_id = 0;
+			dev_flow->handle.qrss_id = qrss_id;
 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
 						   error);
 			if (ret < 0)
@@ -3984,6 +3987,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 					      external, error);
 		if (ret < 0)
 			goto exit;
+		/* Clear qrss_id after success; it is freed only on failure. */
+		qrss_id = 0;
 		MLX5_ASSERT(dev_flow);
 	}
 
@@ -4080,7 +4085,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			ret = -rte_errno;
 			goto exit;
 		}
-		dev_flow->mtr_flow_id = mtr_tag_id;
+		dev_flow->handle.mtr_flow_id = mtr_tag_id;
 		/* Setting the sfx group atrr. */
 		sfx_attr.group = sfx_attr.transfer ?
 				(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 13c8589..f3aea53 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -464,25 +464,28 @@ struct mlx5_flow_tbl_data_entry {
 	/**< jump resource, at most one for each table created. */
 };
 
-/*
- * Max number of actions per DV flow.
- * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
- * In rdma-core file providers/mlx5/verbs.c
- */
-#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
+/* Verbs specification header. */
+struct ibv_spec_header {
+	enum ibv_flow_spec_type type;
+	uint16_t size;
+};
+
+struct mlx5_flow_rss {
+	uint32_t level;
+	uint32_t queue_num; /**< Number of entries in @p queue. */
+	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
+	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+};
 
-/* DV flows structure. */
-struct mlx5_flow_dv {
-	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
+/** Device flow handle structure for DV mode only. */
+struct mlx5_flow_handle_dv {
 	/* Flow DV api: */
 	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
-	struct mlx5_flow_dv_match_params value;
-	/**< Holds the value that the packet is compared to. */
 	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
 	/**< Pointer to encap/decap resource in cache. */
 	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
 	/**< Pointer to modify header resource in cache. */
-	struct ibv_flow *flow; /**< Installed flow. */
 	struct mlx5_flow_dv_jump_tbl_resource *jump;
 	/**< Pointer to the jump action resource. */
 	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
@@ -493,65 +496,64 @@ struct mlx5_flow_dv {
 	/**< Pointer to push VLAN action resource in cache. */
 	struct mlx5_flow_dv_tag_resource *tag_resource;
 	/**< pointer to the tag action. */
+};
+
+/** Device flow handle structure: used both for creating & destroying. */
+struct mlx5_flow_handle {
+	uint64_t layers;
+	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
+	uint64_t act_flags;
+	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
+	void *ib_flow; /**< Verbs flow pointer. */
+	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
+	struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
+	union {
+		uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
+		uint32_t mtr_flow_id; /**< Unique meter match flow id. */
+	};
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
-	/**< Action list. */
+	struct mlx5_flow_handle_dv dvh;
 #endif
-	int actions_n; /**< number of actions. */
 };
 
-/* Verbs specification header. */
-struct ibv_spec_header {
-	enum ibv_flow_spec_type type;
-	uint16_t size;
-};
+/*
+ * Max number of actions per DV flow.
+ * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
+ * in rdma-core file providers/mlx5/verbs.c.
+ */
+#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
 
-/** Handles information leading to a drop fate. */
-struct mlx5_flow_verbs {
-	LIST_ENTRY(mlx5_flow_verbs) next;
-	unsigned int size; /**< Size of the attribute. */
-	struct {
-		struct ibv_flow_attr *attr;
-		/**< Pointer to the Specification buffer. */
-		uint8_t *specs; /**< Pointer to the specifications. */
-	};
-	struct ibv_flow *flow; /**< Verbs flow pointer. */
-	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
-	struct mlx5_vf_vlan vf_vlan;
-	/**< Structure for VF VLAN workaround. */
+/** Device flow structure only for DV flow creation. */
+struct mlx5_flow_resource_dv {
+	uint32_t group; /**< The group index. */
+	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
+	int actions_n; /**< number of actions. */
+	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */
+	struct mlx5_flow_dv_match_params value;
+	/**< Holds the value that the packet is compared to. */
 };
 
-struct mlx5_flow_rss {
-	uint32_t level;
-	uint32_t queue_num; /**< Number of entries in @p queue. */
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
-	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
-	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+/** Device flow structure only for Verbs flow creation. */
+struct mlx5_flow_resource_verbs {
+	unsigned int size; /**< Size of the attribute. */
+	struct ibv_flow_attr *attr; /**< Pointer to the Specification buffer. */
+	uint8_t *specs; /**< Pointer to the specifications. */
 };
 
 /** Device flow structure. */
 struct mlx5_flow {
-	LIST_ENTRY(mlx5_flow) next;
+	LIST_ENTRY(mlx5_flow) next; /**< Pointer to next device flow. */
 	struct rte_flow *flow; /**< Pointer to the main flow. */
-	uint64_t layers;
-	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
-	uint64_t actions;
-	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
 	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+	bool external; /**< true if the flow is created external to PMD. */
 	uint8_t ingress; /**< 1 if the flow is ingress. */
-	uint32_t group; /**< The group index. */
-	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
 	union {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		struct mlx5_flow_dv dv;
+		struct mlx5_flow_resource_dv dv;
 #endif
-		struct mlx5_flow_verbs verbs;
-	};
-	union {
-		uint32_t qrss_id; /**< Uniqie Q/RSS suffix subflow tag. */
-		uint32_t mtr_flow_id; /**< Unique meter match flow id. */
+		struct mlx5_flow_resource_verbs verbs;
 	};
-	bool external; /**< true if the flow is created external to PMD. */
+	struct mlx5_flow_handle handle;
 };
 
 /* Flow meter state. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 2090631..d1eec96 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -92,20 +92,22 @@
 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
 		  struct mlx5_flow *dev_flow, bool tunnel_decap)
 {
+	uint64_t layers = dev_flow->handle.layers;
+
 	/*
 	 * If layers is already initialized, it means this dev_flow is the
 	 * suffix flow, the layers flags is set by the prefix flow. Need to
 	 * use the layer flags from prefix flow as the suffix flow may not
 	 * have the user defined items as the flow is split.
 	 */
-	if (dev_flow->layers) {
-		if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+	if (layers) {
+		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
 			attr->ipv4 = 1;
-		else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
+		else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
 			attr->ipv6 = 1;
-		if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
+		if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
 			attr->tcp = 1;
-		else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
+		else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
 			attr->udp = 1;
 		attr->valid = 1;
 		return;
@@ -2377,7 +2379,7 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
 	struct mlx5dv_dr_domain *domain;
 
-	resource->flags = dev_flow->group ? 0 : 1;
+	resource->flags = dev_flow->dv.group ? 0 : 1;
 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
 		domain = sh->fdb_domain;
 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
@@ -2397,7 +2399,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.encap_decap = cache_resource;
+			dev_flow->handle.dvh.encap_decap = cache_resource;
 			return 0;
 		}
 	}
@@ -2423,7 +2425,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
-	dev_flow->dv.encap_decap = cache_resource;
+	dev_flow->handle.dvh.encap_decap = cache_resource;
 	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2474,7 +2476,7 @@ struct field_modify_info modify_tcp[] = {
 			(void *)&tbl_data->jump, cnt);
 	}
 	rte_atomic32_inc(&tbl_data->jump.refcnt);
-	dev_flow->dv.jump = &tbl_data->jump;
+	dev_flow->handle.dvh.jump = &tbl_data->jump;
 	return 0;
 }
 
@@ -2512,7 +2514,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.port_id_action = cache_resource;
+			dev_flow->handle.dvh.port_id_action = cache_resource;
 			return 0;
 		}
 	}
@@ -2540,7 +2542,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
-	dev_flow->dv.port_id_action = cache_resource;
+	dev_flow->handle.dvh.port_id_action = cache_resource;
 	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2583,7 +2585,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.push_vlan_res = cache_resource;
+			dev_flow->handle.dvh.push_vlan_res = cache_resource;
 			return 0;
 		}
 	}
@@ -2612,7 +2614,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
-	dev_flow->dv.push_vlan_res = cache_resource;
+	dev_flow->handle.dvh.push_vlan_res = cache_resource;
 	DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -3699,8 +3701,8 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5dv_dr_domain *ns;
 	uint32_t actions_len;
 
-	resource->flags =
-		dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
+	resource->flags = dev_flow->dv.group ? 0 :
+			  MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
 	if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
 				    resource->flags))
 		return rte_flow_error_set(error, EOVERFLOW,
@@ -3725,7 +3727,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->dv.modify_hdr = cache_resource;
+			dev_flow->handle.dvh.modify_hdr = cache_resource;
 			return 0;
 		}
 	}
@@ -3752,7 +3754,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
-	dev_flow->dv.modify_hdr = cache_resource;
+	dev_flow->handle.dvh.modify_hdr = cache_resource;
 	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -5236,7 +5238,7 @@ struct field_modify_info modify_tcp[] = {
 	}
 	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
 	dev_flow->ingress = attr->ingress;
-	dev_flow->transfer = attr->transfer;
+	dev_flow->dv.transfer = attr->transfer;
 	return dev_flow;
 }
 
@@ -5392,7 +5394,7 @@ struct field_modify_info modify_tcp[] = {
 		 * This is workaround, masks are not supported,
 		 * and pre-validated.
 		 */
-		dev_flow->dv.vf_vlan.tag =
+		dev_flow->handle.vf_vlan.tag =
 			rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
 	}
 	tci_m = rte_be_to_cpu_16(vlan_m->tci);
@@ -6893,7 +6895,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_matcher,
 				rte_atomic32_read(&cache_matcher->refcnt));
 			rte_atomic32_inc(&cache_matcher->refcnt);
-			dev_flow->dv.matcher = cache_matcher;
+			dev_flow->handle.dvh.matcher = cache_matcher;
 			/* old matcher should not make the table ref++. */
 			flow_dv_tbl_resource_release(dev, tbl);
 			return 0;
@@ -6930,7 +6932,7 @@ struct field_modify_info modify_tcp[] = {
 	/* only matcher ref++, table ref++ already done above in get API. */
 	rte_atomic32_inc(&cache_matcher->refcnt);
 	LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
-	dev_flow->dv.matcher = cache_matcher;
+	dev_flow->handle.dvh.matcher = cache_matcher;
 	DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
 		key->domain ? "FDB" : "NIC", key->table_id,
 		cache_matcher->priority,
@@ -6972,7 +6974,7 @@ struct field_modify_info modify_tcp[] = {
 		cache_resource = container_of
 			(entry, struct mlx5_flow_dv_tag_resource, entry);
 		rte_atomic32_inc(&cache_resource->refcnt);
-		dev_flow->dv.tag_resource = cache_resource;
+		dev_flow->handle.dvh.tag_resource = cache_resource;
 		DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
 			(void *)cache_resource,
 			rte_atomic32_read(&cache_resource->refcnt));
@@ -7001,7 +7003,7 @@ struct field_modify_info modify_tcp[] = {
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot insert tag");
 	}
-	dev_flow->dv.tag_resource = cache_resource;
+	dev_flow->handle.dvh.tag_resource = cache_resource;
 	DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -7146,7 +7148,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
 {
 	struct rte_flow *flow = dev_flow->flow;
-	uint64_t items = dev_flow->layers;
+	uint64_t items = dev_flow->handle.layers;
 	int rss_inner = 0;
 	uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
 
@@ -7271,7 +7273,7 @@ struct field_modify_info modify_tcp[] = {
 				       !!priv->fdb_def_rule, &table, error);
 	if (ret)
 		return ret;
-	dev_flow->group = table;
+	dev_flow->dv.group = table;
 	if (attr->transfer)
 		mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
 	if (priority == MLX5_FLOW_PRIO_RSVD)
@@ -7304,7 +7306,7 @@ struct field_modify_info modify_tcp[] = {
 			    (dev, &port_id_resource, dev_flow, error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.port_id_action->action;
+				dev_flow->handle.dvh.port_id_action->action;
 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
 			break;
 		case RTE_FLOW_ACTION_TYPE_FLAG:
@@ -7322,12 +7324,12 @@ struct field_modify_info modify_tcp[] = {
 				break;
 			}
 			tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
-			if (!dev_flow->dv.tag_resource)
+			if (!dev_flow->handle.dvh.tag_resource)
 				if (flow_dv_tag_resource_register
 				    (dev, tag_be, dev_flow, error))
 					return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.tag_resource->action;
+				dev_flow->handle.dvh.tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			action_flags |= MLX5_FLOW_ACTION_MARK;
@@ -7349,12 +7351,12 @@ struct field_modify_info modify_tcp[] = {
 			tag_be = mlx5_flow_mark_set
 			      (((const struct rte_flow_action_mark *)
 			       (actions->conf))->id);
-			if (!dev_flow->dv.tag_resource)
+			if (!dev_flow->handle.dvh.tag_resource)
 				if (flow_dv_tag_resource_register
 				    (dev, tag_be, dev_flow, error))
 					return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.tag_resource->action;
+				dev_flow->handle.dvh.tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_META:
 			if (flow_dv_convert_action_set_meta
@@ -7404,9 +7406,9 @@ struct field_modify_info modify_tcp[] = {
 				goto cnt_err;
 			}
 			flow->counter = flow_dv_counter_alloc(dev,
-							      count->shared,
-							      count->id,
-							      dev_flow->group);
+							count->shared,
+							count->id,
+							dev_flow->dv.group);
 			if (flow->counter == NULL)
 				goto cnt_err;
 			dev_flow->dv.actions[actions_n++] =
@@ -7452,7 +7454,7 @@ struct field_modify_info modify_tcp[] = {
 					    (dev, attr, &vlan, dev_flow, error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-					   dev_flow->dv.push_vlan_res->action;
+				dev_flow->handle.dvh.push_vlan_res->action;
 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
@@ -7479,7 +7481,7 @@ struct field_modify_info modify_tcp[] = {
 							   error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
@@ -7489,7 +7491,7 @@ struct field_modify_info modify_tcp[] = {
 							   error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
@@ -7499,7 +7501,7 @@ struct field_modify_info modify_tcp[] = {
 					(dev, actions, dev_flow, attr, error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			} else {
 				/* Handle encap without preceding decap. */
 				if (flow_dv_create_action_l2_encap
@@ -7507,7 +7509,7 @@ struct field_modify_info modify_tcp[] = {
 				     error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			}
 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
@@ -7519,7 +7521,7 @@ struct field_modify_info modify_tcp[] = {
 				    (dev, dev_flow, attr->transfer, error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+				dev_flow->handle.dvh.encap_decap->verbs_action;
 			}
 			/* If decap is followed by encap, handle it at encap. */
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
@@ -7551,7 +7553,7 @@ struct field_modify_info modify_tcp[] = {
 						 "cannot create jump action.");
 			}
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->dv.jump->action;
+				dev_flow->handle.dvh.jump->action;
 			action_flags |= MLX5_FLOW_ACTION_JUMP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
@@ -7684,7 +7686,7 @@ struct field_modify_info modify_tcp[] = {
 					(dev, mhdr_res, dev_flow, error))
 					return -rte_errno;
 				dev_flow->dv.actions[modify_action_position] =
-					dev_flow->dv.modify_hdr->verbs_action;
+				dev_flow->handle.dvh.modify_hdr->verbs_action;
 			}
 			break;
 		default:
@@ -7695,7 +7697,7 @@ struct field_modify_info modify_tcp[] = {
 			modify_action_position = actions_n++;
 	}
 	dev_flow->dv.actions_n = actions_n;
-	dev_flow->actions = action_flags;
+	dev_flow->handle.act_flags = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 		int item_type = items->type;
@@ -7728,7 +7730,7 @@ struct field_modify_info modify_tcp[] = {
 						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv4(match_mask, match_value,
 						    items, item_flags, tunnel,
-						    dev_flow->group);
+						    dev_flow->dv.group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
@@ -7751,7 +7753,7 @@ struct field_modify_info modify_tcp[] = {
 						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv6(match_mask, match_value,
 						    items, item_flags, tunnel,
-						    dev_flow->group);
+						    dev_flow->dv.group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
@@ -7900,7 +7902,7 @@ struct field_modify_info modify_tcp[] = {
 	 * Layers may be already initialized from prefix flow if this dev_flow
 	 * is the suffix flow.
 	 */
-	dev_flow->layers |= item_flags;
+	dev_flow->handle.layers |= item_flags;
 	if (action_flags & MLX5_FLOW_ACTION_RSS)
 		flow_dv_hashfields_set(dev_flow);
 	/* Register matcher. */
@@ -7911,7 +7913,7 @@ struct field_modify_info modify_tcp[] = {
 	/* reserved field no needs to be set to 0 here. */
 	tbl_key.domain = attr->transfer;
 	tbl_key.direction = attr->egress;
-	tbl_key.table_id = dev_flow->group;
+	tbl_key.table_id = dev_flow->dv.group;
 	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
 		return -rte_errno;
 	return 0;
@@ -7935,21 +7937,25 @@ struct field_modify_info modify_tcp[] = {
 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 		struct rte_flow_error *error)
 {
-	struct mlx5_flow_dv *dv;
+	struct mlx5_flow_resource_dv *dv;
+	struct mlx5_flow_handle *dh;
+	struct mlx5_flow_handle_dv *dv_h;
 	struct mlx5_flow *dev_flow;
 	struct mlx5_priv *priv = dev->data->dev_private;
 	int n;
 	int err;
 
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+		dh = &dev_flow->handle;
 		dv = &dev_flow->dv;
 		n = dv->actions_n;
-		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
-			if (dev_flow->transfer) {
+		dv_h = &dh->dvh;
+		if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
+			if (dv->transfer) {
 				dv->actions[n++] = priv->sh->esw_drop_action;
 			} else {
-				dv->hrxq = mlx5_hrxq_drop_new(dev);
-				if (!dv->hrxq) {
+				dh->hrxq = mlx5_hrxq_drop_new(dev);
+				if (!dh->hrxq) {
 					rte_flow_error_set
 						(error, errno,
 						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -7957,9 +7963,9 @@ struct field_modify_info modify_tcp[] = {
 						 "cannot get drop hash queue");
 					goto error;
 				}
-				dv->actions[n++] = dv->hrxq->action;
+				dv->actions[n++] = dh->hrxq->action;
 			}
-		} else if (dev_flow->actions &
+		} else if (dh->act_flags &
 			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
 			struct mlx5_hrxq *hrxq;
 
@@ -7976,7 +7982,7 @@ struct field_modify_info modify_tcp[] = {
 					 dev_flow->hash_fields,
 					 (*flow->rss.queue),
 					 flow->rss.queue_num,
-					 !!(dev_flow->layers &
+					 !!(dev_flow->handle.layers &
 					    MLX5_FLOW_LAYER_TUNNEL));
 			}
 			if (!hrxq) {
@@ -7986,14 +7992,14 @@ struct field_modify_info modify_tcp[] = {
 					 "cannot get hash queue");
 				goto error;
 			}
-			dv->hrxq = hrxq;
-			dv->actions[n++] = dv->hrxq->action;
+			dh->hrxq = hrxq;
+			dv->actions[n++] = dh->hrxq->action;
 		}
-		dv->flow =
-			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
+		dh->ib_flow =
+			mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
 						  (void *)&dv->value, n,
 						  dv->actions);
-		if (!dv->flow) {
+		if (!dh->ib_flow) {
 			rte_flow_error_set(error, errno,
 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					   NULL,
@@ -8001,32 +8007,30 @@ struct field_modify_info modify_tcp[] = {
 			goto error;
 		}
 		if (priv->vmwa_context &&
-		    dev_flow->dv.vf_vlan.tag &&
-		    !dev_flow->dv.vf_vlan.created) {
+		    dh->vf_vlan.tag && !dh->vf_vlan.created) {
 			/*
 			 * The rule contains the VLAN pattern.
 			 * For VF we are going to create VLAN
 			 * interface to make hypervisor set correct
 			 * e-Switch vport context.
 			 */
-			mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
+			mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
 		}
 	}
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		struct mlx5_flow_dv *dv = &dev_flow->dv;
-		if (dv->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+		struct mlx5_flow_handle *dh_tmp = &dev_flow->handle;
+		if (dh_tmp->hrxq) {
+			if (dh_tmp->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dv->hrxq);
-			dv->hrxq = NULL;
+				mlx5_hrxq_release(dev, dh_tmp->hrxq);
+			dh_tmp->hrxq = NULL;
 		}
-		if (dev_flow->dv.vf_vlan.tag &&
-		    dev_flow->dv.vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+		if (dh_tmp->vf_vlan.tag && dh_tmp->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh_tmp->vf_vlan);
 	}
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
@@ -8047,7 +8051,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_matcher_release(struct rte_eth_dev *dev,
 			struct mlx5_flow *flow)
 {
-	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+	struct mlx5_flow_dv_matcher *matcher = flow->handle.dvh.matcher;
 
 	MLX5_ASSERT(matcher->matcher_object);
 	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
@@ -8080,7 +8084,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
 {
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource =
-						flow->dv.encap_decap;
+						flow->handle.dvh.encap_decap;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
@@ -8113,7 +8117,8 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
 				  struct mlx5_flow *flow)
 {
-	struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
+	struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
+						flow->handle.dvh.jump;
 	struct mlx5_flow_tbl_data_entry *tbl_data =
 			container_of(cache_resource,
 				     struct mlx5_flow_tbl_data_entry, jump);
@@ -8147,7 +8152,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
 {
 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
-						flow->dv.modify_hdr;
+						flow->handle.dvh.modify_hdr;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
@@ -8178,7 +8183,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
 {
 	struct mlx5_flow_dv_port_id_action_resource *cache_resource =
-		flow->dv.port_id_action;
+						flow->handle.dvh.port_id_action;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
@@ -8209,7 +8214,7 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
 {
 	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
-		flow->dv.push_vlan_res;
+						flow->handle.dvh.push_vlan_res;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
@@ -8239,27 +8244,26 @@ struct field_modify_info modify_tcp[] = {
 static void
 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow_dv *dv;
+	struct mlx5_flow_handle *dh;
 	struct mlx5_flow *dev_flow;
 
 	if (!flow)
 		return;
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dv = &dev_flow->dv;
-		if (dv->flow) {
-			claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
-			dv->flow = NULL;
+		dh = &dev_flow->handle;
+		if (dh->ib_flow) {
+			claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
+			dh->ib_flow = NULL;
 		}
-		if (dv->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+		if (dh->hrxq) {
+			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dv->hrxq);
-			dv->hrxq = NULL;
+				mlx5_hrxq_release(dev, dh->hrxq);
+			dh->hrxq = NULL;
 		}
-		if (dev_flow->dv.vf_vlan.tag &&
-		    dev_flow->dv.vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+		if (dh->vf_vlan.tag && dh->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
 }
 
@@ -8291,20 +8295,21 @@ struct field_modify_info modify_tcp[] = {
 	while (!LIST_EMPTY(&flow->dev_flows)) {
 		dev_flow = LIST_FIRST(&flow->dev_flows);
 		LIST_REMOVE(dev_flow, next);
-		if (dev_flow->dv.matcher)
+		if (dev_flow->handle.dvh.matcher)
 			flow_dv_matcher_release(dev, dev_flow);
-		if (dev_flow->dv.encap_decap)
+		if (dev_flow->handle.dvh.encap_decap)
 			flow_dv_encap_decap_resource_release(dev_flow);
-		if (dev_flow->dv.modify_hdr)
+		if (dev_flow->handle.dvh.modify_hdr)
 			flow_dv_modify_hdr_resource_release(dev_flow);
-		if (dev_flow->dv.jump)
+		if (dev_flow->handle.dvh.jump)
 			flow_dv_jump_tbl_resource_release(dev, dev_flow);
-		if (dev_flow->dv.port_id_action)
+		if (dev_flow->handle.dvh.port_id_action)
 			flow_dv_port_id_action_resource_release(dev_flow);
-		if (dev_flow->dv.push_vlan_res)
+		if (dev_flow->handle.dvh.push_vlan_res)
 			flow_dv_push_vlan_action_resource_release(dev_flow);
-		if (dev_flow->dv.tag_resource)
-			flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
+		if (dev_flow->handle.dvh.tag_resource)
+			flow_dv_tag_release(dev,
+					dev_flow->handle.dvh.tag_resource);
 		rte_free(dev_flow);
 	}
 }
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 459e7b6..08185ec 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -253,7 +253,8 @@
  *   Size in bytes of the specification to copy.
  */
 static void
-flow_verbs_spec_add(struct mlx5_flow_verbs *verbs, void *src, unsigned int size)
+flow_verbs_spec_add(struct mlx5_flow_resource_verbs *verbs,
+		    void *src, unsigned int size)
 {
 	void *dst;
 
@@ -393,7 +394,7 @@
 	else
 		flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
 	if (!tunnel)
-		dev_flow->verbs.vf_vlan.tag =
+		dev_flow->handle.vf_vlan.tag =
 			rte_be_to_cpu_16(spec->tci) & 0x0fff;
 }
 
@@ -743,7 +744,7 @@
 			      const struct rte_flow_item *item __rte_unused,
 			      uint64_t item_flags)
 {
-	struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
+	struct mlx5_flow_resource_verbs *verbs = &dev_flow->verbs;
 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
 	struct ibv_flow_spec_tunnel tunnel = {
@@ -1418,7 +1419,7 @@
 	dev_flow->verbs.attr = (void *)(dev_flow + 1);
 	dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
 	dev_flow->ingress = attr->ingress;
-	dev_flow->transfer = attr->transfer;
+	/* No need to set transfer attribute: not supported in Verbs mode. */
 	return dev_flow;
 }
 
@@ -1498,7 +1499,7 @@
 						  "action not supported");
 		}
 	}
-	dev_flow->actions = action_flags;
+	dev_flow->handle.act_flags = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 
@@ -1600,7 +1601,7 @@
 						  "item not supported");
 		}
 	}
-	dev_flow->layers = item_flags;
+	dev_flow->handle.layers = item_flags;
 	dev_flow->verbs.attr->priority =
 		mlx5_flow_adjust_priority(dev, priority, subpriority);
 	dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port;
@@ -1618,28 +1619,26 @@
 static void
 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow_verbs *verbs;
+	struct mlx5_flow_handle *dh;
 	struct mlx5_flow *dev_flow;
 
 	if (!flow)
 		return;
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		verbs = &dev_flow->verbs;
-		if (verbs->flow) {
-			claim_zero(mlx5_glue->destroy_flow(verbs->flow));
-			verbs->flow = NULL;
+		dh = &dev_flow->handle;
+		if (dh->ib_flow) {
+			claim_zero(mlx5_glue->destroy_flow(dh->ib_flow));
+			dh->ib_flow = NULL;
 		}
-		if (verbs->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+		if (dh->hrxq) {
+			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, verbs->hrxq);
-			verbs->hrxq = NULL;
-		}
-		if (dev_flow->verbs.vf_vlan.tag &&
-		    dev_flow->verbs.vf_vlan.created) {
-			mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
+				mlx5_hrxq_release(dev, dh->hrxq);
+			dh->hrxq = NULL;
 		}
+		if (dh->vf_vlan.tag && dh->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
 }
 
@@ -1688,15 +1687,15 @@
 		 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_verbs *verbs;
+	struct mlx5_flow_handle *dh;
 	struct mlx5_flow *dev_flow;
 	int err;
 
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		verbs = &dev_flow->verbs;
-		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
-			verbs->hrxq = mlx5_hrxq_drop_new(dev);
-			if (!verbs->hrxq) {
+		dh = &dev_flow->handle;
+		if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP) {
+			dh->hrxq = mlx5_hrxq_drop_new(dev);
+			if (!dh->hrxq) {
 				rte_flow_error_set
 					(error, errno,
 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -1714,12 +1713,12 @@
 					     flow->rss.queue_num);
 			if (!hrxq)
 				hrxq = mlx5_hrxq_new(dev, flow->rss.key,
-						     MLX5_RSS_HASH_KEY_LEN,
-						     dev_flow->hash_fields,
-						     (*flow->rss.queue),
-						     flow->rss.queue_num,
-						     !!(dev_flow->layers &
-						       MLX5_FLOW_LAYER_TUNNEL));
+						MLX5_RSS_HASH_KEY_LEN,
+						dev_flow->hash_fields,
+						(*flow->rss.queue),
+						flow->rss.queue_num,
+						!!(dev_flow->handle.layers &
+						MLX5_FLOW_LAYER_TUNNEL));
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,
@@ -1727,11 +1726,11 @@
 					 "cannot get hash queue");
 				goto error;
 			}
-			verbs->hrxq = hrxq;
+			dh->hrxq = hrxq;
 		}
-		verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
-						     verbs->attr);
-		if (!verbs->flow) {
+		dh->ib_flow = mlx5_glue->create_flow(dh->hrxq->qp,
+						     dev_flow->verbs.attr);
+		if (!dh->ib_flow) {
 			rte_flow_error_set(error, errno,
 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					   NULL,
@@ -1739,33 +1738,31 @@
 			goto error;
 		}
 		if (priv->vmwa_context &&
-		    dev_flow->verbs.vf_vlan.tag &&
-		    !dev_flow->verbs.vf_vlan.created) {
+		    dev_flow->handle.vf_vlan.tag &&
+		    !dev_flow->handle.vf_vlan.created) {
 			/*
 			 * The rule contains the VLAN pattern.
 			 * For VF we are going to create VLAN
 			 * interface to make hypervisor set correct
 			 * e-Switch vport context.
 			 */
-			mlx5_vlan_vmwa_acquire(dev, &dev_flow->verbs.vf_vlan);
+			mlx5_vlan_vmwa_acquire(dev, &dev_flow->handle.vf_vlan);
 		}
 	}
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		verbs = &dev_flow->verbs;
-		if (verbs->hrxq) {
-			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+		dh = &dev_flow->handle;
+		if (dh->hrxq) {
+			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, verbs->hrxq);
-			verbs->hrxq = NULL;
-		}
-		if (dev_flow->verbs.vf_vlan.tag &&
-		    dev_flow->verbs.vf_vlan.created) {
-			mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
+				mlx5_hrxq_release(dev, dh->hrxq);
+			dh->hrxq = NULL;
 		}
+		if (dh->vf_vlan.tag && dh->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
-- 
1.8.3.1


Thread overview: 26+ messages
2020-02-03 13:32 [dpdk-dev] [PATCH 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 1/6] net/mlx5: introduce non-cached flows tailq list Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 2/6] net/mlx5: change operations of non-cached flows Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 3/6] net/mlx5: flow type check before creating Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 4/6] net/mlx5: introduce handle structure for DV flows Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 5/6] net/mlx5: remove the DV support macro checking Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 6/6] net/mlx5: do not save device flow matcher value Bing Zhao
2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 1/6] net/mlx5: introduce non-cached flows tailq list Bing Zhao
2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 2/6] net/mlx5: change operations of non-cached flows Bing Zhao
2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 3/6] net/mlx5: flow type check before creating Bing Zhao
2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 4/6] net/mlx5: introduce handle structure for DV flows Bing Zhao
2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 5/6] net/mlx5: remove the DV support macro checking Bing Zhao
2020-02-04 11:33   ` [dpdk-dev] [PATCH v2 6/6] net/mlx5: do not save device flow matcher value Bing Zhao
2020-03-24 15:16   ` [dpdk-dev] [PATCH v3 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 1/4] net/mlx5: change operations for non-cached flows Bing Zhao
2020-03-24 15:16     ` Bing Zhao [this message]
2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 3/4] net/mlx5: separate the flow handle resource Bing Zhao
2020-03-24 15:16     ` [dpdk-dev] [PATCH v3 4/4] net/mlx5: check device stat before creating flow Bing Zhao
2020-03-24 15:33   ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
2020-03-24 15:33     ` [dpdk-dev] [PATCH v4 1/4] net/mlx5: change operations for non-cached flows Bing Zhao
2020-03-24 15:33     ` [dpdk-dev] [PATCH v4 2/4] net/mlx5: reorganize mlx5 flow structures Bing Zhao
2020-03-24 15:33     ` [dpdk-dev] [PATCH v4 3/4] net/mlx5: separate the flow handle resource Bing Zhao
2020-03-24 15:34     ` [dpdk-dev] [PATCH v4 4/4] net/mlx5: check device stat before creating flow Bing Zhao
2020-03-25  9:13     ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Matan Azrad
2020-03-29 15:50     ` Raslan Darawsheh
