From: Bing Zhao <bingz@mellanox.com>
To: orika@mellanox.com, viacheslavo@mellanox.com,
rasland@mellanox.com, matan@mellanox.com
Cc: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 4/6] net/mlx5: introduce handle structure for DV flows
Date: Mon, 3 Feb 2020 15:32:13 +0200 [thread overview]
Message-ID: <1580736735-19472-5-git-send-email-bingz@mellanox.com> (raw)
In-Reply-To: <1580736735-19472-1-git-send-email-bingz@mellanox.com>
Introduce a new structure "mlx5_flow_dv_handle" based on the device flow
structures "mlx5_flow" and "mlx5_flow_dv"; in the meantime, the
"mlx5_flow" structure is kept for Verbs flows.
Only the matcher and action objects will be saved, in order to free
such resources when destroying a flow. The other information will be
stored in some intermediate global variables that can be reused
for all flows when they are being created.
The inbox OFED driver (built without DV support) should also be taken
into consideration, hence the HAVE_IBV_FLOW_DV_SUPPORT guards.
Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
drivers/net/mlx5/mlx5_flow.c | 184 +++++++++++++++++++-----
drivers/net/mlx5/mlx5_flow.h | 40 +++++-
drivers/net/mlx5/mlx5_flow_dv.c | 310 +++++++++++++++++++++-------------------
3 files changed, 350 insertions(+), 184 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8fb973b..1121904 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -709,19 +709,42 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- * Pointer to device flow structure.
+ * @param[in] type
+ * Driver type of the RTE flow.
+ * @param[in] sub_flow
+ * Pointer to device flow or flow handle structure.
*/
static void
-flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
+ enum mlx5_flow_drv_type type __rte_unused,
+ void *sub_flow)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = dev_flow->flow;
- const int mark = !!(dev_flow->actions &
- (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
- const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ struct rte_flow *flow;
+ int mark;
+ int tunnel;
+ uint64_t layers;
unsigned int i;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ if (type == MLX5_FLOW_TYPE_DV) {
+ struct mlx5_flow_dv_handle *handle = sub_flow;
+ mark = !!(handle->action_flags &
+ (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+ layers = handle->layers;
+ tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
+ flow = handle->m_flow;
+ } else {
+#endif
+ struct mlx5_flow *dev_flow = sub_flow;
+ mark = !!(dev_flow->actions &
+ (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+ layers = dev_flow->layers;
+ tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ flow = dev_flow->flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ }
+#endif
for (i = 0; i != flow->rss.queue_num; ++i) {
int idx = (*flow->rss.queue)[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
@@ -747,8 +770,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
/* Increase the counter matching the flow. */
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
- if ((tunnels_info[j].tunnel &
- dev_flow->layers) ==
+ if ((tunnels_info[j].tunnel & layers) ==
tunnels_info[j].tunnel) {
rxq_ctrl->flow_tunnels_n[j]++;
break;
@@ -771,9 +793,17 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct mlx5_flow *dev_flow;
+ enum mlx5_flow_drv_type type = flow->drv_type;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- flow_drv_rxq_flags_set(dev, dev_flow);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ struct mlx5_flow_dv_handle *handle;
+ if (type == MLX5_FLOW_TYPE_DV)
+ SLIST_FOREACH(handle, &flow->handles, next)
+ flow_drv_rxq_flags_set(dev, type, (void *)handle);
+ else
+#endif
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+ flow_drv_rxq_flags_set(dev, type, (void *)dev_flow);
}
/**
@@ -782,20 +812,44 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
*
* @param dev
* Pointer to Ethernet device.
- * @param[in] dev_flow
- * Pointer to the device flow.
+ * @param[in] type
+ * Driver type of the RTE flow.
+ * @param[in] sub_flow
+ * Pointer to device flow or flow handle structure.
+ *
*/
static void
-flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
+ enum mlx5_flow_drv_type type __rte_unused,
+ void *sub_flow)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = dev_flow->flow;
- const int mark = !!(dev_flow->actions &
- (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
- const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ struct rte_flow *flow;
+ int mark;
+ int tunnel;
+ uint64_t layers;
unsigned int i;
MLX5_ASSERT(dev->data->dev_started);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ if (type == MLX5_FLOW_TYPE_DV) {
+ struct mlx5_flow_dv_handle *handle = sub_flow;
+ mark = !!(handle->action_flags &
+ (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+ layers = handle->layers;
+ tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
+ flow = handle->m_flow;
+ } else {
+#endif
+ struct mlx5_flow *dev_flow = sub_flow;
+ mark = !!(dev_flow->actions &
+ (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+ layers = dev_flow->layers;
+ tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
+ flow = dev_flow->flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ }
+#endif
for (i = 0; i != flow->rss.queue_num; ++i) {
int idx = (*flow->rss.queue)[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
@@ -816,8 +870,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
/* Decrease the counter matching the flow. */
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
- if ((tunnels_info[j].tunnel &
- dev_flow->layers) ==
+ if ((tunnels_info[j].tunnel & layers) ==
tunnels_info[j].tunnel) {
rxq_ctrl->flow_tunnels_n[j]--;
break;
@@ -841,9 +894,17 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct mlx5_flow *dev_flow;
+ enum mlx5_flow_drv_type type = flow->drv_type;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- flow_drv_rxq_flags_trim(dev, dev_flow);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ struct mlx5_flow_dv_handle *handle;
+ if (type == MLX5_FLOW_TYPE_DV)
+ SLIST_FOREACH(handle, &flow->handles, next)
+ flow_drv_rxq_flags_trim(dev, type, (void *)handle);
+ else
+#endif
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+ flow_drv_rxq_flags_trim(dev, type, (void *)dev_flow);
}
/**
@@ -2341,10 +2402,22 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
struct rte_flow *flow)
{
struct mlx5_flow *dev_flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ struct mlx5_flow_dv_handle *handle;
+ enum mlx5_flow_drv_type type = flow->drv_type;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- if (dev_flow->qrss_id)
- flow_qrss_free_id(dev, dev_flow->qrss_id);
+ if (type == MLX5_FLOW_TYPE_DV) {
+ SLIST_FOREACH(handle, &flow->handles, next)
+ if (handle->qrss_id)
+ flow_qrss_free_id(dev, handle->qrss_id);
+ } else {
+#endif
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+ if (dev_flow->qrss_id)
+ flow_qrss_free_id(dev, dev_flow->qrss_id);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ }
+#endif
}
static int
@@ -3434,10 +3507,20 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
if (!dev_flow)
return -rte_errno;
- dev_flow->flow = flow;
dev_flow->external = external;
- /* Subflow object was created, we must include one in the list. */
- LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ dev_flow->flow = flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ if (flow->drv_type == MLX5_FLOW_TYPE_DV) {
+ SLIST_INSERT_HEAD(&flow->handles, dev_flow->dv_handle, next);
+ dev_flow->dv_handle->sidx = flow->sub_flows++;
+ dev_flow->dv_handle->m_flow = flow;
+ } else {
+#endif
+ /* Subflow obj was created, we must include one in the list. */
+ LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ }
+#endif
if (sub_flow)
*sub_flow = dev_flow;
return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
@@ -3900,6 +3983,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
* other flows in other threads).
*/
dev_flow->qrss_id = qrss_id;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ if (flow->drv_type == MLX5_FLOW_TYPE_DV)
+ dev_flow->dv_handle->qrss_id = qrss_id;
+#endif
qrss_id = 0;
ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
error);
@@ -4012,6 +4099,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
goto exit;
}
dev_flow->mtr_flow_id = mtr_tag_id;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ if (flow->drv_type == MLX5_FLOW_TYPE_DV)
+ dev_flow->dv_handle->mtr_flow_id = mtr_tag_id;
+#endif
/* Prepare the suffix flow match pattern. */
sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
act_size);
@@ -4164,6 +4255,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
uint32_t hairpin_id = 0;
struct rte_flow_attr attr_tx = { .priority = 0 };
+ MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
if (hairpin_flow > 0) {
if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
@@ -4192,10 +4284,9 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
goto error_before_flow;
}
flow->drv_type = type;
+ flow->sub_flows = 0;
if (hairpin_id != 0)
flow->hairpin_flow_id = hairpin_id;
- MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
- flow->drv_type < MLX5_FLOW_TYPE_MAX);
flow->rss.queue = (void *)(flow + 1);
if (rss) {
/*
@@ -4206,7 +4297,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
}
- LIST_INIT(&flow->dev_flows);
+ if (flow->drv_type == MLX5_FLOW_TYPE_DV)
+ SLIST_INIT(&flow->handles);
+ else
+ LIST_INIT(&flow->dev_flows);
if (rss && rss->types) {
unsigned int graph_root;
@@ -4243,9 +4337,20 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
actions_hairpin_tx.actions, error);
if (!dev_flow)
goto error;
- dev_flow->flow = flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ if (flow->drv_type == MLX5_FLOW_TYPE_DV) {
+ SLIST_INSERT_HEAD(&flow->handles,
+ dev_flow->dv_handle, next);
+ dev_flow->dv_handle->sidx = flow->sub_flows++;
+ dev_flow->dv_handle->m_flow = flow;
+ } else {
+#endif
+ dev_flow->flow = flow;
+ LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ }
+#endif
dev_flow->external = 0;
- LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
ret = flow_drv_translate(dev, dev_flow, &attr_tx,
items_tx.items,
actions_hairpin_tx.actions, error);
@@ -4363,8 +4468,17 @@ struct rte_flow *
struct mlx5_flows *flow_list;
enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
- flow_list = (type == MLX5_FLOW_TYPE_DV) ? &priv->noncached_flows :
- &priv->cached_flows;
+ if (type == MLX5_FLOW_TYPE_DV) {
+ if (unlikely(!dev->data->dev_started)) {
+ rte_errno = ENODEV;
+ DRV_LOG(DEBUG, "port %u is not started when "
+ "inserting a flow", dev->data->port_id);
+ return NULL;
+ }
+ flow_list = &priv->noncached_flows;
+ } else {
+ flow_list = &priv->cached_flows;
+ }
return flow_list_create(dev, flow_list, attr,
items, actions, true, type, error);
}
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 7c31bfe..10ac9c3 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -468,6 +468,39 @@ struct mlx5_flow_tbl_data_entry {
/**< jump resource, at most one for each table created. */
};
+struct mlx5_flow_dv_handle {
+ SLIST_ENTRY(mlx5_flow_dv_handle) next;
+ struct rte_flow *m_flow; /**< Pointer to the main flow. */
+ uint64_t layers;
+ /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
+ uint64_t action_flags;
+ /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
+ struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
+ struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
+ struct mlx5_flow_dv_match_params value;
+ /**< Holds the value that the packet is compared to. */
+ struct mlx5_flow_dv_encap_decap_resource *encap_decap;
+ /**< Pointer to encap/decap resource in cache. */
+ struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
+ /**< Pointer to modify header resource in cache. */
+ struct mlx5_flow_dv_jump_tbl_resource *jump;
+ /**< Pointer to the jump action resource. */
+ struct mlx5_flow_dv_port_id_action_resource *port_id_action;
+ /**< Pointer to port ID action resource. */
+ struct mlx5_vf_vlan vf_vlan;
+ /**< Structure for VF VLAN workaround. */
+ struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
+ /**< Pointer to push VLAN action resource in cache. */
+ struct mlx5_flow_dv_tag_resource *tag_resource;
+ /**< Pointer to the tag action. */
+ struct ibv_flow *flow; /**< Installed flow. */
+ union {
+ uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
+ uint32_t mtr_flow_id; /**< Unique meter match flow id. */
+ };
+ uint8_t sidx;
+};
+
/*
* Max number of actions per DV flow.
* See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
@@ -547,12 +580,12 @@ struct mlx5_flow {
uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
union {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- struct mlx5_flow_dv dv;
+ struct mlx5_flow_dv_handle *dv_handle;
#endif
struct mlx5_flow_verbs verbs;
};
union {
- uint32_t qrss_id; /**< Uniqie Q/RSS suffix subflow tag. */
+ uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
uint32_t mtr_flow_id; /**< Unique meter match flow id. */
};
bool external; /**< true if the flow is created external to PMD. */
@@ -674,6 +707,9 @@ struct rte_flow {
struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
uint32_t copy_applied:1; /**< The MARK copy Flow os applied. */
+ SLIST_HEAD(, mlx5_flow_dv_handle) handles;
+ /**< The HEAD of DV handles. */
+ uint8_t sub_flows;
};
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 2878393..2013082 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -75,6 +75,16 @@
uint32_t attr;
};
+/* Global temporary device flow. */
+struct mlx5_flow sflow;
+/* Global subsidiary device flows actions' list. */
+struct {
+ void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
+ uint64_t hash_fields;
+ int actions_n;
+ uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
+} sflow_act[8];
+
/**
* Initialize flow attributes structure according to flow items' types.
*
@@ -2348,7 +2358,7 @@ struct field_modify_info modify_tcp[] = {
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.encap_decap = cache_resource;
+ dev_flow->dv_handle->encap_decap = cache_resource;
return 0;
}
}
@@ -2374,7 +2384,7 @@ struct field_modify_info modify_tcp[] = {
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
- dev_flow->dv.encap_decap = cache_resource;
+ dev_flow->dv_handle->encap_decap = cache_resource;
DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
@@ -2425,7 +2435,7 @@ struct field_modify_info modify_tcp[] = {
(void *)&tbl_data->jump, cnt);
}
rte_atomic32_inc(&tbl_data->jump.refcnt);
- dev_flow->dv.jump = &tbl_data->jump;
+ dev_flow->dv_handle->jump = &tbl_data->jump;
return 0;
}
@@ -2463,7 +2473,7 @@ struct field_modify_info modify_tcp[] = {
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.port_id_action = cache_resource;
+ dev_flow->dv_handle->port_id_action = cache_resource;
return 0;
}
}
@@ -2491,7 +2501,7 @@ struct field_modify_info modify_tcp[] = {
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
- dev_flow->dv.port_id_action = cache_resource;
+ dev_flow->dv_handle->port_id_action = cache_resource;
DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
@@ -2534,7 +2544,7 @@ struct field_modify_info modify_tcp[] = {
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.push_vlan_res = cache_resource;
+ dev_flow->dv_handle->push_vlan_res = cache_resource;
return 0;
}
}
@@ -2563,7 +2573,7 @@ struct field_modify_info modify_tcp[] = {
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
- dev_flow->dv.push_vlan_res = cache_resource;
+ dev_flow->dv_handle->push_vlan_res = cache_resource;
DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
@@ -3652,7 +3662,7 @@ struct field_modify_info modify_tcp[] = {
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.modify_hdr = cache_resource;
+ dev_flow->dv_handle->modify_hdr = cache_resource;
return 0;
}
}
@@ -3679,7 +3689,7 @@ struct field_modify_info modify_tcp[] = {
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
- dev_flow->dv.modify_hdr = cache_resource;
+ dev_flow->dv_handle->modify_hdr = cache_resource;
DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
@@ -5102,19 +5112,24 @@ struct field_modify_info modify_tcp[] = {
const struct rte_flow_action actions[] __rte_unused,
struct rte_flow_error *error)
{
- size_t size = sizeof(struct mlx5_flow);
+ size_t size = sizeof(struct mlx5_flow_dv_handle);
struct mlx5_flow *dev_flow;
+ struct mlx5_flow_dv_handle *dv_handle;
- dev_flow = rte_calloc(__func__, 1, size, 0);
- if (!dev_flow) {
+ /* No need to clear to 0. */
+ dev_flow = &sflow;
+ dv_handle = rte_zmalloc(__func__, size, 0);
+ if (!dv_handle) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "not enough memory to create flow");
+ "not enough memory to create flow handle");
return NULL;
}
- dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
dev_flow->ingress = attr->ingress;
dev_flow->transfer = attr->transfer;
+ dv_handle->value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+ /* DV support is defined, so the compiler is happy for inbox driver. */
+ dev_flow->dv_handle = dv_handle;
return dev_flow;
}
@@ -5253,7 +5268,7 @@ struct field_modify_info modify_tcp[] = {
* This is workaround, masks are not supported,
* and pre-validated.
*/
- dev_flow->dv.vf_vlan.tag =
+ dev_flow->dv_handle->vf_vlan.tag =
rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
}
tci_m = rte_be_to_cpu_16(vlan_m->tci);
@@ -6712,7 +6727,7 @@ struct field_modify_info modify_tcp[] = {
(void *)cache_matcher,
rte_atomic32_read(&cache_matcher->refcnt));
rte_atomic32_inc(&cache_matcher->refcnt);
- dev_flow->dv.matcher = cache_matcher;
+ dev_flow->dv_handle->matcher = cache_matcher;
/* old matcher should not make the table ref++. */
flow_dv_tbl_resource_release(dev, tbl);
return 0;
@@ -6749,7 +6764,7 @@ struct field_modify_info modify_tcp[] = {
/* only matcher ref++, table ref++ already done above in get API. */
rte_atomic32_inc(&cache_matcher->refcnt);
LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
- dev_flow->dv.matcher = cache_matcher;
+ dev_flow->dv_handle->matcher = cache_matcher;
DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
key->domain ? "FDB" : "NIC", key->table_id,
cache_matcher->priority,
@@ -6791,7 +6806,7 @@ struct field_modify_info modify_tcp[] = {
cache_resource = container_of
(entry, struct mlx5_flow_dv_tag_resource, entry);
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.tag_resource = cache_resource;
+ dev_flow->dv_handle->tag_resource = cache_resource;
DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
@@ -6820,7 +6835,7 @@ struct field_modify_info modify_tcp[] = {
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot insert tag");
}
- dev_flow->dv.tag_resource = cache_resource;
+ dev_flow->dv_handle->tag_resource = cache_resource;
DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
@@ -7022,6 +7037,9 @@ struct field_modify_info modify_tcp[] = {
dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
}
}
+ /* No need to save the hash fields after creation. */
+ sflow_act[dev_flow->dv_handle->sidx].hash_fields =
+ dev_flow->hash_fields;
}
/**
@@ -7065,6 +7083,7 @@ struct field_modify_info modify_tcp[] = {
},
};
int actions_n = 0;
+ uint8_t sidx = dev_flow->dv_handle->sidx;
bool actions_end = false;
union {
struct mlx5_flow_dv_modify_hdr_resource res;
@@ -7076,9 +7095,9 @@ struct field_modify_info modify_tcp[] = {
union flow_dv_attr flow_attr = { .attr = 0 };
uint32_t tag_be;
union mlx5_flow_tbl_key tbl_key;
- uint32_t modify_action_position = UINT32_MAX;
+ uint32_t modify_action_pos = UINT32_MAX;
void *match_mask = matcher.mask.buf;
- void *match_value = dev_flow->dv.value.buf;
+ void *match_value = dev_flow->dv_handle->value.buf;
uint8_t next_protocol = 0xff;
struct rte_vlan_hdr vlan = { 0 };
uint32_t table;
@@ -7122,8 +7141,8 @@ struct field_modify_info modify_tcp[] = {
if (flow_dv_port_id_action_resource_register
(dev, &port_id_resource, dev_flow, error))
return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.port_id_action->action;
+ sflow_act[sidx].actions[actions_n++] =
+ dev_flow->dv_handle->port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
@@ -7132,7 +7151,6 @@ struct field_modify_info modify_tcp[] = {
struct rte_flow_action_mark mark = {
.id = MLX5_FLOW_MARK_DEFAULT,
};
-
if (flow_dv_convert_action_mark(dev, &mark,
mhdr_res,
error))
@@ -7141,12 +7159,12 @@ struct field_modify_info modify_tcp[] = {
break;
}
tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
- if (!dev_flow->dv.tag_resource)
+ if (!dev_flow->dv_handle->tag_resource)
if (flow_dv_tag_resource_register
(dev, tag_be, dev_flow, error))
return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.tag_resource->action;
+ sflow_act[sidx].actions[actions_n++] =
+ dev_flow->dv_handle->tag_resource->action;
break;
case RTE_FLOW_ACTION_TYPE_MARK:
action_flags |= MLX5_FLOW_ACTION_MARK;
@@ -7168,12 +7186,12 @@ struct field_modify_info modify_tcp[] = {
tag_be = mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
- if (!dev_flow->dv.tag_resource)
+ if (!dev_flow->dv_handle->tag_resource)
if (flow_dv_tag_resource_register
(dev, tag_be, dev_flow, error))
return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.tag_resource->action;
+ sflow_act[sidx].actions[actions_n++] =
+ dev_flow->dv_handle->tag_resource->action;
break;
case RTE_FLOW_ACTION_TYPE_SET_META:
if (flow_dv_convert_action_set_meta
@@ -7228,7 +7246,7 @@ struct field_modify_info modify_tcp[] = {
dev_flow->group);
if (flow->counter == NULL)
goto cnt_err;
- dev_flow->dv.actions[actions_n++] =
+ sflow_act[sidx].actions[actions_n++] =
flow->counter->action;
action_flags |= MLX5_FLOW_ACTION_COUNT;
break;
@@ -7248,7 +7266,7 @@ struct field_modify_info modify_tcp[] = {
" object.");
break;
case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
- dev_flow->dv.actions[actions_n++] =
+ sflow_act[sidx].actions[actions_n++] =
priv->sh->pop_vlan_action;
action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
break;
@@ -7270,8 +7288,8 @@ struct field_modify_info modify_tcp[] = {
if (flow_dv_create_action_push_vlan
(dev, attr, &vlan, dev_flow, error))
return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.push_vlan_res->action;
+ sflow_act[sidx].actions[actions_n++] =
+ dev_flow->dv_handle->push_vlan_res->action;
action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
@@ -7297,8 +7315,8 @@ struct field_modify_info modify_tcp[] = {
attr->transfer,
error))
return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ sflow_act[sidx].actions[actions_n++] =
+ dev_flow->dv_handle->encap_decap->verbs_action;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
MLX5_FLOW_ACTION_VXLAN_ENCAP :
@@ -7310,8 +7328,8 @@ struct field_modify_info modify_tcp[] = {
attr->transfer,
error))
return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ sflow_act[sidx].actions[actions_n++] =
+ dev_flow->dv_handle->encap_decap->verbs_action;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
MLX5_FLOW_ACTION_VXLAN_DECAP :
@@ -7323,16 +7341,16 @@ struct field_modify_info modify_tcp[] = {
if (flow_dv_create_action_raw_encap
(dev, actions, dev_flow, attr, error))
return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ sflow_act[sidx].actions[actions_n++] =
+ dev_flow->dv_handle->encap_decap->verbs_action;
} else {
/* Handle encap without preceding decap. */
if (flow_dv_create_action_l2_encap
(dev, actions, dev_flow, attr->transfer,
error))
return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ sflow_act[sidx].actions[actions_n++] =
+ dev_flow->dv_handle->encap_decap->verbs_action;
}
action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
break;
@@ -7347,8 +7365,8 @@ struct field_modify_info modify_tcp[] = {
if (flow_dv_create_action_l2_decap
(dev, dev_flow, attr->transfer, error))
return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ sflow_act[sidx].actions[actions_n++] =
+ dev_flow->dv_handle->encap_decap->verbs_action;
}
/* If decap is followed by encap, handle it at encap. */
action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
@@ -7379,8 +7397,8 @@ struct field_modify_info modify_tcp[] = {
NULL,
"cannot create jump action.");
}
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.jump->action;
+ sflow_act[sidx].actions[actions_n++] =
+ dev_flow->dv_handle->jump->action;
action_flags |= MLX5_FLOW_ACTION_JUMP;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
@@ -7485,7 +7503,7 @@ struct field_modify_info modify_tcp[] = {
"or invalid parameters");
}
/* Set the meter action. */
- dev_flow->dv.actions[actions_n++] =
+ sflow_act[sidx].actions[actions_n++] =
flow->meter->mfts->meter_action;
action_flags |= MLX5_FLOW_ACTION_METER;
break;
@@ -7508,19 +7526,19 @@ struct field_modify_info modify_tcp[] = {
if (flow_dv_modify_hdr_resource_register
(dev, mhdr_res, dev_flow, error))
return -rte_errno;
- dev_flow->dv.actions[modify_action_position] =
- dev_flow->dv.modify_hdr->verbs_action;
+ sflow_act[sidx].actions[modify_action_pos] =
+ dev_flow->dv_handle->modify_hdr->verbs_action;
}
break;
default:
break;
}
- if (mhdr_res->actions_num &&
- modify_action_position == UINT32_MAX)
- modify_action_position = actions_n++;
+ if (mhdr_res->actions_num && modify_action_pos == UINT32_MAX)
+ modify_action_pos = actions_n++;
}
- dev_flow->dv.actions_n = actions_n;
- dev_flow->actions = action_flags;
+ sflow_act[sidx].actions_n = actions_n;
+ sflow_act[sidx].transfer = dev_flow->transfer;
+ dev_flow->dv_handle->action_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
@@ -7707,7 +7725,7 @@ struct field_modify_info modify_tcp[] = {
MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
dev_flow->dv.value.buf));
#endif
- dev_flow->layers = item_flags;
+ dev_flow->dv_handle->layers = item_flags;
if (action_flags & MLX5_FLOW_ACTION_RSS)
flow_dv_hashfields_set(dev_flow);
/* Register matcher. */
@@ -7742,21 +7760,23 @@ struct field_modify_info modify_tcp[] = {
__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct mlx5_flow_dv *dv;
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_dv_handle *dv_handle;
struct mlx5_priv *priv = dev->data->dev_private;
+ void *matcher_obj;
int n;
int err;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- dv = &dev_flow->dv;
- n = dv->actions_n;
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
- if (dev_flow->transfer) {
- dv->actions[n++] = priv->sh->esw_drop_action;
+ SLIST_FOREACH(dv_handle, &flow->handles, next) {
+ uint8_t sidx = dv_handle->sidx;
+ n = sflow_act[sidx].actions_n;
+
+ if (dv_handle->action_flags & MLX5_FLOW_ACTION_DROP) {
+ if (sflow_act[sidx].transfer) {
+ sflow_act[sidx].actions[n++] =
+ priv->sh->esw_drop_action;
} else {
- dv->hrxq = mlx5_hrxq_drop_new(dev);
- if (!dv->hrxq) {
+ dv_handle->hrxq = mlx5_hrxq_drop_new(dev);
+ if (!dv_handle->hrxq) {
rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -7764,26 +7784,27 @@ struct field_modify_info modify_tcp[] = {
"cannot get drop hash queue");
goto error;
}
- dv->actions[n++] = dv->hrxq->action;
+ sflow_act[sidx].actions[n++] =
+ dv_handle->hrxq->action;
}
- } else if (dev_flow->actions &
+ } else if (dv_handle->action_flags &
(MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
struct mlx5_hrxq *hrxq;
MLX5_ASSERT(flow->rss.queue);
hrxq = mlx5_hrxq_get(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
- dev_flow->hash_fields,
+ sflow_act[sidx].hash_fields,
(*flow->rss.queue),
flow->rss.queue_num);
if (!hrxq) {
hrxq = mlx5_hrxq_new
(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
- dev_flow->hash_fields,
+ sflow_act[sidx].hash_fields,
(*flow->rss.queue),
flow->rss.queue_num,
- !!(dev_flow->layers &
+ !!(dv_handle->layers &
MLX5_FLOW_LAYER_TUNNEL));
}
if (!hrxq) {
@@ -7793,47 +7814,45 @@ struct field_modify_info modify_tcp[] = {
"cannot get hash queue");
goto error;
}
- dv->hrxq = hrxq;
- dv->actions[n++] = dv->hrxq->action;
+ dv_handle->hrxq = hrxq;
+ sflow_act[sidx].actions[n++] = hrxq->action;
}
- dv->flow =
- mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
- (void *)&dv->value, n,
- dv->actions);
- if (!dv->flow) {
+ matcher_obj = dv_handle->matcher->matcher_object;
+ dv_handle->flow =
+ mlx5_glue->dv_create_flow(matcher_obj,
+ (void *)&dv_handle->value,
+ n, sflow_act[sidx].actions);
+ if (!dv_handle->flow) {
rte_flow_error_set(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"hardware refuses to create flow");
goto error;
}
- if (priv->vmwa_context &&
- dev_flow->dv.vf_vlan.tag &&
- !dev_flow->dv.vf_vlan.created) {
+ if (priv->vmwa_context && dv_handle->vf_vlan.tag &&
+ !dv_handle->vf_vlan.created) {
/*
* The rule contains the VLAN pattern.
* For VF we are going to create VLAN
* interface to make hypervisor set correct
* e-Switch vport context.
*/
- mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
+ mlx5_vlan_vmwa_acquire(dev, &dv_handle->vf_vlan);
}
}
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- struct mlx5_flow_dv *dv = &dev_flow->dv;
- if (dv->hrxq) {
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+ SLIST_FOREACH(dv_handle, &flow->handles, next) {
+ if (dv_handle->hrxq) {
+ if (dv_handle->action_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, dv->hrxq);
- dv->hrxq = NULL;
+ mlx5_hrxq_release(dev, dv_handle->hrxq);
+ dv_handle->hrxq = NULL;
}
- if (dev_flow->dv.vf_vlan.tag &&
- dev_flow->dv.vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+ if (dv_handle->vf_vlan.tag && dv_handle->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dv_handle->vf_vlan);
}
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
@@ -7844,17 +7863,17 @@ struct field_modify_info modify_tcp[] = {
*
* @param dev
* Pointer to Ethernet device.
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_dv_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
- struct mlx5_flow *flow)
+ struct mlx5_flow_dv_handle *handle)
{
- struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+ struct mlx5_flow_dv_matcher *matcher = handle->matcher;
MLX5_ASSERT(matcher->matcher_object);
DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
@@ -7877,17 +7896,17 @@ struct field_modify_info modify_tcp[] = {
/**
* Release an encap/decap resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_dv_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
+flow_dv_encap_decap_resource_release(struct mlx5_flow_dv_handle *handle)
{
struct mlx5_flow_dv_encap_decap_resource *cache_resource =
- flow->dv.encap_decap;
+ handle->encap_decap;
MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
@@ -7910,17 +7929,17 @@ struct field_modify_info modify_tcp[] = {
*
* @param dev
* Pointer to Ethernet device.
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_dv_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow *flow)
+ struct mlx5_flow_dv_handle *handle)
{
- struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
+ struct mlx5_flow_dv_jump_tbl_resource *cache_resource = handle->jump;
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(cache_resource,
struct mlx5_flow_tbl_data_entry, jump);
@@ -7944,17 +7963,17 @@ struct field_modify_info modify_tcp[] = {
/**
* Release a modify-header resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_dv_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
+flow_dv_modify_hdr_resource_release(struct mlx5_flow_dv_handle *handle)
{
struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
- flow->dv.modify_hdr;
+ handle->modify_hdr;
MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
@@ -7975,17 +7994,17 @@ struct field_modify_info modify_tcp[] = {
/**
* Release port ID action resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_dv_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
+flow_dv_port_id_action_resource_release(struct mlx5_flow_dv_handle *handle)
{
struct mlx5_flow_dv_port_id_action_resource *cache_resource =
- flow->dv.port_id_action;
+ handle->port_id_action;
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
@@ -8006,17 +8025,17 @@ struct field_modify_info modify_tcp[] = {
/**
* Release push vlan action resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_dv_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
+flow_dv_push_vlan_action_resource_release(struct mlx5_flow_dv_handle *handle)
{
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
- flow->dv.push_vlan_res;
+ handle->push_vlan_res;
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
@@ -8046,27 +8065,24 @@ struct field_modify_info modify_tcp[] = {
static void
__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow_dv *dv;
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_dv_handle *dv_handle;
if (!flow)
return;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- dv = &dev_flow->dv;
- if (dv->flow) {
- claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
- dv->flow = NULL;
+ SLIST_FOREACH(dv_handle, &flow->handles, next) {
+ if (dv_handle->flow) {
+ claim_zero(mlx5_glue->dv_destroy_flow(dv_handle->flow));
+ dv_handle->flow = NULL;
}
- if (dv->hrxq) {
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+ if (dv_handle->hrxq) {
+ if (dv_handle->action_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, dv->hrxq);
- dv->hrxq = NULL;
+ mlx5_hrxq_release(dev, dv_handle->hrxq);
+ dv_handle->hrxq = NULL;
}
- if (dev_flow->dv.vf_vlan.tag &&
- dev_flow->dv.vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+ if (dv_handle->vf_vlan.tag && dv_handle->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dv_handle->vf_vlan);
}
}
@@ -8082,7 +8098,7 @@ struct field_modify_info modify_tcp[] = {
static void
__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_dv_handle *dv_handle;
if (!flow)
return;
@@ -8095,24 +8111,24 @@ struct field_modify_info modify_tcp[] = {
mlx5_flow_meter_detach(flow->meter);
flow->meter = NULL;
}
- while (!LIST_EMPTY(&flow->dev_flows)) {
- dev_flow = LIST_FIRST(&flow->dev_flows);
- LIST_REMOVE(dev_flow, next);
- if (dev_flow->dv.matcher)
- flow_dv_matcher_release(dev, dev_flow);
- if (dev_flow->dv.encap_decap)
- flow_dv_encap_decap_resource_release(dev_flow);
- if (dev_flow->dv.modify_hdr)
- flow_dv_modify_hdr_resource_release(dev_flow);
- if (dev_flow->dv.jump)
- flow_dv_jump_tbl_resource_release(dev, dev_flow);
- if (dev_flow->dv.port_id_action)
- flow_dv_port_id_action_resource_release(dev_flow);
- if (dev_flow->dv.push_vlan_res)
- flow_dv_push_vlan_action_resource_release(dev_flow);
- if (dev_flow->dv.tag_resource)
- flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
- rte_free(dev_flow);
+ while (!SLIST_EMPTY(&flow->handles)) {
+ dv_handle = SLIST_FIRST(&flow->handles);
+ SLIST_REMOVE_HEAD(&flow->handles, next);
+ if (dv_handle->matcher)
+ flow_dv_matcher_release(dev, dv_handle);
+ if (dv_handle->encap_decap)
+ flow_dv_encap_decap_resource_release(dv_handle);
+ if (dv_handle->modify_hdr)
+ flow_dv_modify_hdr_resource_release(dv_handle);
+ if (dv_handle->jump)
+ flow_dv_jump_tbl_resource_release(dev, dv_handle);
+ if (dv_handle->port_id_action)
+ flow_dv_port_id_action_resource_release(dv_handle);
+ if (dv_handle->push_vlan_res)
+ flow_dv_push_vlan_action_resource_release(dv_handle);
+ if (dv_handle->tag_resource)
+ flow_dv_tag_release(dev, dv_handle->tag_resource);
+ rte_free(dv_handle);
}
}
--
1.8.3.1
next prev parent reply other threads:[~2020-02-03 13:33 UTC|newest]
Thread overview: 26+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-02-03 13:32 [dpdk-dev] [PATCH 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 1/6] net/mlx5: introduce non-cached flows tailq list Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 2/6] net/mlx5: change operations of non-cached flows Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 3/6] net/mlx5: flow type check before creating Bing Zhao
2020-02-03 13:32 ` Bing Zhao [this message]
2020-02-03 13:32 ` [dpdk-dev] [PATCH 5/6] net/mlx5: remove the DV support macro checking Bing Zhao
2020-02-03 13:32 ` [dpdk-dev] [PATCH 6/6] net/mlx5: do not save device flow matcher value Bing Zhao
2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 0/6] net/mlx5: move to non-cached mode for flow rules Bing Zhao
2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 1/6] net/mlx5: introduce non-cached flows tailq list Bing Zhao
2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 2/6] net/mlx5: change operations of non-cached flows Bing Zhao
2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 3/6] net/mlx5: flow type check before creating Bing Zhao
2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 4/6] net/mlx5: introduce handle structure for DV flows Bing Zhao
2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 5/6] net/mlx5: remove the DV support macro checking Bing Zhao
2020-02-04 11:33 ` [dpdk-dev] [PATCH v2 6/6] net/mlx5: do not save device flow matcher value Bing Zhao
2020-03-24 15:16 ` [dpdk-dev] [PATCH v3 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
2020-03-24 15:16 ` [dpdk-dev] [PATCH v3 1/4] net/mlx5: change operations for non-cached flows Bing Zhao
2020-03-24 15:16 ` [dpdk-dev] [PATCH v3 2/4] net/mlx5: reorganize mlx5 flow structures Bing Zhao
2020-03-24 15:16 ` [dpdk-dev] [PATCH v3 3/4] net/mlx5: separate the flow handle resource Bing Zhao
2020-03-24 15:16 ` [dpdk-dev] [PATCH v3 4/4] net/mlx5: check device stat before creating flow Bing Zhao
2020-03-24 15:33 ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Bing Zhao
2020-03-24 15:33 ` [dpdk-dev] [PATCH v4 1/4] net/mlx5: change operations for non-cached flows Bing Zhao
2020-03-24 15:33 ` [dpdk-dev] [PATCH v4 2/4] net/mlx5: reorganize mlx5 flow structures Bing Zhao
2020-03-24 15:33 ` [dpdk-dev] [PATCH v4 3/4] net/mlx5: separate the flow handle resource Bing Zhao
2020-03-24 15:34 ` [dpdk-dev] [PATCH v4 4/4] net/mlx5: check device stat before creating flow Bing Zhao
2020-03-25 9:13 ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: move to non-cached mode for flow rules Matan Azrad
2020-03-29 15:50 ` Raslan Darawsheh
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1580736735-19472-5-git-send-email-bingz@mellanox.com \
--to=bingz@mellanox.com \
--cc=dev@dpdk.org \
--cc=matan@mellanox.com \
--cc=orika@mellanox.com \
--cc=rasland@mellanox.com \
--cc=viacheslavo@mellanox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).