* [dpdk-dev] [PATCH v1] net/mlx5: fix meter cleaning in stop operation
@ 2021-05-13 7:35 Li Zhang
2021-05-13 8:35 ` [dpdk-dev] [PATCH v2] " Li Zhang
2021-05-13 9:56 ` [dpdk-dev] [PATCH v1] " Matan Azrad
0 siblings, 2 replies; 4+ messages in thread
From: Li Zhang @ 2021-05-13 7:35 UTC (permalink / raw)
To: orika, viacheslavo, matan, shahafs; +Cc: dev, thomas, rasland, roniba
A meter may hold Rx queue references in its sub-policies.
In the stop operation, all the Rx queues are released.
Wrongly, the meter references were not released before
destroying the Rx queues, which caused an error in stop.
Release the meter references to the Rx queues in the stop operation.
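The corrected ordering is sketched below; this is an illustration only,
not the driver code, and mlx5_release_rxqs() is a hypothetical stand-in
for the real Rx queue teardown path:

	/* Sketch of the fixed stop sequence. */
	static void
	mlx5_dev_stop_sketch(struct rte_eth_dev *dev)
	{
		struct mlx5_priv *priv = dev->data->dev_private;

		/* Drop all flows; Rx queue flags are cleared here. */
		mlx5_flow_list_flush(dev, &priv->flows, true);
		/* Release meter references to the Rx queues while the
		 * queues still exist; doing it later hits freed queues.
		 */
		mlx5_flow_meter_rxq_flush(dev);
		/* Hypothetical stand-in for the Rx queue release. */
		mlx5_release_rxqs(dev);
	}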
Fixes: fc6ce56bba ("net/mlx5: prepare sub-policy for flow with meter")
Signed-off-by: Li Zhang <lizh@nvidia.com>
---
drivers/net/mlx5/mlx5.h | 5 ++
drivers/net/mlx5/mlx5_flow.c | 77 ++++++++++++++------
drivers/net/mlx5/mlx5_flow.h | 6 ++
drivers/net/mlx5/mlx5_flow_dv.c | 109 +++++++++++++++++++----------
drivers/net/mlx5/mlx5_flow_meter.c | 36 +++++++++-
drivers/net/mlx5/mlx5_trigger.c | 2 +-
6 files changed, 175 insertions(+), 60 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 7eca6a6fa6..b8a29dd369 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -668,6 +668,8 @@ struct mlx5_meter_policy_action_container {
/* Index to port ID action resource. */
void *dr_jump_action[MLX5_MTR_DOMAIN_MAX];
/* Jump/drop action per color. */
+ uint16_t queue;
+ /* Queue action configuration. */
};
};
@@ -681,6 +683,8 @@ struct mlx5_flow_meter_policy {
/* Rule applies to egress domain. */
uint32_t transfer:1;
/* Rule applies to transfer domain. */
+ uint32_t is_queue:1;
+ /* Is queue action in policy table. */
rte_spinlock_t sl;
uint32_t ref_cnt;
/* Use count. */
@@ -1655,6 +1659,7 @@ struct mlx5_flow_meter_policy *mlx5_flow_meter_policy_find
uint32_t *policy_idx);
int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
struct rte_mtr_error *error);
+void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);
/* mlx5_os.c */
struct rte_pci_driver;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 23d4224ec5..dbeca571b6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4567,7 +4567,9 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
"Failed to find Meter Policy.");
goto exit;
}
- if (policy->is_rss) {
+ if (policy->is_rss ||
+ (policy->is_queue &&
+ !policy->sub_policys[MLX5_MTR_DOMAIN_INGRESS][0]->rix_hrxq[0])) {
struct mlx5_flow_workspace *wks =
mlx5_flow_get_thread_workspace();
struct mlx5_flow_rss_desc rss_desc_v[MLX5_MTR_RTE_COLORS];
@@ -4583,34 +4585,49 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
struct mlx5_flow dev_flow = {0};
struct mlx5_flow_handle dev_handle = { {0} };
- const void *rss_act = policy->act_cnt[i].rss->conf;
- struct rte_flow_action rss_actions[2] = {
- [0] = {
+
+ rss_desc_v[i] = wks->rss_desc;
+ if (policy->is_rss) {
+ const void *rss_act =
+ policy->act_cnt[i].rss->conf;
+ struct rte_flow_action rss_actions[2] = {
+ [0] = {
.type = RTE_FLOW_ACTION_TYPE_RSS,
.conf = rss_act
- },
- [1] = {
+ },
+ [1] = {
.type = RTE_FLOW_ACTION_TYPE_END,
.conf = NULL
- }
- };
+ }
+ };
- dev_flow.handle = &dev_handle;
- dev_flow.ingress = attr->ingress;
- dev_flow.flow = flow;
- dev_flow.external = 0;
+ dev_flow.handle = &dev_handle;
+ dev_flow.ingress = attr->ingress;
+ dev_flow.flow = flow;
+ dev_flow.external = 0;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- dev_flow.dv.transfer = attr->transfer;
+ dev_flow.dv.transfer = attr->transfer;
#endif
- /* Translate RSS action to get rss hash fields. */
- if (flow_drv_translate(dev, &dev_flow, attr,
+ /**
+ * Translate RSS action to get rss hash fields.
+ */
+ if (flow_drv_translate(dev, &dev_flow, attr,
items, rss_actions, error))
- goto exit;
- rss_desc_v[i] = wks->rss_desc;
- rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN;
- rss_desc_v[i].hash_fields = dev_flow.hash_fields;
- rss_desc_v[i].queue_num = rss_desc_v[i].hash_fields ?
- rss_desc_v[i].queue_num : 1;
+ goto exit;
+ rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN;
+ rss_desc_v[i].hash_fields =
+ dev_flow.hash_fields;
+ rss_desc_v[i].queue_num =
+ rss_desc_v[i].hash_fields ?
+ rss_desc_v[i].queue_num : 1;
+ } else {
+ /* This is a queue action. */
+ rss_desc_v[i].key_len = 0;
+ rss_desc_v[i].hash_fields = 0;
+ rss_desc_v[i].queue =
+ &policy->act_cnt[i].queue;
+ rss_desc_v[i].queue_num = 1;
+ }
rss_desc[i] = &rss_desc_v[i];
}
sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev,
@@ -7223,6 +7240,24 @@ mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
fops->destroy_mtr_drop_tbls(dev);
}
+/**
+ * Destroy the sub policy table with RX queue.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ * Pointer to meter policy table.
+ */
+void
+mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ fops->destroy_sub_policy_with_rxq(dev, mtr_policy);
+}
+
/**
* Allocate the needed aso flow meter id.
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 04c8806bf6..2f2aa962f9 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1156,6 +1156,9 @@ typedef struct mlx5_flow_meter_sub_policy *
(struct rte_eth_dev *dev,
struct mlx5_flow_meter_policy *mtr_policy,
struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
+typedef void (*mlx5_flow_destroy_sub_policy_with_rxq_t)
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
typedef uint32_t (*mlx5_flow_mtr_alloc_t)
(struct rte_eth_dev *dev);
typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
@@ -1249,6 +1252,7 @@ struct mlx5_flow_driver_ops {
mlx5_flow_create_def_policy_t create_def_policy;
mlx5_flow_destroy_def_policy_t destroy_def_policy;
mlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;
+ mlx5_flow_destroy_sub_policy_with_rxq_t destroy_sub_policy_with_rxq;
mlx5_flow_counter_alloc_t counter_alloc;
mlx5_flow_counter_free_t counter_free;
mlx5_flow_counter_query_t counter_query;
@@ -1562,6 +1566,8 @@ struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare
(struct rte_eth_dev *dev,
struct mlx5_flow_meter_policy *mtr_policy,
struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
+void mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
int mlx5_action_handle_flush(struct rte_eth_dev *dev);
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 7fc7efbc5c..c7a0a38650 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -14707,12 +14707,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
MLX5_ASSERT(dev_flow.dv.tag_resource);
act_cnt->rix_mark =
dev_flow.handle->dvh.rix_tag;
- if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
- dev_flow.handle->rix_hrxq =
- mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
- flow_drv_rxq_flags_set(dev,
- dev_flow.handle);
- }
action_flags |= MLX5_FLOW_ACTION_MARK;
break;
}
@@ -14760,12 +14754,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
"set tag action");
act_cnt->modify_hdr =
dev_flow.handle->dvh.modify_hdr;
- if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
- dev_flow.handle->rix_hrxq =
- mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
- flow_drv_rxq_flags_set(dev,
- dev_flow.handle);
- }
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
}
@@ -14809,41 +14797,20 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
}
case RTE_FLOW_ACTION_TYPE_QUEUE:
{
- struct mlx5_hrxq *hrxq;
- uint32_t hrxq_idx;
- struct mlx5_flow_rss_desc rss_desc;
- struct mlx5_flow_meter_sub_policy *sub_policy =
- mtr_policy->sub_policys[domain][0];
-
if (i >= MLX5_MTR_RTE_COLORS)
return -rte_mtr_error_set(error,
ENOTSUP,
RTE_MTR_ERROR_TYPE_METER_POLICY,
NULL, "cannot create policy "
"fate queue for this color");
- memset(&rss_desc, 0,
- sizeof(struct mlx5_flow_rss_desc));
- rss_desc.queue_num = 1;
- rss_desc.const_q = act->conf;
- hrxq = flow_dv_hrxq_prepare(dev, &dev_flow,
- &rss_desc, &hrxq_idx);
- if (!hrxq)
- return -rte_mtr_error_set(error,
- ENOTSUP,
- RTE_MTR_ERROR_TYPE_METER_POLICY,
- NULL,
- "cannot create policy fate queue");
- sub_policy->rix_hrxq[i] = hrxq_idx;
+ act_cnt->queue =
+ ((const struct rte_flow_action_queue *)
+ (act->conf))->index;
act_cnt->fate_action =
MLX5_FLOW_FATE_QUEUE;
dev_flow.handle->fate_action =
MLX5_FLOW_FATE_QUEUE;
- if (action_flags & MLX5_FLOW_ACTION_MARK ||
- action_flags & MLX5_FLOW_ACTION_SET_TAG) {
- dev_flow.handle->rix_hrxq = hrxq_idx;
- flow_drv_rxq_flags_set(dev,
- dev_flow.handle);
- }
+ mtr_policy->is_queue = 1;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
break;
}
@@ -16057,6 +16024,73 @@ flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
return NULL;
}
+
+/**
+ * Destroy the sub policy table with RX queue.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ * Pointer to meter policy table.
+ */
+static void
+flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+ uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+ uint32_t i, j;
+ uint16_t sub_policy_num, new_policy_num;
+
+ rte_spinlock_lock(&mtr_policy->sl);
+ for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+ switch (mtr_policy->act_cnt[i].fate_action) {
+ case MLX5_FLOW_FATE_SHARED_RSS:
+ sub_policy_num = (mtr_policy->sub_policy_num >>
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+ MLX5_MTR_SUB_POLICY_NUM_MASK;
+ new_policy_num = sub_policy_num;
+ for (j = 0; j < sub_policy_num; j++) {
+ sub_policy =
+ mtr_policy->sub_policys[domain][j];
+ if (sub_policy) {
+ __flow_dv_destroy_sub_policy_rules(dev,
+ sub_policy);
+ if (sub_policy !=
+ mtr_policy->sub_policys[domain][0]) {
+ mtr_policy->sub_policys[domain][j] =
+ NULL;
+ mlx5_ipool_free
+ (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+ sub_policy->idx);
+ new_policy_num--;
+ }
+ }
+ }
+ if (new_policy_num != sub_policy_num) {
+ mtr_policy->sub_policy_num &=
+ ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
+ mtr_policy->sub_policy_num |=
+ (new_policy_num &
+ MLX5_MTR_SUB_POLICY_NUM_MASK) <<
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
+ }
+ break;
+ case MLX5_FLOW_FATE_QUEUE:
+ sub_policy = mtr_policy->sub_policys[domain][0];
+ __flow_dv_destroy_sub_policy_rules(dev,
+ sub_policy);
+ break;
+ default:
+ /* Other actions have no queue, nothing to do. */
+ break;
+ }
+ }
+ rte_spinlock_unlock(&mtr_policy->sl);
+}
+
/**
* Validate the batch counter support in root table.
*
@@ -16668,6 +16702,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
.create_def_policy = flow_dv_create_def_policy,
.destroy_def_policy = flow_dv_destroy_def_policy,
.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
+ .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
.counter_alloc = flow_dv_counter_allocate,
.counter_free = flow_dv_counter_free,
.counter_query = flow_dv_counter_query,
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index ac2735e6e2..16991748dc 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -748,7 +748,7 @@ mlx5_flow_meter_policy_add(struct rte_eth_dev *dev,
policy->actions, error);
if (ret)
goto policy_add_err;
- if (!is_rss) {
+ if (!is_rss && !mtr_policy->is_queue) {
/* Create policy rules in HW. */
ret = mlx5_flow_create_policy_rules(dev, mtr_policy);
if (ret)
@@ -1808,6 +1808,40 @@ mlx5_flow_meter_detach(struct mlx5_priv *priv,
#endif
}
+/**
+ * Flush meter with Rx queue configuration.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_sub_policy *sub_policy;
+ struct mlx5_flow_meter_policy *mtr_policy;
+ void *entry;
+ uint32_t i, policy_idx;
+
+ if (!priv->mtr_en)
+ return;
+ if (priv->sh->mtrmng->policy_idx_tbl && priv->sh->refcnt == 1) {
+ MLX5_L3T_FOREACH(priv->sh->mtrmng->policy_idx_tbl,
+ i, entry) {
+ policy_idx = *(uint32_t *)entry;
+ sub_policy = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+ policy_idx);
+ if (!sub_policy || !sub_policy->main_policy)
+ continue;
+ mtr_policy = sub_policy->main_policy;
+ if (mtr_policy->is_queue || mtr_policy->is_rss)
+ mlx5_flow_destroy_sub_policy_with_rxq(dev,
+ mtr_policy);
+ }
+ }
+}
+
/**
* Flush meter configuration.
*
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index eb8c99cd93..879d3171e9 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1180,7 +1180,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
mlx5_traffic_disable(dev);
/* All RX queue flags will be cleared in the flush interface. */
mlx5_flow_list_flush(dev, &priv->flows, true);
- mlx5_flow_meter_flush(dev, NULL);
+ mlx5_flow_meter_rxq_flush(dev);
mlx5_rx_intr_vec_disable(dev);
priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
--
2.27.0
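A note on the sub_policy_num handling above: flow_dv_destroy_sub_policy_with_rxq()
packs one per-domain sub-policy count into a single 32-bit word. A standalone
sketch of that encoding follows, assuming a 4-bit field per domain; the real
MLX5_MTR_SUB_POLICY_NUM_SHIFT/MASK values are defined in the mlx5 headers and
may differ:

	#include <stdint.h>

	/* Assumed field layout: 4 bits per domain (ingress, egress,
	 * transfer); the actual mlx5 constants may differ.
	 */
	#define SUB_POLICY_NUM_SHIFT 4
	#define SUB_POLICY_NUM_MASK 0xF

	/* Read the sub-policy count for one domain. */
	static inline uint16_t
	sub_policy_num_get(uint32_t packed, uint32_t domain)
	{
		return (packed >> (SUB_POLICY_NUM_SHIFT * domain)) &
		       SUB_POLICY_NUM_MASK;
	}

	/* Write back a new count for one domain, as done after freeing
	 * redundant sub-policies in the destroy path.
	 */
	static inline uint32_t
	sub_policy_num_set(uint32_t packed, uint32_t domain, uint16_t num)
	{
		packed &= ~((uint32_t)SUB_POLICY_NUM_MASK <<
			    (SUB_POLICY_NUM_SHIFT * domain));
		packed |= (uint32_t)(num & SUB_POLICY_NUM_MASK) <<
			  (SUB_POLICY_NUM_SHIFT * domain);
		return packed;
	}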
* [dpdk-dev] [PATCH v2] net/mlx5: fix meter cleaning in stop operation
2021-05-13 7:35 [dpdk-dev] [PATCH v1] net/mlx5: fix meter cleaning in stop operation Li Zhang
@ 2021-05-13 8:35 ` Li Zhang
2021-05-13 13:01 ` Thomas Monjalon
2021-05-13 9:56 ` [dpdk-dev] [PATCH v1] " Matan Azrad
1 sibling, 1 reply; 4+ messages in thread
From: Li Zhang @ 2021-05-13 8:35 UTC (permalink / raw)
To: orika, viacheslavo, matan, shahafs; +Cc: dev, thomas, rasland, roniba
A meter may hold Rx queue references in its sub-policies.
In the stop operation, all the Rx queues are released.
Wrongly, the meter references were not released before
destroying the Rx queues, which caused an error in stop.
Release the meter references to the Rx queues in the stop operation.
Fixes: fc6ce56bba ("net/mlx5: prepare sub-policy for flow with meter")
Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
drivers/net/mlx5/mlx5.h | 5 ++
drivers/net/mlx5/mlx5_flow.c | 77 ++++++++++++++------
drivers/net/mlx5/mlx5_flow.h | 6 ++
drivers/net/mlx5/mlx5_flow_dv.c | 109 +++++++++++++++++++----------
drivers/net/mlx5/mlx5_flow_meter.c | 36 +++++++++-
drivers/net/mlx5/mlx5_trigger.c | 2 +-
6 files changed, 175 insertions(+), 60 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 7eca6a6fa6..b8a29dd369 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -668,6 +668,8 @@ struct mlx5_meter_policy_action_container {
/* Index to port ID action resource. */
void *dr_jump_action[MLX5_MTR_DOMAIN_MAX];
/* Jump/drop action per color. */
+ uint16_t queue;
+ /* Queue action configuration. */
};
};
@@ -681,6 +683,8 @@ struct mlx5_flow_meter_policy {
/* Rule applies to egress domain. */
uint32_t transfer:1;
/* Rule applies to transfer domain. */
+ uint32_t is_queue:1;
+ /* Is queue action in policy table. */
rte_spinlock_t sl;
uint32_t ref_cnt;
/* Use count. */
@@ -1655,6 +1659,7 @@ struct mlx5_flow_meter_policy *mlx5_flow_meter_policy_find
uint32_t *policy_idx);
int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
struct rte_mtr_error *error);
+void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);
/* mlx5_os.c */
struct rte_pci_driver;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 23d4224ec5..dbeca571b6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4567,7 +4567,9 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
"Failed to find Meter Policy.");
goto exit;
}
- if (policy->is_rss) {
+ if (policy->is_rss ||
+ (policy->is_queue &&
+ !policy->sub_policys[MLX5_MTR_DOMAIN_INGRESS][0]->rix_hrxq[0])) {
struct mlx5_flow_workspace *wks =
mlx5_flow_get_thread_workspace();
struct mlx5_flow_rss_desc rss_desc_v[MLX5_MTR_RTE_COLORS];
@@ -4583,34 +4585,49 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
struct mlx5_flow dev_flow = {0};
struct mlx5_flow_handle dev_handle = { {0} };
- const void *rss_act = policy->act_cnt[i].rss->conf;
- struct rte_flow_action rss_actions[2] = {
- [0] = {
+
+ rss_desc_v[i] = wks->rss_desc;
+ if (policy->is_rss) {
+ const void *rss_act =
+ policy->act_cnt[i].rss->conf;
+ struct rte_flow_action rss_actions[2] = {
+ [0] = {
.type = RTE_FLOW_ACTION_TYPE_RSS,
.conf = rss_act
- },
- [1] = {
+ },
+ [1] = {
.type = RTE_FLOW_ACTION_TYPE_END,
.conf = NULL
- }
- };
+ }
+ };
- dev_flow.handle = &dev_handle;
- dev_flow.ingress = attr->ingress;
- dev_flow.flow = flow;
- dev_flow.external = 0;
+ dev_flow.handle = &dev_handle;
+ dev_flow.ingress = attr->ingress;
+ dev_flow.flow = flow;
+ dev_flow.external = 0;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- dev_flow.dv.transfer = attr->transfer;
+ dev_flow.dv.transfer = attr->transfer;
#endif
- /* Translate RSS action to get rss hash fields. */
- if (flow_drv_translate(dev, &dev_flow, attr,
+ /**
+ * Translate RSS action to get rss hash fields.
+ */
+ if (flow_drv_translate(dev, &dev_flow, attr,
items, rss_actions, error))
- goto exit;
- rss_desc_v[i] = wks->rss_desc;
- rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN;
- rss_desc_v[i].hash_fields = dev_flow.hash_fields;
- rss_desc_v[i].queue_num = rss_desc_v[i].hash_fields ?
- rss_desc_v[i].queue_num : 1;
+ goto exit;
+ rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN;
+ rss_desc_v[i].hash_fields =
+ dev_flow.hash_fields;
+ rss_desc_v[i].queue_num =
+ rss_desc_v[i].hash_fields ?
+ rss_desc_v[i].queue_num : 1;
+ } else {
+ /* This is a queue action. */
+ rss_desc_v[i].key_len = 0;
+ rss_desc_v[i].hash_fields = 0;
+ rss_desc_v[i].queue =
+ &policy->act_cnt[i].queue;
+ rss_desc_v[i].queue_num = 1;
+ }
rss_desc[i] = &rss_desc_v[i];
}
sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev,
@@ -7223,6 +7240,24 @@ mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
fops->destroy_mtr_drop_tbls(dev);
}
+/**
+ * Destroy the sub policy table with RX queue.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ * Pointer to meter policy table.
+ */
+void
+mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ fops->destroy_sub_policy_with_rxq(dev, mtr_policy);
+}
+
/**
* Allocate the needed aso flow meter id.
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 04c8806bf6..2f2aa962f9 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1156,6 +1156,9 @@ typedef struct mlx5_flow_meter_sub_policy *
(struct rte_eth_dev *dev,
struct mlx5_flow_meter_policy *mtr_policy,
struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
+typedef void (*mlx5_flow_destroy_sub_policy_with_rxq_t)
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
typedef uint32_t (*mlx5_flow_mtr_alloc_t)
(struct rte_eth_dev *dev);
typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
@@ -1249,6 +1252,7 @@ struct mlx5_flow_driver_ops {
mlx5_flow_create_def_policy_t create_def_policy;
mlx5_flow_destroy_def_policy_t destroy_def_policy;
mlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;
+ mlx5_flow_destroy_sub_policy_with_rxq_t destroy_sub_policy_with_rxq;
mlx5_flow_counter_alloc_t counter_alloc;
mlx5_flow_counter_free_t counter_free;
mlx5_flow_counter_query_t counter_query;
@@ -1562,6 +1566,8 @@ struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare
(struct rte_eth_dev *dev,
struct mlx5_flow_meter_policy *mtr_policy,
struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
+void mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy);
int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
int mlx5_action_handle_flush(struct rte_eth_dev *dev);
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 7fc7efbc5c..c7a0a38650 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -14707,12 +14707,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
MLX5_ASSERT(dev_flow.dv.tag_resource);
act_cnt->rix_mark =
dev_flow.handle->dvh.rix_tag;
- if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
- dev_flow.handle->rix_hrxq =
- mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
- flow_drv_rxq_flags_set(dev,
- dev_flow.handle);
- }
action_flags |= MLX5_FLOW_ACTION_MARK;
break;
}
@@ -14760,12 +14754,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
"set tag action");
act_cnt->modify_hdr =
dev_flow.handle->dvh.modify_hdr;
- if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
- dev_flow.handle->rix_hrxq =
- mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
- flow_drv_rxq_flags_set(dev,
- dev_flow.handle);
- }
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
}
@@ -14809,41 +14797,20 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
}
case RTE_FLOW_ACTION_TYPE_QUEUE:
{
- struct mlx5_hrxq *hrxq;
- uint32_t hrxq_idx;
- struct mlx5_flow_rss_desc rss_desc;
- struct mlx5_flow_meter_sub_policy *sub_policy =
- mtr_policy->sub_policys[domain][0];
-
if (i >= MLX5_MTR_RTE_COLORS)
return -rte_mtr_error_set(error,
ENOTSUP,
RTE_MTR_ERROR_TYPE_METER_POLICY,
NULL, "cannot create policy "
"fate queue for this color");
- memset(&rss_desc, 0,
- sizeof(struct mlx5_flow_rss_desc));
- rss_desc.queue_num = 1;
- rss_desc.const_q = act->conf;
- hrxq = flow_dv_hrxq_prepare(dev, &dev_flow,
- &rss_desc, &hrxq_idx);
- if (!hrxq)
- return -rte_mtr_error_set(error,
- ENOTSUP,
- RTE_MTR_ERROR_TYPE_METER_POLICY,
- NULL,
- "cannot create policy fate queue");
- sub_policy->rix_hrxq[i] = hrxq_idx;
+ act_cnt->queue =
+ ((const struct rte_flow_action_queue *)
+ (act->conf))->index;
act_cnt->fate_action =
MLX5_FLOW_FATE_QUEUE;
dev_flow.handle->fate_action =
MLX5_FLOW_FATE_QUEUE;
- if (action_flags & MLX5_FLOW_ACTION_MARK ||
- action_flags & MLX5_FLOW_ACTION_SET_TAG) {
- dev_flow.handle->rix_hrxq = hrxq_idx;
- flow_drv_rxq_flags_set(dev,
- dev_flow.handle);
- }
+ mtr_policy->is_queue = 1;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
break;
}
@@ -16057,6 +16024,73 @@ flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
return NULL;
}
+
+/**
+ * Destroy the sub policy table with RX queue.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ * Pointer to meter policy table.
+ */
+static void
+flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+ uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+ uint32_t i, j;
+ uint16_t sub_policy_num, new_policy_num;
+
+ rte_spinlock_lock(&mtr_policy->sl);
+ for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+ switch (mtr_policy->act_cnt[i].fate_action) {
+ case MLX5_FLOW_FATE_SHARED_RSS:
+ sub_policy_num = (mtr_policy->sub_policy_num >>
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+ MLX5_MTR_SUB_POLICY_NUM_MASK;
+ new_policy_num = sub_policy_num;
+ for (j = 0; j < sub_policy_num; j++) {
+ sub_policy =
+ mtr_policy->sub_policys[domain][j];
+ if (sub_policy) {
+ __flow_dv_destroy_sub_policy_rules(dev,
+ sub_policy);
+ if (sub_policy !=
+ mtr_policy->sub_policys[domain][0]) {
+ mtr_policy->sub_policys[domain][j] =
+ NULL;
+ mlx5_ipool_free
+ (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+ sub_policy->idx);
+ new_policy_num--;
+ }
+ }
+ }
+ if (new_policy_num != sub_policy_num) {
+ mtr_policy->sub_policy_num &=
+ ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
+ mtr_policy->sub_policy_num |=
+ (new_policy_num &
+ MLX5_MTR_SUB_POLICY_NUM_MASK) <<
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
+ }
+ break;
+ case MLX5_FLOW_FATE_QUEUE:
+ sub_policy = mtr_policy->sub_policys[domain][0];
+ __flow_dv_destroy_sub_policy_rules(dev,
+ sub_policy);
+ break;
+ default:
+ /* Other actions have no queue, nothing to do. */
+ break;
+ }
+ }
+ rte_spinlock_unlock(&mtr_policy->sl);
+}
+
/**
* Validate the batch counter support in root table.
*
@@ -16668,6 +16702,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
.create_def_policy = flow_dv_create_def_policy,
.destroy_def_policy = flow_dv_destroy_def_policy,
.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
+ .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
.counter_alloc = flow_dv_counter_allocate,
.counter_free = flow_dv_counter_free,
.counter_query = flow_dv_counter_query,
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index ac2735e6e2..16991748dc 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -748,7 +748,7 @@ mlx5_flow_meter_policy_add(struct rte_eth_dev *dev,
policy->actions, error);
if (ret)
goto policy_add_err;
- if (!is_rss) {
+ if (!is_rss && !mtr_policy->is_queue) {
/* Create policy rules in HW. */
ret = mlx5_flow_create_policy_rules(dev, mtr_policy);
if (ret)
@@ -1808,6 +1808,40 @@ mlx5_flow_meter_detach(struct mlx5_priv *priv,
#endif
}
+/**
+ * Flush meter with Rx queue configuration.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_sub_policy *sub_policy;
+ struct mlx5_flow_meter_policy *mtr_policy;
+ void *entry;
+ uint32_t i, policy_idx;
+
+ if (!priv->mtr_en)
+ return;
+ if (priv->sh->mtrmng->policy_idx_tbl && priv->sh->refcnt == 1) {
+ MLX5_L3T_FOREACH(priv->sh->mtrmng->policy_idx_tbl,
+ i, entry) {
+ policy_idx = *(uint32_t *)entry;
+ sub_policy = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+ policy_idx);
+ if (!sub_policy || !sub_policy->main_policy)
+ continue;
+ mtr_policy = sub_policy->main_policy;
+ if (mtr_policy->is_queue || mtr_policy->is_rss)
+ mlx5_flow_destroy_sub_policy_with_rxq(dev,
+ mtr_policy);
+ }
+ }
+}
+
/**
* Flush meter configuration.
*
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index eb8c99cd93..879d3171e9 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1180,7 +1180,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
mlx5_traffic_disable(dev);
/* All RX queue flags will be cleared in the flush interface. */
mlx5_flow_list_flush(dev, &priv->flows, true);
- mlx5_flow_meter_flush(dev, NULL);
+ mlx5_flow_meter_rxq_flush(dev);
mlx5_rx_intr_vec_disable(dev);
priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
--
2.27.0
* Re: [dpdk-dev] [PATCH v1] net/mlx5: fix meter cleaning in stop operation
2021-05-13 7:35 [dpdk-dev] [PATCH v1] net/mlx5: fix meter cleaning in stop operation Li Zhang
2021-05-13 8:35 ` [dpdk-dev] [PATCH v2] " Li Zhang
@ 2021-05-13 9:56 ` Matan Azrad
1 sibling, 0 replies; 4+ messages in thread
From: Matan Azrad @ 2021-05-13 9:56 UTC (permalink / raw)
To: Li Zhang, Ori Kam, Slava Ovsiienko, Shahaf Shuler
Cc: dev, NBU-Contact-Thomas Monjalon, Raslan Darawsheh, Roni Bar Yanai
From: Li Zhang
> A meter may hold Rx queue references in its sub-policies.
> In the stop operation, all the Rx queues are released.
>
> Wrongly, the meter references were not released before destroying the Rx
> queues, which caused an error in stop.
>
> Release the meter references to the Rx queues in the stop operation.
>
> Fixes: fc6ce56bba ("net/mlx5: prepare sub-policy for flow with meter")
>
> Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
* Re: [dpdk-dev] [PATCH v2] net/mlx5: fix meter cleaning in stop operation
2021-05-13 8:35 ` [dpdk-dev] [PATCH v2] " Li Zhang
@ 2021-05-13 13:01 ` Thomas Monjalon
0 siblings, 0 replies; 4+ messages in thread
From: Thomas Monjalon @ 2021-05-13 13:01 UTC (permalink / raw)
To: dev, Li Zhang; +Cc: orika, viacheslavo, matan, shahafs, dev, rasland, roniba
13/05/2021 10:35, Li Zhang:
> A meter may hold Rx queue references in its sub-policies.
> In the stop operation, all the Rx queues are released.
>
> Wrongly, the meter references were not released before
> destroying the Rx queues, which caused an error in stop.
>
> Release the meter references to the Rx queues in the stop operation.
>
> Fixes: fc6ce56bba ("net/mlx5: prepare sub-policy for flow with meter")
>
> Signed-off-by: Li Zhang <lizh@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
Applied in next-net-mlx, thanks.