From: Gregory Etelson <getelson@nvidia.com>
To: <dev@dpdk.org>
Cc: <getelson@nvidia.com>, <mkashani@nvidia.com>,
<rasland@nvidia.com>, Michael Baum <michaelba@nvidia.com>,
<dsosnowski@nvidia.com>,
"Viacheslav Ovsiienko" <viacheslavo@nvidia.com>,
Bing Zhao <bingz@nvidia.com>, Ori Kam <orika@nvidia.com>,
Suanming Mou <suanmingm@nvidia.com>,
Matan Azrad <matan@nvidia.com>
Subject: [PATCH 1/3] net/mlx5: fix multi process Tx default rules
Date: Wed, 29 Oct 2025 17:57:08 +0200
Message-ID: <20251029155711.169580-1-getelson@nvidia.com>
From: Michael Baum <michaelba@nvidia.com>
When representor matching is disabled, an egress default rule is
inserted which matches all traffic, copies REG_A to REG_C_1 (when
dv_xmeta_en == 4), and jumps to group 1. All user rules start from
group 1.
When two processes work on the same port, the first one creates this
flow rule and the second one fails with errno EEXIST. This renders all
user egress rules in the second process invalid.
This patch changes this default rule to match on SQs, so each process
inserts its own per-queue copy rules and no longer collides with the
rule created by the other process.
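For reference, a minimal sketch of the new per-SQ match pattern, as
built in the mlx5_flow_hw.c hunk below (sqn is the number of the send
queue being enabled):

    struct mlx5_rte_flow_item_sq sq_spec = {
        .queue = sqn, /* match only traffic from this send queue */
    };
    struct rte_flow_item items[] = {
        {
            /* internal mlx5 item type, matching a single SQ */
            .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
            .spec = &sq_spec,
        },
        {
            .type = RTE_FLOW_ITEM_TYPE_END,
        },
    };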
Fixes: 483181f7b6dd ("net/mlx5: support device control of representor matching")
Cc: dsosnowski@nvidia.com
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 4 +++-
drivers/net/mlx5/mlx5_flow_hw.c | 24 +++++++++++-------------
drivers/net/mlx5/mlx5_trigger.c | 25 +++++++++++++------------
drivers/net/mlx5/mlx5_txq.c | 8 ++++++++
4 files changed, 35 insertions(+), 26 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index ff61706054..07d2f4185c 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -3582,7 +3582,9 @@ int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,
int mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev,
uint32_t sqn);
int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);
-int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev);
+int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev,
+ uint32_t sqn,
+ bool external);
int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external);
int mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev);
int mlx5_flow_actions_validate(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 491a78a0de..d945c88eb0 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -10643,7 +10643,7 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev,
.priority = MLX5_HW_LOWEST_PRIO_ROOT,
.egress = 1,
},
- .nb_flows = 1, /* One default flow rule for all. */
+ .nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
};
struct mlx5_flow_template_table_cfg tx_tbl_cfg = {
.attr = tx_tbl_attr,
@@ -16004,21 +16004,18 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
}
int
-mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
+mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow_item_eth promisc = {
- .hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .hdr.ether_type = 0,
+ struct mlx5_rte_flow_item_sq sq_spec = {
+ .queue = sqn,
};
- struct rte_flow_item eth_all[] = {
- [0] = {
- .type = RTE_FLOW_ITEM_TYPE_ETH,
- .spec = &promisc,
- .mask = &promisc,
+ struct rte_flow_item items[] = {
+ {
+ .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
+ .spec = &sq_spec,
},
- [1] = {
+ {
.type = RTE_FLOW_ITEM_TYPE_END,
},
};
@@ -16048,6 +16045,7 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
};
struct mlx5_ctrl_flow_info flow_info = {
.type = MLX5_CTRL_FLOW_TYPE_TX_META_COPY,
+ .tx_repr_sq = sqn,
};
MLX5_ASSERT(priv->master);
@@ -16057,7 +16055,7 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
return 0;
return flow_hw_create_ctrl_flow(dev, dev,
priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl,
- eth_all, 0, copy_reg_action, 0, &flow_info, false);
+ items, 0, copy_reg_action, 0, &flow_info, external);
}
int
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 916ac03c16..e6acb56d4d 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1606,18 +1606,6 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev)
unsigned int i;
int ret;
- /*
- * With extended metadata enabled, the Tx metadata copy is handled by default
- * Tx tagging flow rules, so default Tx flow rule is not needed. It is only
- * required when representor matching is disabled.
- */
- if (config->dv_esw_en &&
- !config->repr_matching &&
- config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
- priv->master) {
- if (mlx5_flow_hw_create_tx_default_mreg_copy_flow(dev))
- goto error;
- }
for (i = 0; i < priv->txqs_n; ++i) {
struct mlx5_txq_ctrl *txq = mlx5_txq_get(dev, i);
uint32_t queue;
@@ -1639,6 +1627,19 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev)
goto error;
}
}
+ /*
+ * With extended metadata enabled, the Tx metadata copy is handled by default
+ * Tx tagging flow rules, so default Tx flow rule is not needed. It is only
+ * required when representor matching is disabled.
+ */
+ if (config->dv_esw_en && !config->repr_matching &&
+ config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
+ (priv->master || priv->representor)) {
+ if (mlx5_flow_hw_create_tx_default_mreg_copy_flow(dev, queue, false)) {
+ mlx5_txq_release(dev, i);
+ goto error;
+ }
+ }
mlx5_txq_release(dev, i);
}
if (config->fdb_def_rule) {
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index b090d8274d..834ca541d5 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1459,6 +1459,14 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num);
return -rte_errno;
}
+
+ if (!priv->sh->config.repr_matching &&
+ priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
+ mlx5_flow_hw_create_tx_default_mreg_copy_flow(dev, sq_num, true)) {
+ if (sq_miss_created)
+ mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num);
+ return -rte_errno;
+ }
return 0;
}
#endif
--
2.51.0
Thread overview: 4+ messages
2025-10-29 15:57 Gregory Etelson [this message]
2025-10-29 15:57 ` [PATCH 2/3] net/mlx5: fix control flow leakage for external SQ Gregory Etelson
2025-10-29 15:57 ` [PATCH 3/3] net/mlx5: support flow metadata exchange between E-Switch and VM Gregory Etelson
2025-11-02 7:32 ` [PATCH 1/3] net/mlx5: fix multi process Tx default rules Raslan Darawsheh