From: Maayan Kashani <mkashani@nvidia.com>
To: <dev@dpdk.org>
Cc: <mkashani@nvidia.com>, <dsosnowski@nvidia.com>,
<rasland@nvidia.com>, <stable@dpdk.org>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
Bing Zhao <bingz@nvidia.com>, Ori Kam <orika@nvidia.com>,
Suanming Mou <suanmingm@nvidia.com>,
Matan Azrad <matan@nvidia.com>, Xueming Li <xuemingl@nvidia.com>
Subject: [PATCH] net/mlx5: fix assert failure on hairpin queue release
Date: Thu, 27 Feb 2025 12:14:14 +0200
Message-ID: <20250227101415.89079-1-mkashani@nvidia.com>

Releasing a hairpin Rx queue triggered an assertion because the queue
ctrl_ref counter was unbalanced: the hairpin queue setup path never
took the reference on the queue control structure that the release
path later dropped.

Fix the imbalance by taking the ctrl_ref reference in
mlx5_rx_hairpin_queue_setup(). Additionally, restore the per-port
rxqsctrl list linking all Rx queue controls, and keep the
device-shared shared_rxqs list for shared Rx queues only, so that on
release a queue is unlinked from shared_rxqs only if it was actually
inserted there.

Fixes: 09c2555 ("net/mlx5: support shared Rx queue")
Cc: stable@dpdk.org

Signed-off-by: Maayan Kashani <mkashani@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/mlx5.h | 1 +
drivers/net/mlx5/mlx5_flow.c | 4 ++--
drivers/net/mlx5/mlx5_rx.h | 1 +
drivers/net/mlx5/mlx5_rxq.c | 12 ++++++++----
4 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 545ba48b3cd..6df99c25e2f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -2023,6 +2023,7 @@ struct mlx5_priv {
uint32_t ctrl_flows; /* Control flow rules. */
rte_spinlock_t flow_list_lock;
struct mlx5_obj_ops obj_ops; /* HW objects operations. */
+ LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
struct mlx5_list *hrxqs; /* Hash Rx queues. */
LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f8b3e504b35..6169ebc13f6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1648,13 +1648,13 @@ flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
opriv->domain_id != priv->domain_id ||
opriv->mark_enabled)
continue;
- LIST_FOREACH(rxq_ctrl, &opriv->sh->shared_rxqs, share_entry) {
+ LIST_FOREACH(rxq_ctrl, &opriv->rxqsctrl, next) {
rxq_ctrl->rxq.mark = 1;
}
opriv->mark_enabled = 1;
}
} else {
- LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {
+ LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
rxq_ctrl->rxq.mark = 1;
}
priv->mark_enabled = 1;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index f80a2e32279..6380895502e 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -169,6 +169,7 @@ struct __rte_cache_aligned mlx5_rxq_data {
/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
struct mlx5_rxq_data rxq; /* Data path structure. */
+ LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
struct mlx5_dev_ctx_shared *sh; /* Shared context. */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index a5971b5cdda..5cf7d4971b3 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1037,6 +1037,7 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
rte_errno = ENOMEM;
return -rte_errno;
}
+ rte_atomic_fetch_add_explicit(&rxq_ctrl->ctrl_ref, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list",
dev->data->port_id, idx);
dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
@@ -2006,8 +2007,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->rxq.shared = 1;
tmpl->share_group = conf->share_group;
tmpl->share_qid = conf->share_qid;
+ LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
}
- LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
+ LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
rte_atomic_store_explicit(&tmpl->ctrl_ref, 1, rte_memory_order_relaxed);
return tmpl;
error:
@@ -2061,7 +2063,7 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
tmpl->rxq.idx = idx;
rxq->hairpin_conf = *hairpin_conf;
mlx5_rxq_ref(dev, idx);
- LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
+ LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
rte_atomic_store_explicit(&tmpl->ctrl_ref, 1, rte_memory_order_relaxed);
return tmpl;
}
@@ -2336,7 +2338,9 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
if (!rxq_ctrl->is_hairpin)
mlx5_mr_btree_free
(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
- LIST_REMOVE(rxq_ctrl, share_entry);
+ if (rxq_ctrl->rxq.shared)
+ LIST_REMOVE(rxq_ctrl, share_entry);
+ LIST_REMOVE(rxq_ctrl, next);
mlx5_free(rxq_ctrl);
}
dev->data->rx_queues[idx] = NULL;
@@ -2362,7 +2366,7 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
struct mlx5_rxq_ctrl *rxq_ctrl;
int ret = 0;

- LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {
+ LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
dev->data->port_id, rxq_ctrl->rxq.idx);
++ret;
--
2.21.0