From: Xueming Li <xuemingl@nvidia.com>
To: <dev@dpdk.org>
Cc: <xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>,
Matan Azrad <matan@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH 07/11] net/mlx5: move Rx queue hairpin info to private data
Date: Sun, 26 Sep 2021 19:19:00 +0800
Message-ID: <20210926111904.237736-8-xuemingl@nvidia.com>
In-Reply-To: <20210926111904.237736-1-xuemingl@nvidia.com>
Hairpin info of an Rx queue can't be shared, so move it to the private
queue data.
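For background, shared Rx queues let several per-queue objects reference
one mlx5_rxq_ctrl, while per-owner state lives in mlx5_rxq_priv. Below is
a minimal standalone sketch of that split; the types and fields
(hairpin_conf_s, rxq_ctrl_s, rxq_priv_s, peer_queue) are simplified
stand-ins for illustration only, not the driver's actual definitions:

#include <stdint.h>
#include <stdio.h>

struct hairpin_conf_s {
	uint16_t peer_queue;	/* stand-in for rte_eth_hairpin_conf */
};

struct rxq_ctrl_s {		/* shared among all owners of the queue */
	uint32_t refcnt;
};

struct rxq_priv_s {		/* one instance per owning queue */
	struct rxq_ctrl_s *ctrl;		/* back pointer to shared part */
	struct hairpin_conf_s hairpin_conf;	/* per-queue, as in this patch */
	uint32_t hairpin_status;		/* per-queue binding status */
};

int
main(void)
{
	struct rxq_ctrl_s shared = { .refcnt = 2 };
	/* Two queues share one ctrl but keep independent hairpin state. */
	struct rxq_priv_s q0 = { .ctrl = &shared,
				 .hairpin_conf = { .peer_queue = 3 } };
	struct rxq_priv_s q1 = { .ctrl = &shared,
				 .hairpin_conf = { .peer_queue = 7 } };

	printf("shared refcnt %u: q0 peer %u, q1 peer %u\n",
	       shared.refcnt, q0.hairpin_conf.peer_queue,
	       q1.hairpin_conf.peer_queue);
	return q0.ctrl == q1.ctrl ? 0 : 1;
}

If the hairpin fields stayed in the shared ctrl, the two owners above
would overwrite each other's configuration; keeping them in the private
struct is what makes sharing the ctrl safe.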
Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
drivers/net/mlx5/mlx5_rx.h | 4 ++--
drivers/net/mlx5/mlx5_rxq.c | 13 +++++--------
drivers/net/mlx5/mlx5_trigger.c | 24 ++++++++++++------------
3 files changed, 19 insertions(+), 22 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index fe19414c130..2ed544556f5 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -171,8 +171,6 @@ struct mlx5_rxq_ctrl {
uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
uint32_t wqn; /* WQ number. */
uint16_t dump_file_n; /* Number of dump files. */
- struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
- uint32_t hairpin_status; /* Hairpin binding status. */
};
/* RX queue private data. */
@@ -182,6 +180,8 @@ struct mlx5_rxq_priv {
struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
struct mlx5_priv *priv; /* Back pointer to private data. */
+ struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
+ uint32_t hairpin_status; /* Hairpin binding status. */
};
/* mlx5_rxq.c */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 7f28646f55c..21cb1000899 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1649,8 +1649,8 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.elts = NULL;
tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
- tmpl->hairpin_conf = *hairpin_conf;
tmpl->rxq.idx = idx;
+ rxq->hairpin_conf = *hairpin_conf;
mlx5_rxq_ref(dev, idx);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
@@ -1869,14 +1869,11 @@ const struct rte_eth_hairpin_conf *
mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
- if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
- rxq_ctrl = container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl,
- rxq);
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
- return &rxq_ctrl->hairpin_conf;
+ if (idx < priv->rxqs_n && rxq != NULL) {
+ if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+ return &rxq->hairpin_conf;
}
return NULL;
}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index a49254c96f6..f376f4d6fc4 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -273,7 +273,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
}
rxq_ctrl = rxq->ctrl;
if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
- rxq_ctrl->hairpin_conf.peers[0].queue != i) {
+ rxq->hairpin_conf.peers[0].queue != i) {
rte_errno = ENOMEM;
DRV_LOG(ERR, "port %u Tx queue %d can't be binded to "
"Rx queue %d", dev->data->port_id,
@@ -303,7 +303,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
if (ret)
goto error;
/* Qs with auto-bind will be destroyed directly. */
- rxq_ctrl->hairpin_status = 1;
+ rxq->hairpin_status = 1;
txq_ctrl->hairpin_status = 1;
mlx5_txq_release(dev, i);
}
@@ -406,9 +406,9 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
}
peer_info->qp_id = rxq_ctrl->obj->rq->id;
peer_info->vhca_id = priv->config.hca_attr.vhca_id;
- peer_info->peer_q = rxq_ctrl->hairpin_conf.peers[0].queue;
- peer_info->tx_explicit = rxq_ctrl->hairpin_conf.tx_explicit;
- peer_info->manual_bind = rxq_ctrl->hairpin_conf.manual_bind;
+ peer_info->peer_q = rxq->hairpin_conf.peers[0].queue;
+ peer_info->tx_explicit = rxq->hairpin_conf.tx_explicit;
+ peer_info->manual_bind = rxq->hairpin_conf.manual_bind;
}
return 0;
}
@@ -530,20 +530,20 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
dev->data->port_id, cur_queue);
return -rte_errno;
}
- if (rxq_ctrl->hairpin_status != 0) {
+ if (rxq->hairpin_status != 0) {
DRV_LOG(DEBUG, "port %u Rx queue %d is already bound",
dev->data->port_id, cur_queue);
return 0;
}
if (peer_info->tx_explicit !=
- rxq_ctrl->hairpin_conf.tx_explicit) {
+ rxq->hairpin_conf.tx_explicit) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u Rx queue %d and peer Tx rule mode"
" mismatch", dev->data->port_id, cur_queue);
return -rte_errno;
}
if (peer_info->manual_bind !=
- rxq_ctrl->hairpin_conf.manual_bind) {
+ rxq->hairpin_conf.manual_bind) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u Rx queue %d and peer binding mode"
" mismatch", dev->data->port_id, cur_queue);
@@ -555,7 +555,7 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
rq_attr.hairpin_peer_vhca = peer_info->vhca_id;
ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
if (ret == 0)
- rxq_ctrl->hairpin_status = 1;
+ rxq->hairpin_status = 1;
}
return ret;
}
@@ -637,7 +637,7 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
dev->data->port_id, cur_queue);
return -rte_errno;
}
- if (rxq_ctrl->hairpin_status == 0) {
+ if (rxq->hairpin_status == 0) {
DRV_LOG(DEBUG, "port %u Rx queue %d is already unbound",
dev->data->port_id, cur_queue);
return 0;
@@ -652,7 +652,7 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
rq_attr.rq_state = MLX5_SQC_STATE_RST;
ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
if (ret == 0)
- rxq_ctrl->hairpin_status = 0;
+ rxq->hairpin_status = 0;
}
return ret;
}
@@ -990,7 +990,7 @@ mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
rxq_ctrl = rxq->ctrl;
if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN)
continue;
- pp = rxq_ctrl->hairpin_conf.peers[0].port;
+ pp = rxq->hairpin_conf.peers[0].port;
if (pp >= RTE_MAX_ETHPORTS) {
rte_errno = ERANGE;
DRV_LOG(ERR, "port %hu queue %u peer port "
--
2.33.0