From: Michael Baum <michaelba@nvidia.com>
To: <dev@dpdk.org>
Cc: Matan Azrad <matan@nvidia.com>,
Raslan Darawsheh <rasland@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [PATCH v3 4/6] net/mlx5: optimize RxQ/TxQ control structure
Date: Fri, 25 Feb 2022 01:25:09 +0200 [thread overview]
Message-ID: <20220224232511.3238707-5-michaelba@nvidia.com> (raw)
In-Reply-To: <20220224232511.3238707-1-michaelba@nvidia.com>
The RxQ/TxQ control structure has a field named type. This field is an
enum with values for standard and hairpin.
The only use of this field is to check whether the queue is of the
hairpin type or the standard type.
This patch replaces it with a boolean variable indicating whether the
queue is a hairpin queue.
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
drivers/net/mlx5/mlx5_devx.c | 26 ++++++++++--------------
drivers/net/mlx5/mlx5_ethdev.c | 2 +-
drivers/net/mlx5/mlx5_flow.c | 14 ++++++-------
drivers/net/mlx5/mlx5_flow_dv.c | 14 +++++--------
drivers/net/mlx5/mlx5_rx.h | 13 +++---------
drivers/net/mlx5/mlx5_rxq.c | 33 +++++++++++-------------------
drivers/net/mlx5/mlx5_trigger.c | 36 ++++++++++++++++-----------------
drivers/net/mlx5/mlx5_tx.h | 7 +------
drivers/net/mlx5/mlx5_txq.c | 14 ++++++-------
9 files changed, 64 insertions(+), 95 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 8d151fa4ab..bcd2358165 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -88,7 +88,7 @@ mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type)
default:
break;
}
- if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+ if (rxq->ctrl->is_hairpin)
return mlx5_devx_cmd_modify_rq(rxq->ctrl->obj->rq, &rq_attr);
return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
}
@@ -162,7 +162,7 @@ mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)
if (rxq_obj == NULL)
return;
- if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
+ if (rxq_obj->rxq_ctrl->is_hairpin) {
if (rxq_obj->rq == NULL)
return;
mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST);
@@ -476,7 +476,7 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
MLX5_ASSERT(rxq_data);
MLX5_ASSERT(tmpl);
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+ if (rxq_ctrl->is_hairpin)
return mlx5_rxq_obj_hairpin_new(rxq);
tmpl->rxq_ctrl = rxq_ctrl;
if (rxq_ctrl->irq && !rxq_ctrl->started) {
@@ -583,7 +583,7 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queues[i]);
MLX5_ASSERT(rxq != NULL);
- if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+ if (rxq->ctrl->is_hairpin)
rqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id;
else
rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
@@ -706,17 +706,13 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
int tunnel, struct mlx5_devx_tir_attr *tir_attr)
{
struct mlx5_priv *priv = dev->data->dev_private;
- enum mlx5_rxq_type rxq_obj_type;
+ bool is_hairpin;
bool lro = true;
uint32_t i;
/* NULL queues designate drop queue. */
if (ind_tbl->queues != NULL) {
- struct mlx5_rxq_ctrl *rxq_ctrl =
- mlx5_rxq_ctrl_get(dev, ind_tbl->queues[0]);
- rxq_obj_type = rxq_ctrl != NULL ? rxq_ctrl->type :
- MLX5_RXQ_TYPE_STANDARD;
-
+ is_hairpin = mlx5_rxq_is_hairpin(dev, ind_tbl->queues[0]);
/* Enable TIR LRO only if all the queues were configured for. */
for (i = 0; i < ind_tbl->queues_n; ++i) {
struct mlx5_rxq_data *rxq_i =
@@ -728,7 +724,7 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
}
}
} else {
- rxq_obj_type = priv->drop_queue.rxq->ctrl->type;
+ is_hairpin = priv->drop_queue.rxq->ctrl->is_hairpin;
}
memset(tir_attr, 0, sizeof(*tir_attr));
tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
@@ -759,7 +755,7 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
}
- if (rxq_obj_type == MLX5_RXQ_TYPE_HAIRPIN)
+ if (is_hairpin)
tir_attr->transport_domain = priv->sh->td->id;
else
tir_attr->transport_domain = priv->sh->tdn;
@@ -940,7 +936,7 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
goto error;
}
rxq_obj->rxq_ctrl = rxq_ctrl;
- rxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD;
+ rxq_ctrl->is_hairpin = false;
rxq_ctrl->sh = priv->sh;
rxq_ctrl->obj = rxq_obj;
rxq->ctrl = rxq_ctrl;
@@ -1242,7 +1238,7 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq_data, struct mlx5_txq_ctrl, txq);
- if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
+ if (txq_ctrl->is_hairpin)
return mlx5_txq_obj_hairpin_new(dev, idx);
#if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
@@ -1381,7 +1377,7 @@ void
mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
{
MLX5_ASSERT(txq_obj);
- if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
+ if (txq_obj->txq_ctrl->is_hairpin) {
if (txq_obj->tis)
claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 72bf8ac914..406761ccf8 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -173,7 +173,7 @@ mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev)
for (i = 0, j = 0; i < rxqs_n; i++) {
struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
- if (rxq_ctrl && rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl && !rxq_ctrl->is_hairpin)
rss_queue_arr[j++] = i;
}
rss_queue_n = j;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 5a4e000c12..09701a73c1 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1788,7 +1788,7 @@ mlx5_validate_rss_queues(struct rte_eth_dev *dev,
const char **error, uint32_t *queue_idx)
{
const struct mlx5_priv *priv = dev->data->dev_private;
- enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
+ bool is_hairpin = false;
uint32_t i;
for (i = 0; i != queues_n; ++i) {
@@ -1805,9 +1805,9 @@ mlx5_validate_rss_queues(struct rte_eth_dev *dev,
*queue_idx = i;
return -EINVAL;
}
- if (i == 0)
- rxq_type = rxq_ctrl->type;
- if (rxq_type != rxq_ctrl->type) {
+ if (i == 0 && rxq_ctrl->is_hairpin)
+ is_hairpin = true;
+ if (is_hairpin != rxq_ctrl->is_hairpin) {
*error = "combining hairpin and regular RSS queues is not supported";
*queue_idx = i;
return -ENOTSUP;
@@ -5885,15 +5885,13 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
const struct rte_flow_action_queue *queue;
queue = qrss->conf;
- if (mlx5_rxq_get_type(dev, queue->index) ==
- MLX5_RXQ_TYPE_HAIRPIN)
+ if (mlx5_rxq_is_hairpin(dev, queue->index))
qrss = NULL;
} else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
const struct rte_flow_action_rss *rss;
rss = qrss->conf;
- if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
- MLX5_RXQ_TYPE_HAIRPIN)
+ if (mlx5_rxq_is_hairpin(dev, rss->queue[0]))
qrss = NULL;
}
}
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 7a012f7bb9..313dc64604 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -5771,8 +5771,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags,
}
/* Continue validation for Xcap actions.*/
if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
- (queue_index == 0xFFFF ||
- mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+ (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index))) {
if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
MLX5_FLOW_XCAP_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
@@ -7957,8 +7956,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
*/
if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
MLX5_FLOW_VLAN_ACTIONS)) &&
- (queue_index == 0xFFFF ||
- mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
+ (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index) ||
((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
conf->tx_explicit != 0))) {
if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
@@ -10948,10 +10946,8 @@ flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
{
const struct mlx5_rte_flow_item_tx_queue *queue_m;
const struct mlx5_rte_flow_item_tx_queue *queue_v;
- void *misc_m =
- MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
- void *misc_v =
- MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
struct mlx5_txq_ctrl *txq;
uint32_t queue, mask;
@@ -10962,7 +10958,7 @@ flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
txq = mlx5_txq_get(dev, queue_v->queue);
if (!txq)
return;
- if (txq->type == MLX5_TXQ_TYPE_HAIRPIN)
+ if (txq->is_hairpin)
queue = txq->obj->sq->id;
else
queue = txq->obj->sq_obj.sq->id;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 295dba063b..fbc86dcef2 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -141,12 +141,6 @@ struct mlx5_rxq_data {
/* Buffer split segment descriptions - sizes, offsets, pools. */
} __rte_cache_aligned;
-enum mlx5_rxq_type {
- MLX5_RXQ_TYPE_STANDARD, /* Standard Rx queue. */
- MLX5_RXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */
- MLX5_RXQ_TYPE_UNDEFINED,
-};
-
/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
struct mlx5_rxq_data rxq; /* Data path structure. */
@@ -154,7 +148,7 @@ struct mlx5_rxq_ctrl {
LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
struct mlx5_dev_ctx_shared *sh; /* Shared context. */
- enum mlx5_rxq_type type; /* Rxq type. */
+ bool is_hairpin; /* Whether RxQ type is Hairpin. */
unsigned int socket; /* CPU socket ID for allocations. */
LIST_ENTRY(mlx5_rxq_ctrl) share_entry; /* Entry in shared RXQ list. */
uint32_t share_group; /* Group ID of shared RXQ. */
@@ -258,7 +252,7 @@ struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);
-enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
+bool mlx5_rxq_is_hairpin(struct rte_eth_dev *dev, uint16_t idx);
const struct rte_eth_hairpin_conf *mlx5_rxq_get_hairpin_conf
(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_hrxq *mlx5_drop_action_create(struct rte_eth_dev *dev);
@@ -632,8 +626,7 @@ mlx5_mprq_enabled(struct rte_eth_dev *dev)
for (i = 0; i < priv->rxqs_n; ++i) {
struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
- if (rxq_ctrl == NULL ||
- rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl == NULL || rxq_ctrl->is_hairpin)
continue;
n_ibv++;
if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index e7284f9da9..e96584d55d 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1391,8 +1391,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
struct mlx5_rxq_data *rxq;
- if (rxq_ctrl == NULL ||
- rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl == NULL || rxq_ctrl->is_hairpin)
continue;
rxq = &rxq_ctrl->rxq;
n_ibv++;
@@ -1480,8 +1479,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
for (i = 0; i != priv->rxqs_n; ++i) {
struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
- if (rxq_ctrl == NULL ||
- rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl == NULL || rxq_ctrl->is_hairpin)
continue;
rxq_ctrl->rxq.mprq_mp = mp;
}
@@ -1798,7 +1796,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
rte_errno = ENOSPC;
goto error;
}
- tmpl->type = MLX5_RXQ_TYPE_STANDARD;
+ tmpl->is_hairpin = false;
if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
&priv->sh->cdev->mr_scache.dev_gen, socket)) {
/* rte_errno is already set. */
@@ -1969,7 +1967,7 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
LIST_INIT(&tmpl->owners);
rxq->ctrl = tmpl;
LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
- tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
+ tmpl->is_hairpin = true;
tmpl->socket = SOCKET_ID_ANY;
tmpl->rxq.rss_hash = 0;
tmpl->rxq.port_id = dev->data->port_id;
@@ -2120,7 +2118,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
mlx5_free(rxq_ctrl->obj);
rxq_ctrl->obj = NULL;
}
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
+ if (!rxq_ctrl->is_hairpin) {
if (!rxq_ctrl->started)
rxq_free_elts(rxq_ctrl);
dev->data->rx_queue_state[idx] =
@@ -2129,7 +2127,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
} else { /* Refcnt zero, closing device. */
LIST_REMOVE(rxq, owner_entry);
if (LIST_EMPTY(&rxq_ctrl->owners)) {
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+ if (!rxq_ctrl->is_hairpin)
mlx5_mr_btree_free
(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
if (rxq_ctrl->rxq.shared)
@@ -2169,7 +2167,7 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
}
/**
- * Get a Rx queue type.
+ * Check whether RxQ type is Hairpin.
*
* @param dev
* Pointer to Ethernet device.
@@ -2177,17 +2175,15 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
* Rx queue index.
*
* @return
- * The Rx queue type.
+ * True if Rx queue type is Hairpin, otherwise False.
*/
-enum mlx5_rxq_type
-mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
+bool
+mlx5_rxq_is_hairpin(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
- if (idx < priv->rxqs_n && rxq_ctrl != NULL)
- return rxq_ctrl->type;
- return MLX5_RXQ_TYPE_UNDEFINED;
+ return (idx < priv->rxqs_n && rxq_ctrl != NULL && rxq_ctrl->is_hairpin);
}
/*
@@ -2204,14 +2200,9 @@ mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
const struct rte_eth_hairpin_conf *
mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
{
- struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
- if (idx < priv->rxqs_n && rxq != NULL) {
- if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
- return &rxq->hairpin_conf;
- }
- return NULL;
+ return mlx5_rxq_is_hairpin(dev, idx) ? &rxq->hairpin_conf : NULL;
}
/**
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 74c3bc8a13..fe8b42c414 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -59,7 +59,7 @@ mlx5_txq_start(struct rte_eth_dev *dev)
if (!txq_ctrl)
continue;
- if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
+ if (!txq_ctrl->is_hairpin)
txq_alloc_elts(txq_ctrl);
MLX5_ASSERT(!txq_ctrl->obj);
txq_ctrl->obj = mlx5_malloc(flags, sizeof(struct mlx5_txq_obj),
@@ -77,7 +77,7 @@ mlx5_txq_start(struct rte_eth_dev *dev)
txq_ctrl->obj = NULL;
goto error;
}
- if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
+ if (!txq_ctrl->is_hairpin) {
size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs);
txq_data->fcqs = mlx5_malloc(flags, size,
@@ -167,7 +167,7 @@ mlx5_rxq_ctrl_prepare(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,
{
int ret = 0;
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
+ if (!rxq_ctrl->is_hairpin) {
/*
* Pre-register the mempools. Regardless of whether
* the implicit registration is enabled or not,
@@ -280,7 +280,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
txq_ctrl = mlx5_txq_get(dev, i);
if (!txq_ctrl)
continue;
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+ if (!txq_ctrl->is_hairpin ||
txq_ctrl->hairpin_conf.peers[0].port != self_port) {
mlx5_txq_release(dev, i);
continue;
@@ -299,7 +299,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
if (!txq_ctrl)
continue;
/* Skip hairpin queues with other peer ports. */
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+ if (!txq_ctrl->is_hairpin ||
txq_ctrl->hairpin_conf.peers[0].port != self_port) {
mlx5_txq_release(dev, i);
continue;
@@ -322,7 +322,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
return -rte_errno;
}
rxq_ctrl = rxq->ctrl;
- if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
+ if (!rxq_ctrl->is_hairpin ||
rxq->hairpin_conf.peers[0].queue != i) {
rte_errno = ENOMEM;
DRV_LOG(ERR, "port %u Tx queue %d can't be binded to "
@@ -412,7 +412,7 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
dev->data->port_id, peer_queue);
return -rte_errno;
}
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ if (!txq_ctrl->is_hairpin) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d is not a hairpin Txq",
dev->data->port_id, peer_queue);
@@ -444,7 +444,7 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
return -rte_errno;
}
rxq_ctrl = rxq->ctrl;
- if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
+ if (!rxq_ctrl->is_hairpin) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d is not a hairpin Rxq",
dev->data->port_id, peer_queue);
@@ -510,7 +510,7 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
dev->data->port_id, cur_queue);
return -rte_errno;
}
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ if (!txq_ctrl->is_hairpin) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d not a hairpin Txq",
dev->data->port_id, cur_queue);
@@ -570,7 +570,7 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
return -rte_errno;
}
rxq_ctrl = rxq->ctrl;
- if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
+ if (!rxq_ctrl->is_hairpin) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
dev->data->port_id, cur_queue);
@@ -644,7 +644,7 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
dev->data->port_id, cur_queue);
return -rte_errno;
}
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ if (!txq_ctrl->is_hairpin) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d not a hairpin Txq",
dev->data->port_id, cur_queue);
@@ -683,7 +683,7 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
return -rte_errno;
}
rxq_ctrl = rxq->ctrl;
- if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
+ if (!rxq_ctrl->is_hairpin) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
dev->data->port_id, cur_queue);
@@ -751,7 +751,7 @@ mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
txq_ctrl = mlx5_txq_get(dev, i);
if (txq_ctrl == NULL)
continue;
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ if (!txq_ctrl->is_hairpin) {
mlx5_txq_release(dev, i);
continue;
}
@@ -791,7 +791,7 @@ mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
txq_ctrl = mlx5_txq_get(dev, i);
if (txq_ctrl == NULL)
continue;
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ if (!txq_ctrl->is_hairpin) {
mlx5_txq_release(dev, i);
continue;
}
@@ -886,7 +886,7 @@ mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
txq_ctrl = mlx5_txq_get(dev, i);
if (txq_ctrl == NULL)
continue;
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ if (!txq_ctrl->is_hairpin) {
mlx5_txq_release(dev, i);
continue;
}
@@ -1016,7 +1016,7 @@ mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
txq_ctrl = mlx5_txq_get(dev, i);
if (!txq_ctrl)
continue;
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ if (!txq_ctrl->is_hairpin) {
mlx5_txq_release(dev, i);
continue;
}
@@ -1040,7 +1040,7 @@ mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
if (rxq == NULL)
continue;
rxq_ctrl = rxq->ctrl;
- if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN)
+ if (!rxq_ctrl->is_hairpin)
continue;
pp = rxq->hairpin_conf.peers[0].port;
if (pp >= RTE_MAX_ETHPORTS) {
@@ -1318,7 +1318,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
if (!txq_ctrl)
continue;
/* Only Tx implicit mode requires the default Tx flow. */
- if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN &&
+ if (txq_ctrl->is_hairpin &&
txq_ctrl->hairpin_conf.tx_explicit == 0 &&
txq_ctrl->hairpin_conf.peers[0].port ==
priv->dev_data->port_id) {
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 0adc3f4839..89dac0c65a 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -169,17 +169,12 @@ struct mlx5_txq_data {
/* Storage for queued packets, must be the last field. */
} __rte_cache_aligned;
-enum mlx5_txq_type {
- MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */
- MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Tx queue. */
-};
-
/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
uint32_t refcnt; /* Reference counter. */
unsigned int socket; /* CPU socket ID for allocations. */
- enum mlx5_txq_type type; /* The txq ctrl type. */
+ bool is_hairpin; /* Whether TxQ type is Hairpin. */
unsigned int max_inline_data; /* Max inline data. */
unsigned int max_tso_header; /* Max TSO header size. */
struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index f128c3d1a5..0140f8b3b2 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -527,7 +527,7 @@ txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
return -rte_errno;
}
- if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
+ if (txq_ctrl->is_hairpin)
return 0;
MLX5_ASSERT(ppriv);
/*
@@ -570,7 +570,7 @@ txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
rte_errno = ENOMEM;
}
- if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
+ if (txq_ctrl->is_hairpin)
return;
addr = ppriv->uar_table[txq_ctrl->txq.idx].db;
rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
@@ -631,7 +631,7 @@ mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
continue;
txq = (*priv->txqs)[i];
txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
- if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
+ if (txq_ctrl->is_hairpin)
continue;
MLX5_ASSERT(txq->idx == (uint16_t)i);
ret = txq_uar_init_secondary(txq_ctrl, fd);
@@ -1107,7 +1107,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
goto error;
}
__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
- tmpl->type = MLX5_TXQ_TYPE_STANDARD;
+ tmpl->is_hairpin = false;
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
error:
@@ -1150,7 +1150,7 @@ mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->txq.port_id = dev->data->port_id;
tmpl->txq.idx = idx;
tmpl->hairpin_conf = *hairpin_conf;
- tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
+ tmpl->is_hairpin = true;
__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
@@ -1209,7 +1209,7 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
mlx5_free(txq_ctrl->obj);
txq_ctrl->obj = NULL;
}
- if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
+ if (!txq_ctrl->is_hairpin) {
if (txq_ctrl->txq.fcqs) {
mlx5_free(txq_ctrl->txq.fcqs);
txq_ctrl->txq.fcqs = NULL;
@@ -1218,7 +1218,7 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
}
if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
- if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
+ if (!txq_ctrl->is_hairpin)
mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
LIST_REMOVE(txq_ctrl, next);
mlx5_free(txq_ctrl);
--
2.25.1
next prev parent reply other threads:[~2022-02-24 23:25 UTC|newest]
Thread overview: 26+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-02-22 21:04 [PATCH 0/6] mlx5: external RxQ support Michael Baum
2022-02-22 21:04 ` [PATCH 1/6] common/mlx5: glue device and PD importation Michael Baum
2022-02-22 21:04 ` [PATCH 2/6] common/mlx5: add remote PD and CTX support Michael Baum
2022-02-22 21:04 ` [PATCH 3/6] net/mlx5: optimize RxQ/TxQ control structure Michael Baum
2022-02-22 21:04 ` [PATCH 4/6] net/mlx5: add external RxQ mapping API Michael Baum
2022-02-22 21:04 ` [PATCH 5/6] net/mlx5: support queue/RSS action for external RxQ Michael Baum
2022-02-22 21:04 ` [PATCH 6/6] app/testpmd: add test " Michael Baum
2022-02-23 18:48 ` [PATCH v2 0/6] mlx5: external RxQ support Michael Baum
2022-02-23 18:48 ` [PATCH v2 1/6] common/mlx5: consider local functions as internal Michael Baum
2022-02-23 18:48 ` [PATCH v2 2/6] common/mlx5: glue device and PD importation Michael Baum
2022-02-23 18:48 ` [PATCH v2 3/6] common/mlx5: add remote PD and CTX support Michael Baum
2022-02-23 18:48 ` [PATCH v2 4/6] net/mlx5: optimize RxQ/TxQ control structure Michael Baum
2022-02-23 18:48 ` [PATCH v2 5/6] net/mlx5: add external RxQ mapping API Michael Baum
2022-02-23 18:48 ` [PATCH v2 6/6] net/mlx5: support queue/RSS action for external RxQ Michael Baum
2022-02-24 8:38 ` [PATCH v2 0/6] mlx5: external RxQ support Matan Azrad
2022-02-24 23:25 ` [PATCH v3 " Michael Baum
2022-02-24 23:25 ` [PATCH v3 1/6] common/mlx5: consider local functions as internal Michael Baum
2022-02-25 18:01 ` Ferruh Yigit
2022-02-25 18:38 ` Thomas Monjalon
2022-02-25 19:13 ` Ferruh Yigit
2022-02-24 23:25 ` [PATCH v3 2/6] common/mlx5: glue device and PD importation Michael Baum
2022-02-24 23:25 ` [PATCH v3 3/6] common/mlx5: add remote PD and CTX support Michael Baum
2022-02-24 23:25 ` Michael Baum [this message]
2022-02-24 23:25 ` [PATCH v3 5/6] net/mlx5: add external RxQ mapping API Michael Baum
2022-02-24 23:25 ` [PATCH v3 6/6] net/mlx5: support queue/RSS action for external RxQ Michael Baum
2022-02-25 17:39 ` [PATCH v3 0/6] mlx5: external RxQ support Thomas Monjalon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220224232511.3238707-5-michaelba@nvidia.com \
--to=michaelba@nvidia.com \
--cc=dev@dpdk.org \
--cc=matan@nvidia.com \
--cc=rasland@nvidia.com \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).