From: Suanming Mou <suanmingm@nvidia.com>
To: Dariusz Sosnowski <dsosnowski@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
Ori Kam <orika@nvidia.com>, Matan Azrad <matan@nvidia.com>
Cc: <dev@dpdk.org>, <rasland@nvidia.com>
Subject: [PATCH 3/3] net/mlx5: add external Tx queue map and unmap
Date: Fri, 31 May 2024 11:50:34 +0800 [thread overview]
Message-ID: <20240531035034.1731943-3-suanmingm@nvidia.com> (raw)
In-Reply-To: <20240531035034.1731943-1-suanmingm@nvidia.com>
For using externally created Tx queues in RTE_FLOW_ITEM_TX_QUEUE,
this commit provides the map and unmap functions to convert the
externally created SQ's devx ID to DPDK flow item Tx queue ID.
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_os.c | 12 ++-
drivers/net/mlx5/mlx5.c | 5 +
drivers/net/mlx5/mlx5.h | 7 ++
drivers/net/mlx5/mlx5_defs.h | 3 +
drivers/net/mlx5/mlx5_devx.c | 40 ++++++++
drivers/net/mlx5/mlx5_devx.h | 1 +
drivers/net/mlx5/mlx5_ethdev.c | 8 ++
drivers/net/mlx5/mlx5_flow.h | 6 ++
drivers/net/mlx5/mlx5_rx.h | 6 --
drivers/net/mlx5/mlx5_rxq.c | 22 +----
drivers/net/mlx5/mlx5_tx.h | 25 +++++
drivers/net/mlx5/mlx5_txq.c | 152 +++++++++++++++++++++++++++++++
drivers/net/mlx5/rte_pmd_mlx5.h | 48 ++++++++++
drivers/net/mlx5/version.map | 3 +
14 files changed, 312 insertions(+), 26 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 8cfbc25430..bb566ea236 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1224,7 +1224,16 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = ENOMEM;
goto error;
}
- DRV_LOG(DEBUG, "External RxQ is supported.");
+ priv->ext_txqs = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
+ sizeof(struct mlx5_external_q) *
+ MLX5_MAX_EXT_TX_QUEUES, 0,
+ SOCKET_ID_ANY);
+ if (priv->ext_txqs == NULL) {
+ DRV_LOG(ERR, "Fail to allocate external TxQ array.");
+ err = ENOMEM;
+ goto error;
+ }
+ DRV_LOG(DEBUG, "External queue is supported.");
}
priv->sh = sh;
priv->dev_port = spawn->phys_port;
@@ -1763,6 +1772,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
if (eth_dev && priv->flex_item_map)
mlx5_flex_item_port_cleanup(eth_dev);
mlx5_free(priv->ext_rxqs);
+ mlx5_free(priv->ext_txqs);
mlx5_free(priv);
if (eth_dev != NULL)
eth_dev->data->dev_private = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d15302d00d..e41b1e82d7 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2436,6 +2436,10 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (ret)
DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
dev->data->port_id);
+ ret = mlx5_ext_txq_verify(dev);
+ if (ret)
+ DRV_LOG(WARNING, "Port %u some external TxQ still remain.",
+ dev->data->port_id);
ret = mlx5_txq_verify(dev);
if (ret)
DRV_LOG(WARNING, "port %u some Tx queues still remain",
@@ -2447,6 +2451,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (priv->hrxqs)
mlx5_list_destroy(priv->hrxqs);
mlx5_free(priv->ext_rxqs);
+ mlx5_free(priv->ext_txqs);
sh->port[priv->dev_port - 1].nl_ih_port_id = RTE_MAX_ETHPORTS;
/*
* The interrupt handler port id must be reset before priv is reset
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 07d050b225..5b23043b8b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -381,6 +381,12 @@ struct mlx5_lb_ctx {
RTE_ATOMIC(uint16_t) refcnt; /* Reference count for representors. */
};
+/* External queue descriptor. */
+struct mlx5_external_q {
+ uint32_t hw_id; /* Queue index in the Hardware. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
+};
+
/* HW steering queue job descriptor type. */
enum mlx5_hw_job_type {
MLX5_HW_Q_JOB_TYPE_CREATE, /* Flow create job type. */
@@ -1883,6 +1889,7 @@ struct mlx5_priv {
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
struct mlx5_external_q *ext_rxqs; /* External RX queues array. */
+ struct mlx5_external_q *ext_txqs; /* External TX queues array. */
struct mlx5_rxq_priv *(*rxq_privs)[]; /* RX queue non-shared data. */
struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index dc5216cb24..9c454983be 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -183,6 +183,9 @@
/* Maximum number of external Rx queues supported by rte_flow */
#define MLX5_MAX_EXT_RX_QUEUES (UINT16_MAX - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN + 1)
+/* Maximum number of external Tx queues supported by rte_flow */
+#define MLX5_MAX_EXT_TX_QUEUES (UINT16_MAX - MLX5_EXTERNAL_TX_QUEUE_ID_MIN + 1)
+
/*
* Linux definition of static_assert is found in /usr/include/assert.h.
* Windows does not require a redefinition.
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index cae9d578ab..f23eb1def6 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -27,6 +27,46 @@
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
+/**
+ * Validate given external queue's port is valid or not.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ *
+ * @return
+ * 0 on success, non-0 otherwise
+ */
+int
+mlx5_devx_extq_port_validate(uint16_t port_id)
+{
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+
+ if (rte_eth_dev_is_valid_port(port_id) < 0) {
+ DRV_LOG(ERR, "There is no Ethernet device for port %u.",
+ port_id);
+ rte_errno = ENODEV;
+ return -rte_errno;
+ }
+ dev = &rte_eth_devices[port_id];
+ priv = dev->data->dev_private;
+ if (!mlx5_imported_pd_and_ctx(priv->sh->cdev)) {
+ DRV_LOG(ERR, "Port %u "
+ "external queue isn't supported on local PD and CTX.",
+ port_id);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ if (!mlx5_devx_obj_ops_en(priv->sh)) {
+ DRV_LOG(ERR,
+ "Port %u external queue isn't supported by Verbs API.",
+ port_id);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ return 0;
+}
+
/**
* Modify RQ vlan stripping offload
*
diff --git a/drivers/net/mlx5/mlx5_devx.h b/drivers/net/mlx5/mlx5_devx.h
index ebd1da455a..4ab8cfbd22 100644
--- a/drivers/net/mlx5/mlx5_devx.h
+++ b/drivers/net/mlx5/mlx5_devx.h
@@ -12,6 +12,7 @@ int mlx5_txq_devx_modify(struct mlx5_txq_obj *obj,
enum mlx5_txq_modify_type type, uint8_t dev_port);
void mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj);
int mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type);
+int mlx5_devx_extq_port_validate(uint16_t port_id);
extern struct mlx5_obj_ops devx_obj_ops;
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index aea799341c..1b721cda5e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -123,6 +123,14 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
dev->data->port_id, priv->txqs_n, txqs_n);
priv->txqs_n = txqs_n;
}
+ if (priv->ext_txqs && txqs_n >= MLX5_EXTERNAL_TX_QUEUE_ID_MIN) {
+ DRV_LOG(ERR, "port %u cannot handle this many Tx queues (%u), "
+ "the maximal number of internal Tx queues is %u",
+ dev->data->port_id, txqs_n,
+ MLX5_EXTERNAL_TX_QUEUE_ID_MIN - 1);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
if (rxqs_n > priv->sh->dev_cap.ind_table_max_size) {
DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
dev->data->port_id, rxqs_n);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 8f53e82985..9a359da042 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1954,12 +1954,18 @@ static __rte_always_inline int
flow_hw_get_sqn(struct rte_eth_dev *dev, uint16_t tx_queue, uint32_t *sqn)
{
struct mlx5_txq_ctrl *txq;
+ struct mlx5_external_q *ext_txq;
/* Means Tx queue is PF0. */
if (tx_queue == UINT16_MAX) {
*sqn = 0;
return 0;
}
+ if (mlx5_is_external_txq(dev, tx_queue)) {
+ ext_txq = mlx5_ext_txq_get(dev, tx_queue);
+ *sqn = ext_txq->hw_id;
+ return 0;
+ }
txq = mlx5_txq_get(dev, tx_queue);
if (unlikely(!txq))
return -ENOENT;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index decb14e708..1485556d89 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -185,12 +185,6 @@ struct mlx5_rxq_priv {
uint32_t lwm_devx_subscribed:1;
};
-/* External RX queue descriptor. */
-struct mlx5_external_q {
- uint32_t hw_id; /* Queue index in the Hardware. */
- RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
-};
-
/* mlx5_rxq.c */
extern uint8_t rss_hash_default_key[];
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d6c84b84e4..f13fc3b353 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -3211,6 +3211,7 @@ mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
{
struct rte_eth_dev *dev;
struct mlx5_priv *priv;
+ int ret;
if (dpdk_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN) {
DRV_LOG(ERR, "Queue index %u should be in range: [%u, %u].",
@@ -3218,28 +3219,11 @@ mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
rte_errno = EINVAL;
return NULL;
}
- if (rte_eth_dev_is_valid_port(port_id) < 0) {
- DRV_LOG(ERR, "There is no Ethernet device for port %u.",
- port_id);
- rte_errno = ENODEV;
+ ret = mlx5_devx_extq_port_validate(port_id);
+ if (unlikely(ret))
return NULL;
- }
dev = &rte_eth_devices[port_id];
priv = dev->data->dev_private;
- if (!mlx5_imported_pd_and_ctx(priv->sh->cdev)) {
- DRV_LOG(ERR, "Port %u "
- "external RxQ isn't supported on local PD and CTX.",
- port_id);
- rte_errno = ENOTSUP;
- return NULL;
- }
- if (!mlx5_devx_obj_ops_en(priv->sh)) {
- DRV_LOG(ERR,
- "Port %u external RxQ isn't supported by Verbs API.",
- port_id);
- rte_errno = ENOTSUP;
- return NULL;
- }
/*
* When user configures remote PD and CTX and device creates RxQ by
* DevX, external RxQs array is allocated.
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 0d77ff89de..983913faa2 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -227,6 +227,8 @@ void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);
int mlx5_count_aggr_ports(struct rte_eth_dev *dev);
int mlx5_map_aggr_tx_affinity(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint8_t affinity);
+int mlx5_ext_txq_verify(struct rte_eth_dev *dev);
+struct mlx5_external_q *mlx5_ext_txq_get(struct rte_eth_dev *dev, uint16_t idx);
/* mlx5_tx.c */
@@ -3788,4 +3790,27 @@ mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
return loc.pkts_sent;
}
+/**
+ * Check whether given TxQ is external.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param queue_idx
+ * Tx queue index.
+ *
+ * @return
+ * True if is external TxQ, otherwise false.
+ */
+static __rte_always_inline bool
+mlx5_is_external_txq(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_external_q *txq;
+
+ if (!priv->ext_txqs || queue_idx < MLX5_EXTERNAL_TX_QUEUE_ID_MIN)
+ return false;
+ txq = &priv->ext_txqs[queue_idx - MLX5_EXTERNAL_TX_QUEUE_ID_MIN];
+ return !!rte_atomic_load_explicit(&txq->refcnt, rte_memory_order_relaxed);
+}
+
#endif /* RTE_PMD_MLX5_TX_H_ */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index da4236f99a..8eb1ae1f03 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -27,6 +27,7 @@
#include "mlx5_tx.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
+#include "mlx5_devx.h"
#include "rte_pmd_mlx5.h"
#include "mlx5_flow.h"
@@ -1183,6 +1184,57 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
return ctrl;
}
+/**
+ * Get an external Tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * External Tx queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_external_q *
+mlx5_ext_txq_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ MLX5_ASSERT(mlx5_is_external_txq(dev, idx));
+ return &priv->ext_txqs[idx - MLX5_EXTERNAL_TX_QUEUE_ID_MIN];
+}
+
+/**
+ * Verify the external Tx Queue list is empty.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of object not released.
+ */
+int
+mlx5_ext_txq_verify(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_external_q *txq;
+ uint32_t i;
+ int ret = 0;
+
+ if (priv->ext_txqs == NULL)
+ return 0;
+
+ for (i = MLX5_EXTERNAL_TX_QUEUE_ID_MIN; i <= UINT16_MAX ; ++i) {
+ txq = mlx5_ext_txq_get(dev, i);
+ if (txq->refcnt < 2)
+ continue;
+ DRV_LOG(DEBUG, "Port %u external TxQ %u still referenced.",
+ dev->data->port_id, i);
+ ++ret;
+ }
+ return ret;
+}
+
/**
* Release a Tx queue.
*
@@ -1416,3 +1468,103 @@ int mlx5_map_aggr_tx_affinity(struct rte_eth_dev *dev, uint16_t tx_queue_id,
txq_ctrl->txq.tx_aggr_affinity = affinity;
return 0;
}
+
+/**
+ * Validate given external TxQ rte_flow index, and get pointer to concurrent
+ * external TxQ object to map/unmap.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] dpdk_idx
+ * Tx Queue index in rte_flow.
+ *
+ * @return
+ * Pointer to concurrent external TxQ on success,
+ * NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_external_q *
+mlx5_external_tx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
+{
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+ int ret;
+
+ if (dpdk_idx < MLX5_EXTERNAL_TX_QUEUE_ID_MIN) {
+ DRV_LOG(ERR, "Queue index %u should be in range: [%u, %u].",
+ dpdk_idx, MLX5_EXTERNAL_TX_QUEUE_ID_MIN, UINT16_MAX);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ ret = mlx5_devx_extq_port_validate(port_id);
+ if (unlikely(ret))
+ return NULL;
+ dev = &rte_eth_devices[port_id];
+ priv = dev->data->dev_private;
+ /*
+ * When user configures remote PD and CTX and device creates TxQ by
+ * DevX, external TxQs array is allocated.
+ */
+ MLX5_ASSERT(priv->ext_txqs != NULL);
+ return &priv->ext_txqs[dpdk_idx - MLX5_EXTERNAL_TX_QUEUE_ID_MIN];
+}
+
+int
+rte_pmd_mlx5_external_tx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
+ uint32_t hw_idx)
+{
+ struct mlx5_external_q *ext_txq;
+ uint32_t unmapped = 0;
+
+ ext_txq = mlx5_external_tx_queue_get_validate(port_id, dpdk_idx);
+ if (ext_txq == NULL)
+ return -rte_errno;
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_txq->refcnt, &unmapped, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
+ if (ext_txq->hw_id != hw_idx) {
+ DRV_LOG(ERR, "Port %u external TxQ index %u "
+ "is already mapped to HW index (requesting is "
+ "%u, existing is %u).",
+ port_id, dpdk_idx, hw_idx, ext_txq->hw_id);
+ rte_errno = EEXIST;
+ return -rte_errno;
+ }
+ DRV_LOG(WARNING, "Port %u external TxQ index %u "
+ "is already mapped to the requested HW index (%u)",
+ port_id, dpdk_idx, hw_idx);
+
+ } else {
+ ext_txq->hw_id = hw_idx;
+ DRV_LOG(DEBUG, "Port %u external TxQ index %u "
+ "is successfully mapped to the requested HW index (%u)",
+ port_id, dpdk_idx, hw_idx);
+ }
+ return 0;
+}
+
+int
+rte_pmd_mlx5_external_tx_queue_id_unmap(uint16_t port_id, uint16_t dpdk_idx)
+{
+ struct mlx5_external_q *ext_txq;
+ uint32_t mapped = 1;
+
+ ext_txq = mlx5_external_tx_queue_get_validate(port_id, dpdk_idx);
+ if (ext_txq == NULL)
+ return -rte_errno;
+ if (ext_txq->refcnt > 1) {
+ DRV_LOG(ERR, "Port %u external TxQ index %u still referenced.",
+ port_id, dpdk_idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_txq->refcnt, &mapped, 0,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
+ DRV_LOG(ERR, "Port %u external TxQ index %u doesn't exist.",
+ port_id, dpdk_idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ DRV_LOG(DEBUG,
+ "Port %u external TxQ index %u is successfully unmapped.",
+ port_id, dpdk_idx);
+ return 0;
+}
diff --git a/drivers/net/mlx5/rte_pmd_mlx5.h b/drivers/net/mlx5/rte_pmd_mlx5.h
index 004be0eea1..359e4192c8 100644
--- a/drivers/net/mlx5/rte_pmd_mlx5.h
+++ b/drivers/net/mlx5/rte_pmd_mlx5.h
@@ -68,6 +68,11 @@ int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains);
*/
#define RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN (UINT16_MAX - 1000 + 1)
+/**
+ * External Tx queue rte_flow index minimal value.
+ */
+#define MLX5_EXTERNAL_TX_QUEUE_ID_MIN (UINT16_MAX - 1000 + 1)
+
/**
* Tag level to set the linear hash index.
*/
@@ -116,6 +121,49 @@ __rte_experimental
int rte_pmd_mlx5_external_rx_queue_id_unmap(uint16_t port_id,
uint16_t dpdk_idx);
+/**
+ * Update mapping between rte_flow Tx queue index (16 bits) and HW queue index (32
+ * bits) for TxQs which is created outside the PMD.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] dpdk_idx
+ * Queue index in rte_flow.
+ * @param[in] hw_idx
+ * Queue index in hardware.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * Possible values for rte_errno:
+ * - EEXIST - a mapping with the same rte_flow index already exists.
+ * - EINVAL - invalid rte_flow index, out of range.
+ * - ENODEV - there is no Ethernet device for this port id.
+ * - ENOTSUP - the port doesn't support external TxQ.
+ */
+__rte_experimental
+int rte_pmd_mlx5_external_tx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
+ uint32_t hw_idx);
+
+/**
+ * Remove mapping between rte_flow Tx queue index (16 bits) and HW queue index (32
+ * bits) for TxQs which is created outside the PMD.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] dpdk_idx
+ * Queue index in rte_flow.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * Possible values for rte_errno:
+ * - EINVAL - invalid index, out of range, still referenced or doesn't exist.
+ * - ENODEV - there is no Ethernet device for this port id.
+ * - ENOTSUP - the port doesn't support external TxQ.
+ */
+__rte_experimental
+int rte_pmd_mlx5_external_tx_queue_id_unmap(uint16_t port_id,
+ uint16_t dpdk_idx);
+
/**
* The rate of the host port shaper will be updated directly at the next
* available descriptor threshold event to the rate that comes with this flag set;
diff --git a/drivers/net/mlx5/version.map b/drivers/net/mlx5/version.map
index 8fb0e07303..8a78d14786 100644
--- a/drivers/net/mlx5/version.map
+++ b/drivers/net/mlx5/version.map
@@ -20,4 +20,7 @@ EXPERIMENTAL {
# added in 24.03
rte_pmd_mlx5_create_geneve_tlv_parser;
rte_pmd_mlx5_destroy_geneve_tlv_parser;
+ # added in 24.07
+ rte_pmd_mlx5_external_tx_queue_id_map;
+ rte_pmd_mlx5_external_tx_queue_id_unmap;
};
--
2.34.1
next prev parent reply other threads:[~2024-05-31 3:51 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-05-31 3:50 [PATCH 1/3] net/mlx5: add match with Tx queue item Suanming Mou
2024-05-31 3:50 ` [PATCH 2/3] net/mlx5: rename external Rx queue to external queue Suanming Mou
2024-06-05 8:14 ` Dariusz Sosnowski
2024-05-31 3:50 ` Suanming Mou [this message]
2024-06-05 8:17 ` [PATCH 3/3] net/mlx5: add external Tx queue map and unmap Dariusz Sosnowski
2024-06-05 8:23 ` Suanming Mou
2024-06-05 8:14 ` [PATCH 1/3] net/mlx5: add match with Tx queue item Dariusz Sosnowski
2024-06-05 9:31 ` [PATCH v2 " Suanming Mou
2024-06-05 9:31 ` [PATCH v2 2/3] net/mlx5: rename external Rx queue to external queue Suanming Mou
2024-06-05 9:31 ` [PATCH v2 3/3] net/mlx5: add external Tx queue map and unmap Suanming Mou
2024-06-05 15:16 ` Dariusz Sosnowski
2024-06-06 10:50 ` [PATCH v2 1/3] net/mlx5: add match with Tx queue item Raslan Darawsheh
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240531035034.1731943-3-suanmingm@nvidia.com \
--to=suanmingm@nvidia.com \
--cc=dev@dpdk.org \
--cc=dsosnowski@nvidia.com \
--cc=matan@nvidia.com \
--cc=orika@nvidia.com \
--cc=rasland@nvidia.com \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).