* [PATCH 1/3] net/mlx5: add match with Tx queue item
@ 2024-05-31 3:50 Suanming Mou
2024-05-31 3:50 ` [PATCH 2/3] net/mlx5: rename external Rx queue to external queue Suanming Mou
` (3 more replies)
From: Suanming Mou @ 2024-05-31 3:50 UTC (permalink / raw)
To: Dariusz Sosnowski, Viacheslav Ovsiienko, Ori Kam, Matan Azrad
Cc: dev, rasland
With the RTE_FLOW_ITEM_TYPE_TX_QUEUE item, users are able to set
the Tx queue index and create flows that match on that queue index.
This commit adds matching on the RTE_FLOW_ITEM_TYPE_TX_QUEUE item.
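For illustration only, a minimal sketch of using this item through the
synchronous flow API might look as below; the queue index, the egress
attribute, the count action and the port_id variable are arbitrary
choices for the example, and error handling is omitted:

    #include <rte_flow.h>

    /* Sketch: match packets sent from Tx queue 4 of port port_id. */
    struct rte_flow_item_tx_queue spec = { .tx_queue = 4 };
    struct rte_flow_item pattern[] = {
        {
            .type = RTE_FLOW_ITEM_TYPE_TX_QUEUE,
            .spec = &spec,
            /* Default mask is fully masked, as SWS requires below. */
            .mask = &rte_flow_item_tx_queue_mask,
        },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_attr attr = { .egress = 1 };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_COUNT },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error err;
    struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
                                            actions, &err);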
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
doc/guides/nics/features/mlx5.ini | 1 +
doc/guides/rel_notes/release_24_07.rst | 4 ++
drivers/net/mlx5/hws/mlx5dr_definer.c | 50 +++++++++++++++++++
drivers/net/mlx5/mlx5_flow.h | 58 ++++++++++++++++++++++
drivers/net/mlx5/mlx5_flow_dv.c | 69 ++++++++++++++++++++++++++
drivers/net/mlx5/mlx5_flow_hw.c | 1 +
6 files changed, 183 insertions(+)
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index 81a7067cc3..056e04275b 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -92,6 +92,7 @@ quota = Y
random = Y
tag = Y
tcp = Y
+tx_queue = Y
udp = Y
vlan = Y
vxlan = Y
diff --git a/doc/guides/rel_notes/release_24_07.rst b/doc/guides/rel_notes/release_24_07.rst
index ffbe9ce051..46efc04eac 100644
--- a/doc/guides/rel_notes/release_24_07.rst
+++ b/doc/guides/rel_notes/release_24_07.rst
@@ -81,6 +81,10 @@ New Features
* Added SSE/NEON vector datapath.
+* **Updated NVIDIA mlx5 driver.**
+
+ * Added match with Tx queue.
+
Removed Items
-------------
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index dabfac8abc..bc128c7b99 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -6,6 +6,7 @@
#define GTP_PDU_SC 0x85
#define BAD_PORT 0xBAD
+#define BAD_SQN 0xBAD
#define ETH_TYPE_IPV4_VXLAN 0x0800
#define ETH_TYPE_IPV6_VXLAN 0x86DD
#define UDP_VXLAN_PORT 4789
@@ -878,6 +879,22 @@ mlx5dr_definer_vxlan_gpe_rsvd0_set(struct mlx5dr_definer_fc *fc,
DR_SET(tag, rsvd0, fc->byte_off, fc->bit_off, fc->bit_mask);
}
+static void
+mlx5dr_definer_tx_queue_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ const struct rte_flow_item_tx_queue *v = item_spec;
+ uint32_t sqn = 0;
+ int ret;
+
+ ret = flow_hw_conv_sqn(fc->extra_data, v->tx_queue, &sqn);
+ if (unlikely(ret))
+ sqn = BAD_SQN;
+
+ DR_SET(tag, sqn, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
static int
mlx5dr_definer_conv_item_eth(struct mlx5dr_definer_conv_data *cd,
struct rte_flow_item *item,
@@ -1850,6 +1867,35 @@ mlx5dr_definer_conv_item_metadata(struct mlx5dr_definer_conv_data *cd,
return 0;
}
+static int
+mlx5dr_definer_conv_item_tx_queue(struct mlx5dr_definer_conv_data *cd,
+ struct rte_flow_item *item,
+ int item_idx)
+{
+ const struct rte_flow_item_tx_queue *m = item->mask;
+ struct mlx5dr_definer_fc *fc;
+
+ if (!m)
+ return 0;
+
+ if (m->tx_queue) {
+ fc = &cd->fc[MLX5DR_DEFINER_FNAME_SOURCE_QP];
+ fc->item_idx = item_idx;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ fc->tag_set = &mlx5dr_definer_tx_queue_set;
+ /* Use extra_data to save the DPDK port_id. */
+ fc->extra_data = flow_hw_get_port_id(cd->ctx);
+ if (fc->extra_data == UINT16_MAX) {
+ DR_LOG(ERR, "Invalid port for item tx_queue");
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DR_CALC_SET_HDR(fc, source_qp_gvmi, source_qp);
+ }
+
+ return 0;
+}
+
static int
mlx5dr_definer_conv_item_sq(struct mlx5dr_definer_conv_data *cd,
struct rte_flow_item *item,
@@ -3150,6 +3196,10 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
ret = mlx5dr_definer_conv_item_vxlan(&cd, items, i);
item_flags |= MLX5_FLOW_LAYER_VXLAN;
break;
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ ret = mlx5dr_definer_conv_item_tx_queue(&cd, items, i);
+ item_flags |= MLX5_FLOW_ITEM_SQ;
+ break;
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
ret = mlx5dr_definer_conv_item_sq(&cd, items, i);
item_flags |= MLX5_FLOW_ITEM_SQ;
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 8b4088e35e..8f53e82985 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -19,6 +19,7 @@
#include "mlx5.h"
#include "rte_pmd_mlx5.h"
#include "hws/mlx5dr.h"
+#include "mlx5_tx.h"
/* E-Switch Manager port, used for rte_flow_item_port_id. */
#define MLX5_PORT_ESW_MGR UINT32_MAX
@@ -1945,6 +1946,63 @@ struct flow_hw_port_info {
extern struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS];
+/*
+ * Get sqn for given tx_queue.
+ * Used in HWS rule creation.
+ */
+static __rte_always_inline int
+flow_hw_get_sqn(struct rte_eth_dev *dev, uint16_t tx_queue, uint32_t *sqn)
+{
+ struct mlx5_txq_ctrl *txq;
+
+ /* A tx_queue of UINT16_MAX means the Tx queue is PF0. */
+ if (tx_queue == UINT16_MAX) {
+ *sqn = 0;
+ return 0;
+ }
+ txq = mlx5_txq_get(dev, tx_queue);
+ if (unlikely(!txq))
+ return -ENOENT;
+ *sqn = mlx5_txq_get_sqn(txq);
+ mlx5_txq_release(dev, tx_queue);
+ return 0;
+}
+
+/*
+ * Convert tx_queue to sqn for the given rte_eth_dev port.
+ * Used in HWS rule creation.
+ */
+static __rte_always_inline int
+flow_hw_conv_sqn(uint16_t port_id, uint16_t tx_queue, uint32_t *sqn)
+{
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return -EINVAL;
+ return flow_hw_get_sqn(&rte_eth_devices[port_id], tx_queue, sqn);
+}
+
+/*
+ * Get the rte_eth_dev port_id that owns the given mlx5dr context.
+ * Used in HWS rule creation.
+ */
+static __rte_always_inline uint16_t
+flow_hw_get_port_id(void *dr_ctx)
+{
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+ uint16_t port_id;
+
+ MLX5_ETH_FOREACH_DEV(port_id, NULL) {
+ struct mlx5_priv *priv;
+
+ priv = rte_eth_devices[port_id].data->dev_private;
+ if (priv->dr_ctx == dr_ctx)
+ return port_id;
+ }
+#else
+ RTE_SET_USED(dr_ctx);
+#endif
+ return UINT16_MAX;
+}
+
/*
* Get metadata match tag and mask for given rte_eth_dev port.
* Used in HWS rule creation.
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 6f72185916..a834b3e2e0 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8023,6 +8023,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = MLX5_FLOW_ITEM_TAG;
break;
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
last_item = MLX5_FLOW_ITEM_SQ;
break;
@@ -12197,6 +12198,52 @@ flow_dv_translate_create_counter(struct rte_eth_dev *dev,
return counter;
}
+/**
+ * Add Tx queue matcher
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] key_type
+ * Set flow matcher mask or value.
+ *
+ * @return
+ * 0 on success, -errno otherwise and errno is set.
+ */
+static int
+flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev, void *key,
+ const struct rte_flow_item *item, uint32_t key_type)
+{
+ const struct rte_flow_item_tx_queue *queue_m;
+ const struct rte_flow_item_tx_queue *queue_v;
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ uint32_t tx_queue;
+ uint32_t sqn = 0;
+ int ret;
+
+ MLX5_ITEM_UPDATE(item, key_type, queue_v, queue_m, &rte_flow_item_tx_queue_mask);
+ if (!queue_m || !queue_v)
+ return -EINVAL;
+ if (key_type & MLX5_SET_MATCHER_V) {
+ tx_queue = queue_v->tx_queue;
+ if (key_type == MLX5_SET_MATCHER_SW_V)
+ tx_queue &= queue_m->tx_queue;
+ ret = flow_hw_get_sqn(dev, tx_queue, &sqn);
+ if (unlikely(ret))
+ return ret;
+ } else {
+ /* Due to the tx_queue to sqn conversion, only fully masked values are supported. */
+ if (queue_m->tx_queue != rte_flow_item_tx_queue_mask.tx_queue)
+ return -EINVAL;
+ sqn = UINT32_MAX;
+ }
+ MLX5_SET(fte_match_set_misc, misc_v, source_sqn, sqn);
+ return 0;
+}
+
/**
* Add SQ matcher
*
@@ -14167,6 +14214,14 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
flow_dv_translate_mlx5_item_tag(dev, key, items, key_type);
last_item = MLX5_FLOW_ITEM_TAG;
break;
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ ret = flow_dv_translate_item_tx_queue(dev, key, items, key_type);
+ if (ret)
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "invalid tx_queue item");
+ last_item = MLX5_FLOW_ITEM_SQ;
+ break;
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
flow_dv_translate_item_sq(key, items, key_type);
last_item = MLX5_FLOW_ITEM_SQ;
@@ -14397,6 +14452,20 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
wks.last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
MLX5_FLOW_ITEM_OUTER_FLEX;
break;
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ ret = flow_dv_translate_item_tx_queue(dev, match_value, items,
+ MLX5_SET_MATCHER_SW_V);
+ if (ret)
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "invalid tx_queue item spec");
+ ret = flow_dv_translate_item_tx_queue(dev, match_mask, items,
+ MLX5_SET_MATCHER_SW_M);
+ if (ret)
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "invalid tx_queue item mask");
+ break;
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
flow_dv_translate_item_sq(match_value, items,
MLX5_SET_MATCHER_SW_V);
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 68c5a36bbb..39c9a0ba50 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -7668,6 +7668,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_MPLS:
case RTE_FLOW_ITEM_TYPE_GENEVE:
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
case RTE_FLOW_ITEM_TYPE_GRE:
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
--
2.34.1
* [PATCH 2/3] net/mlx5: rename external Rx queue to external queue
2024-05-31 3:50 [PATCH 1/3] net/mlx5: add match with Tx queue item Suanming Mou
@ 2024-05-31 3:50 ` Suanming Mou
2024-06-05 8:14 ` Dariusz Sosnowski
2024-05-31 3:50 ` [PATCH 3/3] net/mlx5: add external Tx queue map and unmap Suanming Mou
` (2 subsequent siblings)
From: Suanming Mou @ 2024-05-31 3:50 UTC (permalink / raw)
To: Dariusz Sosnowski, Viacheslav Ovsiienko, Ori Kam, Matan Azrad
Cc: dev, rasland
Since external Tx queues are going to be supported, rename the
current external Rx queue struct to a generic external queue
struct so that it can be reused for both Rx and Tx.
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_os.c | 2 +-
drivers/net/mlx5/mlx5.h | 2 +-
drivers/net/mlx5/mlx5_devx.c | 2 +-
drivers/net/mlx5/mlx5_rx.h | 8 ++++----
drivers/net/mlx5/mlx5_rxq.c | 16 ++++++++--------
5 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 6dd12f0f68..8cfbc25430 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1216,7 +1216,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
*/
if (mlx5_imported_pd_and_ctx(sh->cdev) && mlx5_devx_obj_ops_en(sh)) {
priv->ext_rxqs = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
- sizeof(struct mlx5_external_rxq) *
+ sizeof(struct mlx5_external_q) *
MLX5_MAX_EXT_RX_QUEUES, 0,
SOCKET_ID_ANY);
if (priv->ext_rxqs == NULL) {
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 9e4a5feb49..07d050b225 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1882,7 +1882,7 @@ struct mlx5_priv {
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
- struct mlx5_external_rxq *ext_rxqs; /* External RX queues array. */
+ struct mlx5_external_q *ext_rxqs; /* External RX queues array. */
struct mlx5_rxq_priv *(*rxq_privs)[]; /* RX queue non-shared data. */
struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 9fa400fc48..cae9d578ab 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -673,7 +673,7 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
}
for (i = 0; i != queues_n; ++i) {
if (mlx5_is_external_rxq(dev, queues[i])) {
- struct mlx5_external_rxq *ext_rxq =
+ struct mlx5_external_q *ext_rxq =
mlx5_ext_rxq_get(dev, queues[i]);
rqt_attr->rq_list[i] = ext_rxq->hw_id;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index d008e4dd3a..decb14e708 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -186,7 +186,7 @@ struct mlx5_rxq_priv {
};
/* External RX queue descriptor. */
-struct mlx5_external_rxq {
+struct mlx5_external_q {
uint32_t hw_id; /* Queue index in the Hardware. */
RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
};
@@ -227,10 +227,10 @@ uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);
-struct mlx5_external_rxq *mlx5_ext_rxq_ref(struct rte_eth_dev *dev,
+struct mlx5_external_q *mlx5_ext_rxq_ref(struct rte_eth_dev *dev,
uint16_t idx);
uint32_t mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
-struct mlx5_external_rxq *mlx5_ext_rxq_get(struct rte_eth_dev *dev,
+struct mlx5_external_q *mlx5_ext_rxq_get(struct rte_eth_dev *dev,
uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
@@ -661,7 +661,7 @@ static __rte_always_inline bool
mlx5_is_external_rxq(struct rte_eth_dev *dev, uint16_t queue_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_external_rxq *rxq;
+ struct mlx5_external_q *rxq;
if (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
return false;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f67aaa6178..d6c84b84e4 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2133,10 +2133,10 @@ mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
* @return
* A pointer to the queue if it exists, NULL otherwise.
*/
-struct mlx5_external_rxq *
+struct mlx5_external_q *
mlx5_ext_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
{
- struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
+ struct mlx5_external_q *rxq = mlx5_ext_rxq_get(dev, idx);
rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
return rxq;
@@ -2156,7 +2156,7 @@ mlx5_ext_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
uint32_t
mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
{
- struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
+ struct mlx5_external_q *rxq = mlx5_ext_rxq_get(dev, idx);
return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
}
@@ -2172,7 +2172,7 @@ mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
* @return
* A pointer to the queue if it exists, NULL otherwise.
*/
-struct mlx5_external_rxq *
+struct mlx5_external_q *
mlx5_ext_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -2336,7 +2336,7 @@ int
mlx5_ext_rxq_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_external_rxq *rxq;
+ struct mlx5_external_q *rxq;
uint32_t i;
int ret = 0;
@@ -3206,7 +3206,7 @@ mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
* Pointer to concurrent external RxQ on success,
* NULL otherwise and rte_errno is set.
*/
-static struct mlx5_external_rxq *
+static struct mlx5_external_q *
mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
{
struct rte_eth_dev *dev;
@@ -3252,7 +3252,7 @@ int
rte_pmd_mlx5_external_rx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
uint32_t hw_idx)
{
- struct mlx5_external_rxq *ext_rxq;
+ struct mlx5_external_q *ext_rxq;
uint32_t unmapped = 0;
ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
@@ -3284,7 +3284,7 @@ rte_pmd_mlx5_external_rx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
int
rte_pmd_mlx5_external_rx_queue_id_unmap(uint16_t port_id, uint16_t dpdk_idx)
{
- struct mlx5_external_rxq *ext_rxq;
+ struct mlx5_external_q *ext_rxq;
uint32_t mapped = 1;
ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
--
2.34.1
* [PATCH 3/3] net/mlx5: add external Tx queue map and unmap
2024-05-31 3:50 [PATCH 1/3] net/mlx5: add match with Tx queue item Suanming Mou
2024-05-31 3:50 ` [PATCH 2/3] net/mlx5: rename external Rx queue to external queue Suanming Mou
@ 2024-05-31 3:50 ` Suanming Mou
2024-06-05 8:17 ` Dariusz Sosnowski
2024-06-05 8:14 ` [PATCH 1/3] net/mlx5: add match with Tx queue item Dariusz Sosnowski
2024-06-05 9:31 ` [PATCH v2 " Suanming Mou
From: Suanming Mou @ 2024-05-31 3:50 UTC (permalink / raw)
To: Dariusz Sosnowski, Viacheslav Ovsiienko, Ori Kam, Matan Azrad
Cc: dev, rasland
To allow externally created Tx queues to be used with
RTE_FLOW_ITEM_TYPE_TX_QUEUE, this commit provides map and unmap
functions that convert an externally created SQ's DevX ID into a
DPDK flow item Tx queue ID.
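For illustration only, the intended call flow might look like the
sketch below; MY_SQ_HW_ID stands for the DevX SQ number of a queue the
application created outside the PMD, and the rte_flow index chosen
from the external range is arbitrary:

    #include <rte_pmd_mlx5.h>

    uint16_t flow_qid = MLX5_EXTERNAL_TX_QUEUE_ID_MIN; /* arbitrary */

    /* Map the externally created SQ to an rte_flow Tx queue index. */
    if (rte_pmd_mlx5_external_tx_queue_id_map(port_id, flow_qid,
                                              MY_SQ_HW_ID) < 0)
        rte_exit(EXIT_FAILURE, "map: %s\n", rte_strerror(rte_errno));
    /*
     * flow_qid can now be used as the tx_queue value of an
     * RTE_FLOW_ITEM_TYPE_TX_QUEUE pattern item.
     */
    /* Drop the mapping once the flows referencing it are destroyed. */
    if (rte_pmd_mlx5_external_tx_queue_id_unmap(port_id, flow_qid) < 0)
        rte_exit(EXIT_FAILURE, "unmap: %s\n", rte_strerror(rte_errno));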
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_os.c | 12 ++-
drivers/net/mlx5/mlx5.c | 5 +
drivers/net/mlx5/mlx5.h | 7 ++
drivers/net/mlx5/mlx5_defs.h | 3 +
drivers/net/mlx5/mlx5_devx.c | 40 ++++++++
drivers/net/mlx5/mlx5_devx.h | 1 +
drivers/net/mlx5/mlx5_ethdev.c | 8 ++
drivers/net/mlx5/mlx5_flow.h | 6 ++
drivers/net/mlx5/mlx5_rx.h | 6 --
drivers/net/mlx5/mlx5_rxq.c | 22 +----
drivers/net/mlx5/mlx5_tx.h | 25 +++++
drivers/net/mlx5/mlx5_txq.c | 152 +++++++++++++++++++++++++++++++
drivers/net/mlx5/rte_pmd_mlx5.h | 48 ++++++++++
drivers/net/mlx5/version.map | 3 +
14 files changed, 312 insertions(+), 26 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 8cfbc25430..bb566ea236 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1224,7 +1224,16 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = ENOMEM;
goto error;
}
- DRV_LOG(DEBUG, "External RxQ is supported.");
+ priv->ext_txqs = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
+ sizeof(struct mlx5_external_q) *
+ MLX5_MAX_EXT_TX_QUEUES, 0,
+ SOCKET_ID_ANY);
+ if (priv->ext_txqs == NULL) {
+ DRV_LOG(ERR, "Fail to allocate external TxQ array.");
+ err = ENOMEM;
+ goto error;
+ }
+ DRV_LOG(DEBUG, "External queue is supported.");
}
priv->sh = sh;
priv->dev_port = spawn->phys_port;
@@ -1763,6 +1772,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
if (eth_dev && priv->flex_item_map)
mlx5_flex_item_port_cleanup(eth_dev);
mlx5_free(priv->ext_rxqs);
+ mlx5_free(priv->ext_txqs);
mlx5_free(priv);
if (eth_dev != NULL)
eth_dev->data->dev_private = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d15302d00d..e41b1e82d7 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2436,6 +2436,10 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (ret)
DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
dev->data->port_id);
+ ret = mlx5_ext_txq_verify(dev);
+ if (ret)
+ DRV_LOG(WARNING, "Port %u some external TxQ still remain.",
+ dev->data->port_id);
ret = mlx5_txq_verify(dev);
if (ret)
DRV_LOG(WARNING, "port %u some Tx queues still remain",
@@ -2447,6 +2451,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (priv->hrxqs)
mlx5_list_destroy(priv->hrxqs);
mlx5_free(priv->ext_rxqs);
+ mlx5_free(priv->ext_txqs);
sh->port[priv->dev_port - 1].nl_ih_port_id = RTE_MAX_ETHPORTS;
/*
* The interrupt handler port id must be reset before priv is reset
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 07d050b225..5b23043b8b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -381,6 +381,12 @@ struct mlx5_lb_ctx {
RTE_ATOMIC(uint16_t) refcnt; /* Reference count for representors. */
};
+/* External queue descriptor. */
+struct mlx5_external_q {
+ uint32_t hw_id; /* Queue index in the Hardware. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
+};
+
/* HW steering queue job descriptor type. */
enum mlx5_hw_job_type {
MLX5_HW_Q_JOB_TYPE_CREATE, /* Flow create job type. */
@@ -1883,6 +1889,7 @@ struct mlx5_priv {
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
struct mlx5_external_q *ext_rxqs; /* External RX queues array. */
+ struct mlx5_external_q *ext_txqs; /* External TX queues array. */
struct mlx5_rxq_priv *(*rxq_privs)[]; /* RX queue non-shared data. */
struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index dc5216cb24..9c454983be 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -183,6 +183,9 @@
/* Maximum number of external Rx queues supported by rte_flow */
#define MLX5_MAX_EXT_RX_QUEUES (UINT16_MAX - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN + 1)
+/* Maximum number of external Tx queues supported by rte_flow */
+#define MLX5_MAX_EXT_TX_QUEUES (UINT16_MAX - MLX5_EXTERNAL_TX_QUEUE_ID_MIN + 1)
+
/*
* Linux definition of static_assert is found in /usr/include/assert.h.
* Windows does not require a redefinition.
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index cae9d578ab..f23eb1def6 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -27,6 +27,46 @@
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
+/**
+ * Validate whether the given external queue's port is valid.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_extq_port_validate(uint16_t port_id)
+{
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ DRV_LOG(ERR, "There is no Ethernet device for port %u.",
+ port_id);
+ rte_errno = ENODEV;
+ return -rte_errno;
+ }
+ dev = &rte_eth_devices[port_id];
+ priv = dev->data->dev_private;
+ if (!mlx5_imported_pd_and_ctx(priv->sh->cdev)) {
+ DRV_LOG(ERR, "Port %u "
+ "external queue isn't supported on local PD and CTX.",
+ port_id);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ if (!mlx5_devx_obj_ops_en(priv->sh)) {
+ DRV_LOG(ERR,
+ "Port %u external queue isn't supported by Verbs API.",
+ port_id);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ return 0;
+}
+
/**
* Modify RQ vlan stripping offload
*
diff --git a/drivers/net/mlx5/mlx5_devx.h b/drivers/net/mlx5/mlx5_devx.h
index ebd1da455a..4ab8cfbd22 100644
--- a/drivers/net/mlx5/mlx5_devx.h
+++ b/drivers/net/mlx5/mlx5_devx.h
@@ -12,6 +12,7 @@ int mlx5_txq_devx_modify(struct mlx5_txq_obj *obj,
enum mlx5_txq_modify_type type, uint8_t dev_port);
void mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj);
int mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type);
+int mlx5_devx_extq_port_validate(uint16_t port_id);
extern struct mlx5_obj_ops devx_obj_ops;
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index aea799341c..1b721cda5e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -123,6 +123,14 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
dev->data->port_id, priv->txqs_n, txqs_n);
priv->txqs_n = txqs_n;
}
+ if (priv->ext_txqs && txqs_n >= MLX5_EXTERNAL_TX_QUEUE_ID_MIN) {
+ DRV_LOG(ERR, "port %u cannot handle this many Tx queues (%u), "
+ "the maximal number of internal Tx queues is %u",
+ dev->data->port_id, txqs_n,
+ MLX5_EXTERNAL_TX_QUEUE_ID_MIN - 1);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
if (rxqs_n > priv->sh->dev_cap.ind_table_max_size) {
DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
dev->data->port_id, rxqs_n);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 8f53e82985..9a359da042 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1954,12 +1954,18 @@ static __rte_always_inline int
flow_hw_get_sqn(struct rte_eth_dev *dev, uint16_t tx_queue, uint32_t *sqn)
{
struct mlx5_txq_ctrl *txq;
+ struct mlx5_external_q *ext_txq;
/* A tx_queue of UINT16_MAX means the Tx queue is PF0. */
if (tx_queue == UINT16_MAX) {
*sqn = 0;
return 0;
}
+ if (mlx5_is_external_txq(dev, tx_queue)) {
+ ext_txq = mlx5_ext_txq_get(dev, tx_queue);
+ *sqn = ext_txq->hw_id;
+ return 0;
+ }
txq = mlx5_txq_get(dev, tx_queue);
if (unlikely(!txq))
return -ENOENT;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index decb14e708..1485556d89 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -185,12 +185,6 @@ struct mlx5_rxq_priv {
uint32_t lwm_devx_subscribed:1;
};
-/* External RX queue descriptor. */
-struct mlx5_external_q {
- uint32_t hw_id; /* Queue index in the Hardware. */
- RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
-};
-
/* mlx5_rxq.c */
extern uint8_t rss_hash_default_key[];
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d6c84b84e4..f13fc3b353 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -3211,6 +3211,7 @@ mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
{
struct rte_eth_dev *dev;
struct mlx5_priv *priv;
+ int ret;
if (dpdk_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN) {
DRV_LOG(ERR, "Queue index %u should be in range: [%u, %u].",
@@ -3218,28 +3219,11 @@ mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
rte_errno = EINVAL;
return NULL;
}
- if (rte_eth_dev_is_valid_port(port_id) < 0) {
- DRV_LOG(ERR, "There is no Ethernet device for port %u.",
- port_id);
- rte_errno = ENODEV;
+ ret = mlx5_devx_extq_port_validate(port_id);
+ if (unlikely(ret))
return NULL;
- }
dev = &rte_eth_devices[port_id];
priv = dev->data->dev_private;
- if (!mlx5_imported_pd_and_ctx(priv->sh->cdev)) {
- DRV_LOG(ERR, "Port %u "
- "external RxQ isn't supported on local PD and CTX.",
- port_id);
- rte_errno = ENOTSUP;
- return NULL;
- }
- if (!mlx5_devx_obj_ops_en(priv->sh)) {
- DRV_LOG(ERR,
- "Port %u external RxQ isn't supported by Verbs API.",
- port_id);
- rte_errno = ENOTSUP;
- return NULL;
- }
/*
* When user configures remote PD and CTX and device creates RxQ by
* DevX, external RxQs array is allocated.
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 0d77ff89de..983913faa2 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -227,6 +227,8 @@ void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);
int mlx5_count_aggr_ports(struct rte_eth_dev *dev);
int mlx5_map_aggr_tx_affinity(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint8_t affinity);
+int mlx5_ext_txq_verify(struct rte_eth_dev *dev);
+struct mlx5_external_q *mlx5_ext_txq_get(struct rte_eth_dev *dev, uint16_t idx);
/* mlx5_tx.c */
@@ -3788,4 +3790,27 @@ mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
return loc.pkts_sent;
}
+/**
+ * Check whether given TxQ is external.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param queue_idx
+ * Tx queue index.
+ *
+ * @return
+ * True if it is an external TxQ, otherwise false.
+ */
+static __rte_always_inline bool
+mlx5_is_external_txq(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_external_q *txq;
+
+ if (!priv->ext_txqs || queue_idx < MLX5_EXTERNAL_TX_QUEUE_ID_MIN)
+ return false;
+ txq = &priv->ext_txqs[queue_idx - MLX5_EXTERNAL_TX_QUEUE_ID_MIN];
+ return !!rte_atomic_load_explicit(&txq->refcnt, rte_memory_order_relaxed);
+}
+
#endif /* RTE_PMD_MLX5_TX_H_ */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index da4236f99a..8eb1ae1f03 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -27,6 +27,7 @@
#include "mlx5_tx.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
+#include "mlx5_devx.h"
#include "rte_pmd_mlx5.h"
#include "mlx5_flow.h"
@@ -1183,6 +1184,57 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
return ctrl;
}
+/**
+ * Get an external Tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * External Tx queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_external_q *
+mlx5_ext_txq_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ MLX5_ASSERT(mlx5_is_external_txq(dev, idx));
+ return &priv->ext_txqs[idx - MLX5_EXTERNAL_TX_QUEUE_ID_MIN];
+}
+
+/**
+ * Verify the external Tx Queue list is empty.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of objects not released.
+ */
+int
+mlx5_ext_txq_verify(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_external_q *txq;
+ uint32_t i;
+ int ret = 0;
+
+ if (priv->ext_txqs == NULL)
+ return 0;
+
+ for (i = MLX5_EXTERNAL_TX_QUEUE_ID_MIN; i <= UINT16_MAX; ++i) {
+ txq = mlx5_ext_txq_get(dev, i);
+ if (txq->refcnt < 2)
+ continue;
+ DRV_LOG(DEBUG, "Port %u external TxQ %u still referenced.",
+ dev->data->port_id, i);
+ ++ret;
+ }
+ return ret;
+}
+
/**
* Release a Tx queue.
*
@@ -1416,3 +1468,103 @@ int mlx5_map_aggr_tx_affinity(struct rte_eth_dev *dev, uint16_t tx_queue_id,
txq_ctrl->txq.tx_aggr_affinity = affinity;
return 0;
}
+
+/**
+ * Validate given external TxQ rte_flow index, and get pointer to concurrent
+ * external TxQ object to map/unmap.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] dpdk_idx
+ * Tx Queue index in rte_flow.
+ *
+ * @return
+ * Pointer to concurrent external TxQ on success,
+ * NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_external_q *
+mlx5_external_tx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
+{
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+ int ret;
+
+ if (dpdk_idx < MLX5_EXTERNAL_TX_QUEUE_ID_MIN) {
+ DRV_LOG(ERR, "Queue index %u should be in range: [%u, %u].",
+ dpdk_idx, MLX5_EXTERNAL_TX_QUEUE_ID_MIN, UINT16_MAX);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ ret = mlx5_devx_extq_port_validate(port_id);
+ if (unlikely(ret))
+ return NULL;
+ dev = &rte_eth_devices[port_id];
+ priv = dev->data->dev_private;
+ /*
+ * When user configures remote PD and CTX and device creates TxQ by
+ * DevX, external TxQs array is allocated.
+ */
+ MLX5_ASSERT(priv->ext_txqs != NULL);
+ return &priv->ext_txqs[dpdk_idx - MLX5_EXTERNAL_TX_QUEUE_ID_MIN];
+}
+
+int
+rte_pmd_mlx5_external_tx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
+ uint32_t hw_idx)
+{
+ struct mlx5_external_q *ext_txq;
+ uint32_t unmapped = 0;
+
+ ext_txq = mlx5_external_tx_queue_get_validate(port_id, dpdk_idx);
+ if (ext_txq == NULL)
+ return -rte_errno;
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_txq->refcnt, &unmapped, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
+ if (ext_txq->hw_id != hw_idx) {
+ DRV_LOG(ERR, "Port %u external TxQ index %u "
+ "is already mapped to HW index (requesting is "
+ "%u, existing is %u).",
+ port_id, dpdk_idx, hw_idx, ext_txq->hw_id);
+ rte_errno = EEXIST;
+ return -rte_errno;
+ }
+ DRV_LOG(WARNING, "Port %u external TxQ index %u "
+ "is already mapped to the requested HW index (%u)",
+ port_id, dpdk_idx, hw_idx);
+
+ } else {
+ ext_txq->hw_id = hw_idx;
+ DRV_LOG(DEBUG, "Port %u external TxQ index %u "
+ "is successfully mapped to the requested HW index (%u)",
+ port_id, dpdk_idx, hw_idx);
+ }
+ return 0;
+}
+
+int
+rte_pmd_mlx5_external_tx_queue_id_unmap(uint16_t port_id, uint16_t dpdk_idx)
+{
+ struct mlx5_external_q *ext_txq;
+ uint32_t mapped = 1;
+
+ ext_txq = mlx5_external_tx_queue_get_validate(port_id, dpdk_idx);
+ if (ext_txq == NULL)
+ return -rte_errno;
+ if (ext_txq->refcnt > 1) {
+ DRV_LOG(ERR, "Port %u external TxQ index %u still referenced.",
+ port_id, dpdk_idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_txq->refcnt, &mapped, 0,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
+ DRV_LOG(ERR, "Port %u external TxQ index %u doesn't exist.",
+ port_id, dpdk_idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ DRV_LOG(DEBUG,
+ "Port %u external TxQ index %u is successfully unmapped.",
+ port_id, dpdk_idx);
+ return 0;
+}
diff --git a/drivers/net/mlx5/rte_pmd_mlx5.h b/drivers/net/mlx5/rte_pmd_mlx5.h
index 004be0eea1..359e4192c8 100644
--- a/drivers/net/mlx5/rte_pmd_mlx5.h
+++ b/drivers/net/mlx5/rte_pmd_mlx5.h
@@ -68,6 +68,11 @@ int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains);
*/
#define RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN (UINT16_MAX - 1000 + 1)
+/**
+ * External Tx queue rte_flow index minimal value.
+ */
+#define MLX5_EXTERNAL_TX_QUEUE_ID_MIN (UINT16_MAX - 1000 + 1)
+
/**
* Tag level to set the linear hash index.
*/
@@ -116,6 +121,49 @@ __rte_experimental
int rte_pmd_mlx5_external_rx_queue_id_unmap(uint16_t port_id,
uint16_t dpdk_idx);
+/**
+ * Update mapping between rte_flow Tx queue index (16 bits) and HW queue index (32
+ * bits) for TxQs which are created outside the PMD.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] dpdk_idx
+ * Queue index in rte_flow.
+ * @param[in] hw_idx
+ * Queue index in hardware.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * Possible values for rte_errno:
+ * - EEXIST - a mapping with the same rte_flow index already exists.
+ * - EINVAL - invalid rte_flow index, out of range.
+ * - ENODEV - there is no Ethernet device for this port id.
+ * - ENOTSUP - the port doesn't support external TxQ.
+ */
+__rte_experimental
+int rte_pmd_mlx5_external_tx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
+ uint32_t hw_idx);
+
+/**
+ * Remove mapping between rte_flow Tx queue index (16 bits) and HW queue index (32
+ * bits) for TxQs which are created outside the PMD.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] dpdk_idx
+ * Queue index in rte_flow.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * Possible values for rte_errno:
+ * - EINVAL - invalid index, out of range, still referenced or doesn't exist.
+ * - ENODEV - there is no Ethernet device for this port id.
+ * - ENOTSUP - the port doesn't support external TxQ.
+ */
+__rte_experimental
+int rte_pmd_mlx5_external_tx_queue_id_unmap(uint16_t port_id,
+ uint16_t dpdk_idx);
+
/**
* The rate of the host port shaper will be updated directly at the next
* available descriptor threshold event to the rate that comes with this flag set;
diff --git a/drivers/net/mlx5/version.map b/drivers/net/mlx5/version.map
index 8fb0e07303..8a78d14786 100644
--- a/drivers/net/mlx5/version.map
+++ b/drivers/net/mlx5/version.map
@@ -20,4 +20,7 @@ EXPERIMENTAL {
# added in 24.03
rte_pmd_mlx5_create_geneve_tlv_parser;
rte_pmd_mlx5_destroy_geneve_tlv_parser;
+ # added in 24.07
+ rte_pmd_mlx5_external_tx_queue_id_map;
+ rte_pmd_mlx5_external_tx_queue_id_unmap;
};
--
2.34.1
* RE: [PATCH 1/3] net/mlx5: add match with Tx queue item
2024-05-31 3:50 [PATCH 1/3] net/mlx5: add match with Tx queue item Suanming Mou
2024-05-31 3:50 ` [PATCH 2/3] net/mlx5: rename external Rx queue to external queue Suanming Mou
2024-05-31 3:50 ` [PATCH 3/3] net/mlx5: add external Tx queue map and unmap Suanming Mou
@ 2024-06-05 8:14 ` Dariusz Sosnowski
2024-06-05 9:31 ` [PATCH v2 " Suanming Mou
From: Dariusz Sosnowski @ 2024-06-05 8:14 UTC (permalink / raw)
To: Suanming Mou, Slava Ovsiienko, Ori Kam, Matan Azrad; +Cc: dev, Raslan Darawsheh
> -----Original Message-----
> From: Suanming Mou <suanmingm@nvidia.com>
> Sent: Friday, May 31, 2024 05:51
> To: Dariusz Sosnowski <dsosnowski@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Ori Kam <orika@nvidia.com>; Matan Azrad
> <matan@nvidia.com>
> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH 1/3] net/mlx5: add match with Tx queue item
>
> With the RTE_FLOW_ITEM_TYPE_TX_QUEUE item, users are able to set the Tx
> queue index and create flows that match on that queue index.
>
> This commit adds matching on the RTE_FLOW_ITEM_TYPE_TX_QUEUE item.
>
> Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Best regards,
Dariusz Sosnowski
* RE: [PATCH 2/3] net/mlx5: rename external Rx queue to external queue
2024-05-31 3:50 ` [PATCH 2/3] net/mlx5: rename external Rx queue to external queue Suanming Mou
@ 2024-06-05 8:14 ` Dariusz Sosnowski
From: Dariusz Sosnowski @ 2024-06-05 8:14 UTC (permalink / raw)
To: Suanming Mou, Slava Ovsiienko, Ori Kam, Matan Azrad; +Cc: dev, Raslan Darawsheh
> -----Original Message-----
> From: Suanming Mou <suanmingm@nvidia.com>
> Sent: Friday, May 31, 2024 05:51
> To: Dariusz Sosnowski <dsosnowski@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Ori Kam <orika@nvidia.com>; Matan Azrad
> <matan@nvidia.com>
> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH 2/3] net/mlx5: rename external Rx queue to external queue
>
> Since external Tx queues are going to be supported, rename the current external
> Rx queue struct to a generic external queue struct so that it can be reused for
> both Rx and Tx.
>
> Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Best regards,
Dariusz Sosnowski
* RE: [PATCH 3/3] net/mlx5: add external Tx queue map and unmap
2024-05-31 3:50 ` [PATCH 3/3] net/mlx5: add external Tx queue map and unmap Suanming Mou
@ 2024-06-05 8:17 ` Dariusz Sosnowski
2024-06-05 8:23 ` Suanming Mou
From: Dariusz Sosnowski @ 2024-06-05 8:17 UTC (permalink / raw)
To: Suanming Mou, Slava Ovsiienko, Ori Kam, Matan Azrad; +Cc: dev, Raslan Darawsheh
> -----Original Message-----
> From: Suanming Mou <suanmingm@nvidia.com>
> Sent: Friday, May 31, 2024 05:51
> To: Dariusz Sosnowski <dsosnowski@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Ori Kam <orika@nvidia.com>; Matan Azrad
> <matan@nvidia.com>
> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH 3/3] net/mlx5: add external Tx queue map and unmap
>
> To allow externally created Tx queues to be used with RTE_FLOW_ITEM_TYPE_TX_QUEUE,
> this commit provides map and unmap functions that convert an externally created
> SQ's DevX ID into a DPDK flow item Tx queue ID.
>
> Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Could you please add the external Tx queue feature to the release notes and mlx5 PMD docs?
Best regards,
Dariusz Sosnowski
* RE: [PATCH 3/3] net/mlx5: add external Tx queue map and unmap
2024-06-05 8:17 ` Dariusz Sosnowski
@ 2024-06-05 8:23 ` Suanming Mou
From: Suanming Mou @ 2024-06-05 8:23 UTC (permalink / raw)
To: Dariusz Sosnowski, Slava Ovsiienko, Ori Kam, Matan Azrad
Cc: dev, Raslan Darawsheh
Hi,
> -----Original Message-----
> From: Dariusz Sosnowski <dsosnowski@nvidia.com>
> Sent: Wednesday, June 5, 2024 4:17 PM
> To: Suanming Mou <suanmingm@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Ori Kam <orika@nvidia.com>; Matan Azrad
> <matan@nvidia.com>
> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
> Subject: RE: [PATCH 3/3] net/mlx5: add external Tx queue map and unmap
>
> > -----Original Message-----
> > From: Suanming Mou <suanmingm@nvidia.com>
> > Sent: Friday, May 31, 2024 05:51
> > To: Dariusz Sosnowski <dsosnowski@nvidia.com>; Slava Ovsiienko
> > <viacheslavo@nvidia.com>; Ori Kam <orika@nvidia.com>; Matan Azrad
> > <matan@nvidia.com>
> > Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
> > Subject: [PATCH 3/3] net/mlx5: add external Tx queue map and unmap
> >
> > To allow externally created Tx queues to be used with
> > RTE_FLOW_ITEM_TYPE_TX_QUEUE, this commit provides map and unmap
> > functions that convert an externally created SQ's DevX ID into a
> > DPDK flow item Tx queue ID.
> >
> > Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
>
> Could you please add external Tx queue feature to release notes and mlx5 PMD
> docs?
Sure.
>
> Best regards,
> Dariusz Sosnowski
* [PATCH v2 1/3] net/mlx5: add match with Tx queue item
2024-05-31 3:50 [PATCH 1/3] net/mlx5: add match with Tx queue item Suanming Mou
` (2 preceding siblings ...)
2024-06-05 8:14 ` [PATCH 1/3] net/mlx5: add match with Tx queue item Dariusz Sosnowski
@ 2024-06-05 9:31 ` Suanming Mou
2024-06-05 9:31 ` [PATCH v2 2/3] net/mlx5: rename external Rx queue to external queue Suanming Mou
` (2 more replies)
From: Suanming Mou @ 2024-06-05 9:31 UTC (permalink / raw)
To: Dariusz Sosnowski, Viacheslav Ovsiienko, Ori Kam, Matan Azrad
Cc: dev, rasland
With the RTE_FLOW_ITEM_TYPE_TX_QUEUE item, users are able to set
the Tx queue index and create flows that match on that queue index.
This commit adds matching on the RTE_FLOW_ITEM_TYPE_TX_QUEUE item.
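Since the item is also accepted by the HWS pattern validation below, a
hedged sketch of declaring it in a pattern template through the async
flow API could look as follows; the egress attribute is only an
example, and the per-rule queue value is supplied later at rule
creation time:

    const struct rte_flow_pattern_template_attr pt_attr = {
        .egress = 1, /* Illustrative direction. */
    };
    const struct rte_flow_item pattern[] = {
        {
            .type = RTE_FLOW_ITEM_TYPE_TX_QUEUE,
            /* Match on the full tx_queue field. */
            .mask = &rte_flow_item_tx_queue_mask,
        },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_error err;
    struct rte_flow_pattern_template *pt =
        rte_flow_pattern_template_create(port_id, &pt_attr,
                                         pattern, &err);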
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
doc/guides/nics/features/mlx5.ini | 1 +
doc/guides/rel_notes/release_24_07.rst | 4 ++
drivers/net/mlx5/hws/mlx5dr_definer.c | 50 +++++++++++++++++++
drivers/net/mlx5/mlx5_flow.h | 58 ++++++++++++++++++++++
drivers/net/mlx5/mlx5_flow_dv.c | 69 ++++++++++++++++++++++++++
drivers/net/mlx5/mlx5_flow_hw.c | 1 +
6 files changed, 183 insertions(+)
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index 81a7067cc3..056e04275b 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -92,6 +92,7 @@ quota = Y
random = Y
tag = Y
tcp = Y
+tx_queue = Y
udp = Y
vlan = Y
vxlan = Y
diff --git a/doc/guides/rel_notes/release_24_07.rst b/doc/guides/rel_notes/release_24_07.rst
index ffbe9ce051..46efc04eac 100644
--- a/doc/guides/rel_notes/release_24_07.rst
+++ b/doc/guides/rel_notes/release_24_07.rst
@@ -81,6 +81,10 @@ New Features
* Added SSE/NEON vector datapath.
+* **Updated NVIDIA mlx5 driver.**
+
+ * Added match with Tx queue.
+
Removed Items
-------------
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index dabfac8abc..bc128c7b99 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -6,6 +6,7 @@
#define GTP_PDU_SC 0x85
#define BAD_PORT 0xBAD
+#define BAD_SQN 0xBAD
#define ETH_TYPE_IPV4_VXLAN 0x0800
#define ETH_TYPE_IPV6_VXLAN 0x86DD
#define UDP_VXLAN_PORT 4789
@@ -878,6 +879,22 @@ mlx5dr_definer_vxlan_gpe_rsvd0_set(struct mlx5dr_definer_fc *fc,
DR_SET(tag, rsvd0, fc->byte_off, fc->bit_off, fc->bit_mask);
}
+static void
+mlx5dr_definer_tx_queue_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ const struct rte_flow_item_tx_queue *v = item_spec;
+ uint32_t sqn = 0;
+ int ret;
+
+ ret = flow_hw_conv_sqn(fc->extra_data, v->tx_queue, &sqn);
+ if (unlikely(ret))
+ sqn = BAD_SQN;
+
+ DR_SET(tag, sqn, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
static int
mlx5dr_definer_conv_item_eth(struct mlx5dr_definer_conv_data *cd,
struct rte_flow_item *item,
@@ -1850,6 +1867,35 @@ mlx5dr_definer_conv_item_metadata(struct mlx5dr_definer_conv_data *cd,
return 0;
}
+static int
+mlx5dr_definer_conv_item_tx_queue(struct mlx5dr_definer_conv_data *cd,
+ struct rte_flow_item *item,
+ int item_idx)
+{
+ const struct rte_flow_item_tx_queue *m = item->mask;
+ struct mlx5dr_definer_fc *fc;
+
+ if (!m)
+ return 0;
+
+ if (m->tx_queue) {
+ fc = &cd->fc[MLX5DR_DEFINER_FNAME_SOURCE_QP];
+ fc->item_idx = item_idx;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ fc->tag_set = &mlx5dr_definer_tx_queue_set;
+ /* Use extra_data to save the DPDK port_id. */
+ fc->extra_data = flow_hw_get_port_id(cd->ctx);
+ if (fc->extra_data == UINT16_MAX) {
+ DR_LOG(ERR, "Invalid port for item tx_queue");
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DR_CALC_SET_HDR(fc, source_qp_gvmi, source_qp);
+ }
+
+ return 0;
+}
+
static int
mlx5dr_definer_conv_item_sq(struct mlx5dr_definer_conv_data *cd,
struct rte_flow_item *item,
@@ -3150,6 +3196,10 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
ret = mlx5dr_definer_conv_item_vxlan(&cd, items, i);
item_flags |= MLX5_FLOW_LAYER_VXLAN;
break;
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ ret = mlx5dr_definer_conv_item_tx_queue(&cd, items, i);
+ item_flags |= MLX5_FLOW_ITEM_SQ;
+ break;
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
ret = mlx5dr_definer_conv_item_sq(&cd, items, i);
item_flags |= MLX5_FLOW_ITEM_SQ;
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index dd5b30a8a4..357267e0c3 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -19,6 +19,7 @@
#include "mlx5.h"
#include "rte_pmd_mlx5.h"
#include "hws/mlx5dr.h"
+#include "mlx5_tx.h"
/* E-Switch Manager port, used for rte_flow_item_port_id. */
#define MLX5_PORT_ESW_MGR UINT32_MAX
@@ -1945,6 +1946,63 @@ struct flow_hw_port_info {
extern struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS];
+/*
+ * Get sqn for given tx_queue.
+ * Used in HWS rule creation.
+ */
+static __rte_always_inline int
+flow_hw_get_sqn(struct rte_eth_dev *dev, uint16_t tx_queue, uint32_t *sqn)
+{
+ struct mlx5_txq_ctrl *txq;
+
+ /* A tx_queue of UINT16_MAX means the Tx queue is PF0. */
+ if (tx_queue == UINT16_MAX) {
+ *sqn = 0;
+ return 0;
+ }
+ txq = mlx5_txq_get(dev, tx_queue);
+ if (unlikely(!txq))
+ return -ENOENT;
+ *sqn = mlx5_txq_get_sqn(txq);
+ mlx5_txq_release(dev, tx_queue);
+ return 0;
+}
+
+/*
+ * Convert tx_queue to sqn for the given rte_eth_dev port.
+ * Used in HWS rule creation.
+ */
+static __rte_always_inline int
+flow_hw_conv_sqn(uint16_t port_id, uint16_t tx_queue, uint32_t *sqn)
+{
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return -EINVAL;
+ return flow_hw_get_sqn(&rte_eth_devices[port_id], tx_queue, sqn);
+}
+
+/*
+ * Get the rte_eth_dev port_id that owns the given mlx5dr context.
+ * Used in HWS rule creation.
+ */
+static __rte_always_inline uint16_t
+flow_hw_get_port_id(void *dr_ctx)
+{
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+ uint16_t port_id;
+
+ MLX5_ETH_FOREACH_DEV(port_id, NULL) {
+ struct mlx5_priv *priv;
+
+ priv = rte_eth_devices[port_id].data->dev_private;
+ if (priv->dr_ctx == dr_ctx)
+ return port_id;
+ }
+#else
+ RTE_SET_USED(dr_ctx);
+#endif
+ return UINT16_MAX;
+}
+
/*
* Get metadata match tag and mask for given rte_eth_dev port.
* Used in HWS rule creation.
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 06f5427abf..14cdd4468d 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8025,6 +8025,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = MLX5_FLOW_ITEM_TAG;
break;
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
last_item = MLX5_FLOW_ITEM_SQ;
break;
@@ -12199,6 +12200,52 @@ flow_dv_translate_create_counter(struct rte_eth_dev *dev,
return counter;
}
+/**
+ * Add Tx queue matcher
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] key_type
+ * Set flow matcher mask or value.
+ *
+ * @return
+ * 0 on success, -errno otherwise and errno is set.
+ */
+static int
+flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev, void *key,
+ const struct rte_flow_item *item, uint32_t key_type)
+{
+ const struct rte_flow_item_tx_queue *queue_m;
+ const struct rte_flow_item_tx_queue *queue_v;
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ uint32_t tx_queue;
+ uint32_t sqn = 0;
+ int ret;
+
+ MLX5_ITEM_UPDATE(item, key_type, queue_v, queue_m, &rte_flow_item_tx_queue_mask);
+ if (!queue_m || !queue_v)
+ return -EINVAL;
+ if (key_type & MLX5_SET_MATCHER_V) {
+ tx_queue = queue_v->tx_queue;
+ if (key_type == MLX5_SET_MATCHER_SW_V)
+ tx_queue &= queue_m->tx_queue;
+ ret = flow_hw_get_sqn(dev, tx_queue, &sqn);
+ if (unlikely(ret))
+ return ret;
+ } else {
+ /* Due to the tx_queue to sqn conversion, only fully masked values are supported. */
+ if (queue_m->tx_queue != rte_flow_item_tx_queue_mask.tx_queue)
+ return -EINVAL;
+ sqn = UINT32_MAX;
+ }
+ MLX5_SET(fte_match_set_misc, misc_v, source_sqn, sqn);
+ return 0;
+}
+
/**
* Add SQ matcher
*
@@ -14169,6 +14216,14 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
flow_dv_translate_mlx5_item_tag(dev, key, items, key_type);
last_item = MLX5_FLOW_ITEM_TAG;
break;
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ ret = flow_dv_translate_item_tx_queue(dev, key, items, key_type);
+ if (ret)
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "invalid tx_queue item");
+ last_item = MLX5_FLOW_ITEM_SQ;
+ break;
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
flow_dv_translate_item_sq(key, items, key_type);
last_item = MLX5_FLOW_ITEM_SQ;
@@ -14399,6 +14454,20 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
wks.last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
MLX5_FLOW_ITEM_OUTER_FLEX;
break;
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ ret = flow_dv_translate_item_tx_queue(dev, match_value, items,
+ MLX5_SET_MATCHER_SW_V);
+ if (ret)
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "invalid tx_queue item spec");
+ ret = flow_dv_translate_item_tx_queue(dev, match_mask, items,
+ MLX5_SET_MATCHER_SW_M);
+ if (ret)
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "invalid tx_queue item mask");
+ break;
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
flow_dv_translate_item_sq(match_value, items,
MLX5_SET_MATCHER_SW_V);
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 63194935a3..cf698b3ec8 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -8018,6 +8018,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_MPLS:
case RTE_FLOW_ITEM_TYPE_GENEVE:
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
case RTE_FLOW_ITEM_TYPE_GRE:
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
--
2.34.1
* [PATCH v2 2/3] net/mlx5: rename external Rx queue to external queue
2024-06-05 9:31 ` [PATCH v2 " Suanming Mou
@ 2024-06-05 9:31 ` Suanming Mou
2024-06-05 9:31 ` [PATCH v2 3/3] net/mlx5: add external Tx queue map and unmap Suanming Mou
2024-06-06 10:50 ` [PATCH v2 1/3] net/mlx5: add match with Tx queue item Raslan Darawsheh
From: Suanming Mou @ 2024-06-05 9:31 UTC (permalink / raw)
To: Dariusz Sosnowski, Viacheslav Ovsiienko, Ori Kam, Matan Azrad
Cc: dev, rasland
Since external Tx queues are going to be supported, rename the
current external Rx queue struct to a generic external queue
struct so that it can be reused for both Rx and Tx.
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_os.c | 2 +-
drivers/net/mlx5/mlx5.h | 2 +-
drivers/net/mlx5/mlx5_devx.c | 2 +-
drivers/net/mlx5/mlx5_rx.h | 8 ++++----
drivers/net/mlx5/mlx5_rxq.c | 16 ++++++++--------
5 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index de3df17108..99de52936a 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1216,7 +1216,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
*/
if (mlx5_imported_pd_and_ctx(sh->cdev) && mlx5_devx_obj_ops_en(sh)) {
priv->ext_rxqs = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
- sizeof(struct mlx5_external_rxq) *
+ sizeof(struct mlx5_external_q) *
MLX5_MAX_EXT_RX_QUEUES, 0,
SOCKET_ID_ANY);
if (priv->ext_rxqs == NULL) {
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e2c22ffe97..e85308f6e0 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1882,7 +1882,7 @@ struct mlx5_priv {
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
- struct mlx5_external_rxq *ext_rxqs; /* External RX queues array. */
+ struct mlx5_external_q *ext_rxqs; /* External RX queues array. */
struct mlx5_rxq_priv *(*rxq_privs)[]; /* RX queue non-shared data. */
struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 9fa400fc48..cae9d578ab 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -673,7 +673,7 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
}
for (i = 0; i != queues_n; ++i) {
if (mlx5_is_external_rxq(dev, queues[i])) {
- struct mlx5_external_rxq *ext_rxq =
+ struct mlx5_external_q *ext_rxq =
mlx5_ext_rxq_get(dev, queues[i]);
rqt_attr->rq_list[i] = ext_rxq->hw_id;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index d008e4dd3a..decb14e708 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -186,7 +186,7 @@ struct mlx5_rxq_priv {
};
/* External RX queue descriptor. */
-struct mlx5_external_rxq {
+struct mlx5_external_q {
uint32_t hw_id; /* Queue index in the Hardware. */
RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
};
@@ -227,10 +227,10 @@ uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);
-struct mlx5_external_rxq *mlx5_ext_rxq_ref(struct rte_eth_dev *dev,
+struct mlx5_external_q *mlx5_ext_rxq_ref(struct rte_eth_dev *dev,
uint16_t idx);
uint32_t mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
-struct mlx5_external_rxq *mlx5_ext_rxq_get(struct rte_eth_dev *dev,
+struct mlx5_external_q *mlx5_ext_rxq_get(struct rte_eth_dev *dev,
uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
@@ -661,7 +661,7 @@ static __rte_always_inline bool
mlx5_is_external_rxq(struct rte_eth_dev *dev, uint16_t queue_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_external_rxq *rxq;
+ struct mlx5_external_q *rxq;
if (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
return false;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f67aaa6178..d6c84b84e4 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2133,10 +2133,10 @@ mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
* @return
* A pointer to the queue if it exists, NULL otherwise.
*/
-struct mlx5_external_rxq *
+struct mlx5_external_q *
mlx5_ext_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
{
- struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
+ struct mlx5_external_q *rxq = mlx5_ext_rxq_get(dev, idx);
rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
return rxq;
@@ -2156,7 +2156,7 @@ mlx5_ext_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
uint32_t
mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
{
- struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
+ struct mlx5_external_q *rxq = mlx5_ext_rxq_get(dev, idx);
return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
}
@@ -2172,7 +2172,7 @@ mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
* @return
* A pointer to the queue if it exists, NULL otherwise.
*/
-struct mlx5_external_rxq *
+struct mlx5_external_q *
mlx5_ext_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -2336,7 +2336,7 @@ int
mlx5_ext_rxq_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_external_rxq *rxq;
+ struct mlx5_external_q *rxq;
uint32_t i;
int ret = 0;
@@ -3206,7 +3206,7 @@ mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
* Pointer to concurrent external RxQ on success,
* NULL otherwise and rte_errno is set.
*/
-static struct mlx5_external_rxq *
+static struct mlx5_external_q *
mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
{
struct rte_eth_dev *dev;
@@ -3252,7 +3252,7 @@ int
rte_pmd_mlx5_external_rx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
uint32_t hw_idx)
{
- struct mlx5_external_rxq *ext_rxq;
+ struct mlx5_external_q *ext_rxq;
uint32_t unmapped = 0;
ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
@@ -3284,7 +3284,7 @@ rte_pmd_mlx5_external_rx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
int
rte_pmd_mlx5_external_rx_queue_id_unmap(uint16_t port_id, uint16_t dpdk_idx)
{
- struct mlx5_external_rxq *ext_rxq;
+ struct mlx5_external_q *ext_rxq;
uint32_t mapped = 1;
ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
--
2.34.1
* [PATCH v2 3/3] net/mlx5: add external Tx queue map and unmap
2024-06-05 9:31 ` [PATCH v2 " Suanming Mou
2024-06-05 9:31 ` [PATCH v2 2/3] net/mlx5: rename external Rx queue to external queue Suanming Mou
@ 2024-06-05 9:31 ` Suanming Mou
2024-06-05 15:16 ` Dariusz Sosnowski
2024-06-06 10:50 ` [PATCH v2 1/3] net/mlx5: add match with Tx queue item Raslan Darawsheh
2 siblings, 1 reply; 12+ messages in thread
From: Suanming Mou @ 2024-06-05 9:31 UTC (permalink / raw)
To: Dariusz Sosnowski, Viacheslav Ovsiienko, Ori Kam, Matan Azrad
Cc: dev, rasland
To use externally created Tx queues with the RTE_FLOW_ITEM_TYPE_TX_QUEUE
item, this commit provides the map and unmap functions that convert an
externally created SQ's DevX ID to a DPDK flow item Tx queue ID.
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
v2: add feature and release notes.
---
doc/guides/nics/mlx5.rst | 1 +
doc/guides/rel_notes/release_24_07.rst | 1 +
drivers/net/mlx5/linux/mlx5_os.c | 12 +-
drivers/net/mlx5/mlx5.c | 5 +
drivers/net/mlx5/mlx5.h | 7 ++
drivers/net/mlx5/mlx5_defs.h | 3 +
drivers/net/mlx5/mlx5_devx.c | 40 +++++++
drivers/net/mlx5/mlx5_devx.h | 1 +
drivers/net/mlx5/mlx5_ethdev.c | 8 ++
drivers/net/mlx5/mlx5_flow.h | 6 +
drivers/net/mlx5/mlx5_rx.h | 6 -
drivers/net/mlx5/mlx5_rxq.c | 22 +---
drivers/net/mlx5/mlx5_tx.h | 25 ++++
drivers/net/mlx5/mlx5_txq.c | 152 +++++++++++++++++++++++++
drivers/net/mlx5/rte_pmd_mlx5.h | 48 ++++++++
drivers/net/mlx5/version.map | 3 +
16 files changed, 314 insertions(+), 26 deletions(-)
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index b5928d40b2..5cd41d3c7f 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -169,6 +169,7 @@ Features
- Sub-Function.
- Matching on represented port.
- Matching on aggregated affinity.
+- Matching on external Tx queue.
Limitations
diff --git a/doc/guides/rel_notes/release_24_07.rst b/doc/guides/rel_notes/release_24_07.rst
index 46efc04eac..3a3257fcd5 100644
--- a/doc/guides/rel_notes/release_24_07.rst
+++ b/doc/guides/rel_notes/release_24_07.rst
@@ -84,6 +84,7 @@ New Features
* **Updated NVIDIA mlx5 driver.**
* Added match with Tx queue.
+ * Added match with external Tx queue.
Removed Items
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 99de52936a..f887501a9b 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1224,7 +1224,16 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = ENOMEM;
goto error;
}
- DRV_LOG(DEBUG, "External RxQ is supported.");
+ priv->ext_txqs = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
+ sizeof(struct mlx5_external_q) *
+ MLX5_MAX_EXT_TX_QUEUES, 0,
+ SOCKET_ID_ANY);
+ if (priv->ext_txqs == NULL) {
+ DRV_LOG(ERR, "Fail to allocate external TxQ array.");
+ err = ENOMEM;
+ goto error;
+ }
+ DRV_LOG(DEBUG, "External queue is supported.");
}
priv->sh = sh;
priv->dev_port = spawn->phys_port;
@@ -1762,6 +1771,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
if (eth_dev && priv->flex_item_map)
mlx5_flex_item_port_cleanup(eth_dev);
mlx5_free(priv->ext_rxqs);
+ mlx5_free(priv->ext_txqs);
mlx5_free(priv);
if (eth_dev != NULL)
eth_dev->data->dev_private = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d15302d00d..e41b1e82d7 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2436,6 +2436,10 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (ret)
DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
dev->data->port_id);
+ ret = mlx5_ext_txq_verify(dev);
+ if (ret)
+ DRV_LOG(WARNING, "Port %u some external TxQ still remain.",
+ dev->data->port_id);
ret = mlx5_txq_verify(dev);
if (ret)
DRV_LOG(WARNING, "port %u some Tx queues still remain",
@@ -2447,6 +2451,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (priv->hrxqs)
mlx5_list_destroy(priv->hrxqs);
mlx5_free(priv->ext_rxqs);
+ mlx5_free(priv->ext_txqs);
sh->port[priv->dev_port - 1].nl_ih_port_id = RTE_MAX_ETHPORTS;
/*
* The interrupt handler port id must be reset before priv is reset
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e85308f6e0..91ceceb34a 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -381,6 +381,12 @@ struct mlx5_lb_ctx {
RTE_ATOMIC(uint16_t) refcnt; /* Reference count for representors. */
};
+/* External queue descriptor. */
+struct mlx5_external_q {
+ uint32_t hw_id; /* Queue index in the Hardware. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
+};
+
/* HW steering queue job descriptor type. */
enum mlx5_hw_job_type {
MLX5_HW_Q_JOB_TYPE_CREATE, /* Flow create job type. */
@@ -1883,6 +1889,7 @@ struct mlx5_priv {
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
struct mlx5_external_q *ext_rxqs; /* External RX queues array. */
+ struct mlx5_external_q *ext_txqs; /* External TX queues array. */
struct mlx5_rxq_priv *(*rxq_privs)[]; /* RX queue non-shared data. */
struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index dc5216cb24..9c454983be 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -183,6 +183,9 @@
/* Maximum number of external Rx queues supported by rte_flow */
#define MLX5_MAX_EXT_RX_QUEUES (UINT16_MAX - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN + 1)
+/* Maximum number of external Tx queues supported by rte_flow */
+#define MLX5_MAX_EXT_TX_QUEUES (UINT16_MAX - MLX5_EXTERNAL_TX_QUEUE_ID_MIN + 1)
+
/*
* Linux definition of static_assert is found in /usr/include/assert.h.
* Windows does not require a redefinition.
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index cae9d578ab..f23eb1def6 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -27,6 +27,46 @@
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
+/**
+ * Validate whether the given external queue's port is valid.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_extq_port_validate(uint16_t port_id)
+{
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+
+ if (rte_eth_dev_is_valid_port(port_id) < 0) {
+ DRV_LOG(ERR, "There is no Ethernet device for port %u.",
+ port_id);
+ rte_errno = ENODEV;
+ return -rte_errno;
+ }
+ dev = &rte_eth_devices[port_id];
+ priv = dev->data->dev_private;
+ if (!mlx5_imported_pd_and_ctx(priv->sh->cdev)) {
+ DRV_LOG(ERR, "Port %u "
+ "external queue isn't supported on local PD and CTX.",
+ port_id);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ if (!mlx5_devx_obj_ops_en(priv->sh)) {
+ DRV_LOG(ERR,
+ "Port %u external queue isn't supported by Verbs API.",
+ port_id);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ return 0;
+}
+
/**
* Modify RQ vlan stripping offload
*
diff --git a/drivers/net/mlx5/mlx5_devx.h b/drivers/net/mlx5/mlx5_devx.h
index ebd1da455a..4ab8cfbd22 100644
--- a/drivers/net/mlx5/mlx5_devx.h
+++ b/drivers/net/mlx5/mlx5_devx.h
@@ -12,6 +12,7 @@ int mlx5_txq_devx_modify(struct mlx5_txq_obj *obj,
enum mlx5_txq_modify_type type, uint8_t dev_port);
void mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj);
int mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type);
+int mlx5_devx_extq_port_validate(uint16_t port_id);
extern struct mlx5_obj_ops devx_obj_ops;
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index aea799341c..1b721cda5e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -123,6 +123,14 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
dev->data->port_id, priv->txqs_n, txqs_n);
priv->txqs_n = txqs_n;
}
+ if (priv->ext_txqs && txqs_n >= MLX5_EXTERNAL_TX_QUEUE_ID_MIN) {
+ DRV_LOG(ERR, "port %u cannot handle this many Tx queues (%u), "
+ "the maximal number of internal Tx queues is %u",
+ dev->data->port_id, txqs_n,
+ MLX5_EXTERNAL_TX_QUEUE_ID_MIN - 1);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
if (rxqs_n > priv->sh->dev_cap.ind_table_max_size) {
DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
dev->data->port_id, rxqs_n);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 357267e0c3..ba75b99139 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1954,12 +1954,18 @@ static __rte_always_inline int
flow_hw_get_sqn(struct rte_eth_dev *dev, uint16_t tx_queue, uint32_t *sqn)
{
struct mlx5_txq_ctrl *txq;
+ struct mlx5_external_q *ext_txq;
/* Means Tx queue is PF0. */
if (tx_queue == UINT16_MAX) {
*sqn = 0;
return 0;
}
+ if (mlx5_is_external_txq(dev, tx_queue)) {
+ ext_txq = mlx5_ext_txq_get(dev, tx_queue);
+ *sqn = ext_txq->hw_id;
+ return 0;
+ }
txq = mlx5_txq_get(dev, tx_queue);
if (unlikely(!txq))
return -ENOENT;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index decb14e708..1485556d89 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -185,12 +185,6 @@ struct mlx5_rxq_priv {
uint32_t lwm_devx_subscribed:1;
};
-/* External RX queue descriptor. */
-struct mlx5_external_q {
- uint32_t hw_id; /* Queue index in the Hardware. */
- RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
-};
-
/* mlx5_rxq.c */
extern uint8_t rss_hash_default_key[];
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d6c84b84e4..f13fc3b353 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -3211,6 +3211,7 @@ mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
{
struct rte_eth_dev *dev;
struct mlx5_priv *priv;
+ int ret;
if (dpdk_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN) {
DRV_LOG(ERR, "Queue index %u should be in range: [%u, %u].",
@@ -3218,28 +3219,11 @@ mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
rte_errno = EINVAL;
return NULL;
}
- if (rte_eth_dev_is_valid_port(port_id) < 0) {
- DRV_LOG(ERR, "There is no Ethernet device for port %u.",
- port_id);
- rte_errno = ENODEV;
+ ret = mlx5_devx_extq_port_validate(port_id);
+ if (unlikely(ret))
return NULL;
- }
dev = &rte_eth_devices[port_id];
priv = dev->data->dev_private;
- if (!mlx5_imported_pd_and_ctx(priv->sh->cdev)) {
- DRV_LOG(ERR, "Port %u "
- "external RxQ isn't supported on local PD and CTX.",
- port_id);
- rte_errno = ENOTSUP;
- return NULL;
- }
- if (!mlx5_devx_obj_ops_en(priv->sh)) {
- DRV_LOG(ERR,
- "Port %u external RxQ isn't supported by Verbs API.",
- port_id);
- rte_errno = ENOTSUP;
- return NULL;
- }
/*
* When user configures remote PD and CTX and device creates RxQ by
* DevX, external RxQs array is allocated.
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 0d77ff89de..983913faa2 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -227,6 +227,8 @@ void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);
int mlx5_count_aggr_ports(struct rte_eth_dev *dev);
int mlx5_map_aggr_tx_affinity(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint8_t affinity);
+int mlx5_ext_txq_verify(struct rte_eth_dev *dev);
+struct mlx5_external_q *mlx5_ext_txq_get(struct rte_eth_dev *dev, uint16_t idx);
/* mlx5_tx.c */
@@ -3788,4 +3790,27 @@ mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
return loc.pkts_sent;
}
+/**
+ * Check whether given TxQ is external.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param queue_idx
+ * Tx queue index.
+ *
+ * @return
+ * True if the queue is an external TxQ, false otherwise.
+ */
+static __rte_always_inline bool
+mlx5_is_external_txq(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_external_q *txq;
+
+ if (!priv->ext_txqs || queue_idx < MLX5_EXTERNAL_TX_QUEUE_ID_MIN)
+ return false;
+ txq = &priv->ext_txqs[queue_idx - MLX5_EXTERNAL_TX_QUEUE_ID_MIN];
+ return !!rte_atomic_load_explicit(&txq->refcnt, rte_memory_order_relaxed);
+}
+
#endif /* RTE_PMD_MLX5_TX_H_ */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index da4236f99a..8eb1ae1f03 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -27,6 +27,7 @@
#include "mlx5_tx.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
+#include "mlx5_devx.h"
#include "rte_pmd_mlx5.h"
#include "mlx5_flow.h"
@@ -1183,6 +1184,57 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
return ctrl;
}
+/**
+ * Get an external Tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * External Tx queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_external_q *
+mlx5_ext_txq_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ MLX5_ASSERT(mlx5_is_external_txq(dev, idx));
+ return &priv->ext_txqs[idx - MLX5_EXTERNAL_TX_QUEUE_ID_MIN];
+}
+
+/**
+ * Verify the external Tx Queue list is empty.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of objects not released.
+ */
+int
+mlx5_ext_txq_verify(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_external_q *txq;
+ uint32_t i;
+ int ret = 0;
+
+ if (priv->ext_txqs == NULL)
+ return 0;
+
+ for (i = MLX5_EXTERNAL_TX_QUEUE_ID_MIN; i <= UINT16_MAX; ++i) {
+ txq = mlx5_ext_txq_get(dev, i);
+ if (txq->refcnt < 2)
+ continue;
+ DRV_LOG(DEBUG, "Port %u external TxQ %u still referenced.",
+ dev->data->port_id, i);
+ ++ret;
+ }
+ return ret;
+}
+
/**
* Release a Tx queue.
*
@@ -1416,3 +1468,103 @@ int mlx5_map_aggr_tx_affinity(struct rte_eth_dev *dev, uint16_t tx_queue_id,
txq_ctrl->txq.tx_aggr_affinity = affinity;
return 0;
}
+
+/**
+ * Validate given external TxQ rte_flow index, and get pointer to concurrent
+ * external TxQ object to map/unmap.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] dpdk_idx
+ * Tx Queue index in rte_flow.
+ *
+ * @return
+ * Pointer to concurrent external TxQ on success,
+ * NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_external_q *
+mlx5_external_tx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
+{
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+ int ret;
+
+ if (dpdk_idx < MLX5_EXTERNAL_TX_QUEUE_ID_MIN) {
+ DRV_LOG(ERR, "Queue index %u should be in range: [%u, %u].",
+ dpdk_idx, MLX5_EXTERNAL_TX_QUEUE_ID_MIN, UINT16_MAX);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ ret = mlx5_devx_extq_port_validate(port_id);
+ if (unlikely(ret))
+ return NULL;
+ dev = &rte_eth_devices[port_id];
+ priv = dev->data->dev_private;
+ /*
+ * When user configures remote PD and CTX and device creates TxQ by
+ * DevX, external TxQs array is allocated.
+ */
+ MLX5_ASSERT(priv->ext_txqs != NULL);
+ return &priv->ext_txqs[dpdk_idx - MLX5_EXTERNAL_TX_QUEUE_ID_MIN];
+}
+
+int
+rte_pmd_mlx5_external_tx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
+ uint32_t hw_idx)
+{
+ struct mlx5_external_q *ext_txq;
+ uint32_t unmapped = 0;
+
+ ext_txq = mlx5_external_tx_queue_get_validate(port_id, dpdk_idx);
+ if (ext_txq == NULL)
+ return -rte_errno;
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_txq->refcnt, &unmapped, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
+ if (ext_txq->hw_id != hw_idx) {
+ DRV_LOG(ERR, "Port %u external TxQ index %u "
+ "is already mapped to HW index (requesting is "
+ "%u, existing is %u).",
+ port_id, dpdk_idx, hw_idx, ext_txq->hw_id);
+ rte_errno = EEXIST;
+ return -rte_errno;
+ }
+ DRV_LOG(WARNING, "Port %u external TxQ index %u "
+ "is already mapped to the requested HW index (%u)",
+ port_id, dpdk_idx, hw_idx);
+
+ } else {
+ ext_txq->hw_id = hw_idx;
+ DRV_LOG(DEBUG, "Port %u external TxQ index %u "
+ "is successfully mapped to the requested HW index (%u)",
+ port_id, dpdk_idx, hw_idx);
+ }
+ return 0;
+}
+
+int
+rte_pmd_mlx5_external_tx_queue_id_unmap(uint16_t port_id, uint16_t dpdk_idx)
+{
+ struct mlx5_external_q *ext_txq;
+ uint32_t mapped = 1;
+
+ ext_txq = mlx5_external_tx_queue_get_validate(port_id, dpdk_idx);
+ if (ext_txq == NULL)
+ return -rte_errno;
+ if (ext_txq->refcnt > 1) {
+ DRV_LOG(ERR, "Port %u external TxQ index %u still referenced.",
+ port_id, dpdk_idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_txq->refcnt, &mapped, 0,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
+ DRV_LOG(ERR, "Port %u external TxQ index %u doesn't exist.",
+ port_id, dpdk_idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ DRV_LOG(DEBUG,
+ "Port %u external TxQ index %u is successfully unmapped.",
+ port_id, dpdk_idx);
+ return 0;
+}
diff --git a/drivers/net/mlx5/rte_pmd_mlx5.h b/drivers/net/mlx5/rte_pmd_mlx5.h
index 004be0eea1..359e4192c8 100644
--- a/drivers/net/mlx5/rte_pmd_mlx5.h
+++ b/drivers/net/mlx5/rte_pmd_mlx5.h
@@ -68,6 +68,11 @@ int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains);
*/
#define RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN (UINT16_MAX - 1000 + 1)
+/**
+ * External Tx queue rte_flow index minimal value.
+ */
+#define MLX5_EXTERNAL_TX_QUEUE_ID_MIN (UINT16_MAX - 1000 + 1)
+
/**
* Tag level to set the linear hash index.
*/
@@ -116,6 +121,49 @@ __rte_experimental
int rte_pmd_mlx5_external_rx_queue_id_unmap(uint16_t port_id,
uint16_t dpdk_idx);
+/**
+ * Update mapping between rte_flow Tx queue index (16 bits) and HW queue index (32
+ * bits) for Tx queues created outside the PMD.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] dpdk_idx
+ * Queue index in rte_flow.
+ * @param[in] hw_idx
+ * Queue index in hardware.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * Possible values for rte_errno:
+ * - EEXIST - a mapping with the same rte_flow index already exists.
+ * - EINVAL - invalid rte_flow index, out of range.
+ * - ENODEV - there is no Ethernet device for this port id.
+ * - ENOTSUP - the port doesn't support external TxQ.
+ */
+__rte_experimental
+int rte_pmd_mlx5_external_tx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
+ uint32_t hw_idx);
+
+/**
+ * Remove mapping between rte_flow Tx queue index (16 bits) and HW queue index (32
+ * bits) for Tx queues created outside the PMD.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] dpdk_idx
+ * Queue index in rte_flow.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * Possible values for rte_errno:
+ * - EINVAL - invalid index, out of range, still referenced or doesn't exist.
+ * - ENODEV - there is no Ethernet device for this port id.
+ * - ENOTSUP - the port doesn't support external TxQ.
+ */
+__rte_experimental
+int rte_pmd_mlx5_external_tx_queue_id_unmap(uint16_t port_id,
+ uint16_t dpdk_idx);
+
/**
* The rate of the host port shaper will be updated directly at the next
* available descriptor threshold event to the rate that comes with this flag set;
diff --git a/drivers/net/mlx5/version.map b/drivers/net/mlx5/version.map
index 8fb0e07303..8a78d14786 100644
--- a/drivers/net/mlx5/version.map
+++ b/drivers/net/mlx5/version.map
@@ -20,4 +20,7 @@ EXPERIMENTAL {
# added in 24.03
rte_pmd_mlx5_create_geneve_tlv_parser;
rte_pmd_mlx5_destroy_geneve_tlv_parser;
+ # added in 24.07
+ rte_pmd_mlx5_external_tx_queue_id_map;
+ rte_pmd_mlx5_external_tx_queue_id_unmap;
};
--
2.34.1
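The map/unmap pair above serializes on a single compare-and-swap of the
per-queue reference counter (0 = unmapped, 1 = mapped, >1 = still
referenced by flows) instead of taking a lock. A standalone sketch of that
protocol, with hypothetical names and plain C11 atomics standing in for
the rte_atomic wrappers:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct ext_q {
            uint32_t hw_id;
            _Atomic uint32_t refcnt; /* 0 = unmapped, 1 = mapped, >1 = in use */
    };

    /* Map: only the 0 -> 1 transition may install a new HW index. */
    static bool
    ext_q_map(struct ext_q *q, uint32_t hw_id)
    {
            uint32_t unmapped = 0;

            if (!atomic_compare_exchange_strong(&q->refcnt, &unmapped, 1))
                    return q->hw_id == hw_id; /* already mapped: must agree */
            q->hw_id = hw_id;
            return true;
    }

    /* Unmap: the 1 -> 0 transition fails while flows still hold references. */
    static bool
    ext_q_unmap(struct ext_q *q)
    {
            uint32_t mapped = 1;

            return atomic_compare_exchange_strong(&q->refcnt, &mapped, 0);
    }

As in the patch, losing the map race with a matching hw_id is treated as a
harmless duplicate map rather than an error.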
* RE: [PATCH v2 3/3] net/mlx5: add external Tx queue map and unmap
2024-06-05 9:31 ` [PATCH v2 3/3] net/mlx5: add external Tx queue map and unmap Suanming Mou
@ 2024-06-05 15:16 ` Dariusz Sosnowski
0 siblings, 0 replies; 12+ messages in thread
From: Dariusz Sosnowski @ 2024-06-05 15:16 UTC (permalink / raw)
To: Suanming Mou, Slava Ovsiienko, Ori Kam, Matan Azrad; +Cc: dev, Raslan Darawsheh
> -----Original Message-----
> From: Suanming Mou <suanmingm@nvidia.com>
> Sent: Wednesday, June 5, 2024 11:32
> To: Dariusz Sosnowski <dsosnowski@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Ori Kam <orika@nvidia.com>; Matan Azrad
> <matan@nvidia.com>
> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH v2 3/3] net/mlx5: add external Tx queue map and unmap
>
> To use externally created Tx queues with the RTE_FLOW_ITEM_TYPE_TX_QUEUE
> item, this commit provides the map and unmap functions that convert an
> externally created SQ's DevX ID to a DPDK flow item Tx queue ID.
>
> Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Best regards,
Dariusz Sosnowski
* Re: [PATCH v2 1/3] net/mlx5: add match with Tx queue item
2024-06-05 9:31 ` [PATCH v2 " Suanming Mou
2024-06-05 9:31 ` [PATCH v2 2/3] net/mlx5: rename external Rx queue to external queue Suanming Mou
2024-06-05 9:31 ` [PATCH v2 3/3] net/mlx5: add external Tx queue map and unmap Suanming Mou
@ 2024-06-06 10:50 ` Raslan Darawsheh
2 siblings, 0 replies; 12+ messages in thread
From: Raslan Darawsheh @ 2024-06-06 10:50 UTC (permalink / raw)
To: Suanming Mou, Dariusz Sosnowski, Slava Ovsiienko, Ori Kam, Matan Azrad
Cc: dev
Hi,
From: Suanming Mou <suanmingm@nvidia.com>
Sent: Wednesday, June 5, 2024 12:31 PM
To: Dariusz Sosnowski; Slava Ovsiienko; Ori Kam; Matan Azrad
Cc: dev@dpdk.org; Raslan Darawsheh
Subject: [PATCH v2 1/3] net/mlx5: add match with Tx queue item
With the item RTE_FLOW_ITEM_TYPE_TX_QUEUE, user will be able
to set the Tx queue index and create flow match with that
queue index.
This commit adds match with RTE_FLOW_ITEM_TX_QUEUE item.
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Series rebased and applied to next-net-mlx,
Kindest regards,
Raslan Darawsheh
end of thread, other threads:[~2024-06-06 10:50 UTC | newest]
Thread overview: 12+ messages
2024-05-31 3:50 [PATCH 1/3] net/mlx5: add match with Tx queue item Suanming Mou
2024-05-31 3:50 ` [PATCH 2/3] net/mlx5: rename external Rx queue to external queue Suanming Mou
2024-06-05 8:14 ` Dariusz Sosnowski
2024-05-31 3:50 ` [PATCH 3/3] net/mlx5: add external Tx queue map and unmap Suanming Mou
2024-06-05 8:17 ` Dariusz Sosnowski
2024-06-05 8:23 ` Suanming Mou
2024-06-05 8:14 ` [PATCH 1/3] net/mlx5: add match with Tx queue item Dariusz Sosnowski
2024-06-05 9:31 ` [PATCH v2 " Suanming Mou
2024-06-05 9:31 ` [PATCH v2 2/3] net/mlx5: rename external Rx queue to external queue Suanming Mou
2024-06-05 9:31 ` [PATCH v2 3/3] net/mlx5: add external Tx queue map and unmap Suanming Mou
2024-06-05 15:16 ` Dariusz Sosnowski
2024-06-06 10:50 ` [PATCH v2 1/3] net/mlx5: add match with Tx queue item Raslan Darawsheh