From: Suanming Mou <suanmingm@nvidia.com>
To: Dariusz Sosnowski <dsosnowski@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
Ori Kam <orika@nvidia.com>, Matan Azrad <matan@nvidia.com>
Cc: <dev@dpdk.org>, <rasland@nvidia.com>
Subject: [PATCH 1/3] net/mlx5: add match with Tx queue item
Date: Fri, 31 May 2024 11:50:32 +0800
Message-ID: <20240531035034.1731943-1-suanmingm@nvidia.com>

With the RTE_FLOW_ITEM_TYPE_TX_QUEUE item, users can set a Tx queue
index in the flow pattern and create flow rules that match packets
sent from that Tx queue.

This commit adds matching on the RTE_FLOW_ITEM_TYPE_TX_QUEUE item.
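
For reference, a minimal usage sketch (not part of this patch) with the
synchronous rte_flow API is shown below. The helper name, port id, queue
index, transfer attribute and drop action are placeholders; only the
TX_QUEUE item itself is what this patch enables. Note that the SW
steering path added here requires a fully masked tx_queue value, which
the default item mask already provides.

#include <stdint.h>
#include <rte_flow.h>

/* Drop everything sent from Tx queue 'txq' of 'port_id' (illustrative). */
static struct rte_flow *
tx_queue_drop_flow(uint16_t port_id, uint16_t txq, struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .transfer = 1 };
	const struct rte_flow_item_tx_queue spec = { .tx_queue = txq };
	const struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_TX_QUEUE,
			.spec = &spec,
			/* Default mask is fully masked, as required here. */
			.mask = &rte_flow_item_tx_queue_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
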
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
doc/guides/nics/features/mlx5.ini | 1 +
doc/guides/rel_notes/release_24_07.rst | 4 ++
drivers/net/mlx5/hws/mlx5dr_definer.c | 50 +++++++++++++++++++
drivers/net/mlx5/mlx5_flow.h | 58 ++++++++++++++++++++++
drivers/net/mlx5/mlx5_flow_dv.c | 69 ++++++++++++++++++++++++++
drivers/net/mlx5/mlx5_flow_hw.c | 1 +
6 files changed, 183 insertions(+)
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index 81a7067cc3..056e04275b 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -92,6 +92,7 @@ quota = Y
random = Y
tag = Y
tcp = Y
+tx_queue = Y
udp = Y
vlan = Y
vxlan = Y
diff --git a/doc/guides/rel_notes/release_24_07.rst b/doc/guides/rel_notes/release_24_07.rst
index ffbe9ce051..46efc04eac 100644
--- a/doc/guides/rel_notes/release_24_07.rst
+++ b/doc/guides/rel_notes/release_24_07.rst
@@ -81,6 +81,10 @@ New Features
* Added SSE/NEON vector datapath.
+* **Updated NVIDIA mlx5 driver.**
+
+ * Added match with Tx queue.
+
Removed Items
-------------
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index dabfac8abc..bc128c7b99 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -6,6 +6,7 @@
#define GTP_PDU_SC 0x85
#define BAD_PORT 0xBAD
+#define BAD_SQN 0xBAD
#define ETH_TYPE_IPV4_VXLAN 0x0800
#define ETH_TYPE_IPV6_VXLAN 0x86DD
#define UDP_VXLAN_PORT 4789
@@ -878,6 +879,22 @@ mlx5dr_definer_vxlan_gpe_rsvd0_set(struct mlx5dr_definer_fc *fc,
DR_SET(tag, rsvd0, fc->byte_off, fc->bit_off, fc->bit_mask);
}
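+/* Convert the item's Tx queue index to its sqn and set it in the match tag. */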
+static void
+mlx5dr_definer_tx_queue_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ const struct rte_flow_item_tx_queue *v = item_spec;
+ uint32_t sqn = 0;
+ int ret;
+
+ ret = flow_hw_conv_sqn(fc->extra_data, v->tx_queue, &sqn);
+ if (unlikely(ret))
+ sqn = BAD_SQN;
+
+ DR_SET(tag, sqn, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
static int
mlx5dr_definer_conv_item_eth(struct mlx5dr_definer_conv_data *cd,
struct rte_flow_item *item,
@@ -1850,6 +1867,35 @@ mlx5dr_definer_conv_item_metadata(struct mlx5dr_definer_conv_data *cd,
return 0;
}
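+/* Translate the TX_QUEUE item into a SOURCE_QP definer field.
+ * The DPDK port_id is saved in fc->extra_data for the tag setter.
+ */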
+static int
+mlx5dr_definer_conv_item_tx_queue(struct mlx5dr_definer_conv_data *cd,
+ struct rte_flow_item *item,
+ int item_idx)
+{
+ const struct rte_flow_item_tx_queue *m = item->mask;
+ struct mlx5dr_definer_fc *fc;
+
+ if (!m)
+ return 0;
+
+ if (m->tx_queue) {
+ fc = &cd->fc[MLX5DR_DEFINER_FNAME_SOURCE_QP];
+ fc->item_idx = item_idx;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ fc->tag_set = &mlx5dr_definer_tx_queue_set;
+ /* Use extra_data to save the DPDK port_id. */
+ fc->extra_data = flow_hw_get_port_id(cd->ctx);
+ if (fc->extra_data == UINT16_MAX) {
+ DR_LOG(ERR, "Invalid port for item tx_queue");
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DR_CALC_SET_HDR(fc, source_qp_gvmi, source_qp);
+ }
+
+ return 0;
+}
+
static int
mlx5dr_definer_conv_item_sq(struct mlx5dr_definer_conv_data *cd,
struct rte_flow_item *item,
@@ -3150,6 +3196,10 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
ret = mlx5dr_definer_conv_item_vxlan(&cd, items, i);
item_flags |= MLX5_FLOW_LAYER_VXLAN;
break;
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ ret = mlx5dr_definer_conv_item_tx_queue(&cd, items, i);
+ item_flags |= MLX5_FLOW_ITEM_SQ;
+ break;
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
ret = mlx5dr_definer_conv_item_sq(&cd, items, i);
item_flags |= MLX5_FLOW_ITEM_SQ;
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 8b4088e35e..8f53e82985 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -19,6 +19,7 @@
#include "mlx5.h"
#include "rte_pmd_mlx5.h"
#include "hws/mlx5dr.h"
+#include "mlx5_tx.h"
/* E-Switch Manager port, used for rte_flow_item_port_id. */
#define MLX5_PORT_ESW_MGR UINT32_MAX
@@ -1945,6 +1946,63 @@ struct flow_hw_port_info {
extern struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS];
+/*
+ * Get sqn for given tx_queue.
+ * Used in HWS rule creation.
+ */
+static __rte_always_inline int
+flow_hw_get_sqn(struct rte_eth_dev *dev, uint16_t tx_queue, uint32_t *sqn)
+{
+ struct mlx5_txq_ctrl *txq;
+
+ /* Means Tx queue is PF0. */
+ if (tx_queue == UINT16_MAX) {
+ *sqn = 0;
+ return 0;
+ }
+ txq = mlx5_txq_get(dev, tx_queue);
+ if (unlikely(!txq))
+ return -ENOENT;
+ *sqn = mlx5_txq_get_sqn(txq);
+ mlx5_txq_release(dev, tx_queue);
+ return 0;
+}
+
+/*
+ * Convert the Tx queue of the given rte_eth_dev port to its sqn.
+ * Used in HWS rule creation.
+ */
+static __rte_always_inline int
+flow_hw_conv_sqn(uint16_t port_id, uint16_t tx_queue, uint32_t *sqn)
+{
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return -EINVAL;
+ return flow_hw_get_sqn(&rte_eth_devices[port_id], tx_queue, sqn);
+}
+
+/*
+ * Get the port_id of the rte_eth_dev that owns the given dr_ctx.
+ * Used in HWS rule creation.
+ */
+static __rte_always_inline uint16_t
+flow_hw_get_port_id(void *dr_ctx)
+{
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+ uint16_t port_id;
+
+ MLX5_ETH_FOREACH_DEV(port_id, NULL) {
+ struct mlx5_priv *priv;
+
+ priv = rte_eth_devices[port_id].data->dev_private;
+ if (priv->dr_ctx == dr_ctx)
+ return port_id;
+ }
+#else
+ RTE_SET_USED(dr_ctx);
+#endif
+ return UINT16_MAX;
+}
+
/*
* Get metadata match tag and mask for given rte_eth_dev port.
* Used in HWS rule creation.
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 6f72185916..a834b3e2e0 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8023,6 +8023,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = MLX5_FLOW_ITEM_TAG;
break;
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
last_item = MLX5_FLOW_ITEM_SQ;
break;
@@ -12197,6 +12198,52 @@ flow_dv_translate_create_counter(struct rte_eth_dev *dev,
return counter;
}
+/**
+ * Add Tx queue matcher
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] key_type
+ * Set flow matcher mask or value.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+static int
+flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev, void *key,
+ const struct rte_flow_item *item, uint32_t key_type)
+{
+ const struct rte_flow_item_tx_queue *queue_m;
+ const struct rte_flow_item_tx_queue *queue_v;
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ uint32_t tx_queue;
+ uint32_t sqn = 0;
+ int ret;
+
+ MLX5_ITEM_UPDATE(item, key_type, queue_v, queue_m, &rte_flow_item_tx_queue_mask);
+ if (!queue_m || !queue_v)
+ return -EINVAL;
+ if (key_type & MLX5_SET_MATCHER_V) {
+ tx_queue = queue_v->tx_queue;
+ if (key_type == MLX5_SET_MATCHER_SW_V)
+ tx_queue &= queue_m->tx_queue;
+ ret = flow_hw_get_sqn(dev, tx_queue, &sqn);
+ if (unlikely(ret))
+ return ret;
+ } else {
+ /* Due to the tx_queue to sqn conversion, only fully masked values are supported. */
+ if (queue_m->tx_queue != rte_flow_item_tx_queue_mask.tx_queue)
+ return -EINVAL;
+ sqn = UINT32_MAX;
+ }
+ MLX5_SET(fte_match_set_misc, misc_v, source_sqn, sqn);
+ return 0;
+}
+
/**
* Add SQ matcher
*
@@ -14167,6 +14214,14 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
flow_dv_translate_mlx5_item_tag(dev, key, items, key_type);
last_item = MLX5_FLOW_ITEM_TAG;
break;
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ ret = flow_dv_translate_item_tx_queue(dev, key, items, key_type);
+ if (ret)
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "invalid tx_queue item");
+ last_item = MLX5_FLOW_ITEM_SQ;
+ break;
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
flow_dv_translate_item_sq(key, items, key_type);
last_item = MLX5_FLOW_ITEM_SQ;
@@ -14397,6 +14452,20 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
wks.last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
MLX5_FLOW_ITEM_OUTER_FLEX;
break;
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ ret = flow_dv_translate_item_tx_queue(dev, match_value, items,
+ MLX5_SET_MATCHER_SW_V);
+ if (ret)
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "invalid tx_queue item spec");
+ ret = flow_dv_translate_item_tx_queue(dev, match_mask, items,
+ MLX5_SET_MATCHER_SW_M);
+ if (ret)
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "invalid tx_queue item mask");
+ break;
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
flow_dv_translate_item_sq(match_value, items,
MLX5_SET_MATCHER_SW_V);
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 68c5a36bbb..39c9a0ba50 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -7668,6 +7668,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_MPLS:
case RTE_FLOW_ITEM_TYPE_GENEVE:
case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
+ case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
case RTE_FLOW_ITEM_TYPE_GRE:
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
--
2.34.1