From: Michael Baum <michaelba@nvidia.com>
To: <dev@dpdk.org>
Cc: Matan Azrad <matan@nvidia.com>,
Raslan Darawsheh <rasland@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [PATCH 5/6] net/mlx5: support queue/RSS action for external RxQ
Date: Tue, 22 Feb 2022 23:04:15 +0200 [thread overview]
Message-ID: <20220222210416.2669519-6-michaelba@nvidia.com> (raw)
In-Reply-To: <20220222210416.2669519-1-michaelba@nvidia.com>
Add support for queue/RSS actions for external RxQ.
In indirection table creation, the queue index will be taken from
the mapping array.
Signed-off-by: Michael Baum <michaelba@nvidia.com>
---
doc/guides/nics/mlx5.rst | 1 +
doc/guides/rel_notes/release_22_03.rst | 6 ++
drivers/net/mlx5/mlx5.c | 8 +-
drivers/net/mlx5/mlx5_devx.c | 30 +++++--
drivers/net/mlx5/mlx5_flow.c | 29 +++++--
drivers/net/mlx5/mlx5_rx.h | 30 +++++++
drivers/net/mlx5/mlx5_rxq.c | 116 +++++++++++++++++++++++--
7 files changed, 194 insertions(+), 26 deletions(-)
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 748939527d..724e34d98b 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -38,6 +38,7 @@ Features
- Multiple TX and RX queues.
- Shared Rx queue.
- Rx queue delay drop.
+- Support steering for external Rx queue.
- Support for scattered TX frames.
- Advanced support for scattered Rx frames with tunable buffer attributes.
- IPv4, IPv6, TCPv4, TCPv6, UDPv4 and UDPv6 RSS on any number of queues.
diff --git a/doc/guides/rel_notes/release_22_03.rst b/doc/guides/rel_notes/release_22_03.rst
index 41923f50e6..b5dd5d9913 100644
--- a/doc/guides/rel_notes/release_22_03.rst
+++ b/doc/guides/rel_notes/release_22_03.rst
@@ -118,6 +118,12 @@ New Features
* Added PPPoL2TPv2oUDP FDIR distribute packets based on inner IP
src/dst address and UDP/TCP src/dst port.
+* **Updated Mellanox mlx5 driver.**
+
+ Updated the Mellanox mlx5 driver with new features and improvements, including:
+
+ * Support steering for external Rx queue.
+
* **Updated Wangxun ngbe driver.**
* Added support for devices of custom PHY interfaces.
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 415e0fe2f2..9760f52b46 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1855,8 +1855,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
close(priv->nl_socket_rdma);
if (priv->vmwa_context)
mlx5_vlan_vmwa_exit(priv->vmwa_context);
- if (priv->ext_rxqs)
- mlx5_free(priv->ext_rxqs);
ret = mlx5_hrxq_verify(dev);
if (ret)
DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
@@ -1869,6 +1867,10 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (ret)
DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
dev->data->port_id);
+ ret = mlx5_ext_rxq_verify(dev);
+ if (ret)
+ DRV_LOG(WARNING, "Port %u some external RxQ still remain.",
+ dev->data->port_id);
ret = mlx5_rxq_verify(dev);
if (ret)
DRV_LOG(WARNING, "port %u some Rx queues still remain",
@@ -1887,6 +1889,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
dev->data->port_id);
if (priv->hrxqs)
mlx5_list_destroy(priv->hrxqs);
+ if (priv->ext_rxqs)
+ mlx5_free(priv->ext_rxqs);
/*
* Free the shared context in last turn, because the cleanup
* routines above may use some shared fields, like
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 154df99251..19510a540c 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -580,13 +580,21 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
return rqt_attr;
}
for (i = 0; i != queues_n; ++i) {
- struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queues[i]);
+ if (mlx5_is_external_rxq(dev, queues[i])) {
+ struct mlx5_external_rxq *ext_rxq =
+ mlx5_ext_rxq_get(dev, queues[i]);
- MLX5_ASSERT(rxq != NULL);
- if (rxq->ctrl->is_hairpin)
- rqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id;
- else
- rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
+ rqt_attr->rq_list[i] = ext_rxq->hw_id;
+ } else {
+ struct mlx5_rxq_priv *rxq =
+ mlx5_rxq_get(dev, queues[i]);
+
+ MLX5_ASSERT(rxq != NULL);
+ if (rxq->ctrl->is_hairpin)
+ rqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id;
+ else
+ rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
+ }
}
MLX5_ASSERT(i > 0);
for (j = 0; i != rqt_n; ++j, ++i)
@@ -711,7 +719,13 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
uint32_t i;
/* NULL queues designate drop queue. */
- if (ind_tbl->queues != NULL) {
+ if (ind_tbl->queues == NULL) {
+ is_hairpin = priv->drop_queue.rxq->ctrl->is_hairpin;
+ } else if (mlx5_is_external_rxq(dev, ind_tbl->queues[0])) {
+ /* External RxQ supports neither Hairpin nor LRO. */
+ is_hairpin = false;
+ lro = false;
+ } else {
is_hairpin = mlx5_rxq_is_hairpin(dev, ind_tbl->queues[0]);
/* Enable TIR LRO only if all the queues were configured for. */
for (i = 0; i < ind_tbl->queues_n; ++i) {
@@ -723,8 +737,6 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
break;
}
}
- } else {
- is_hairpin = priv->drop_queue.rxq->ctrl->is_hairpin;
}
memset(tir_attr, 0, sizeof(*tir_attr));
tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 5e8454f5f5..1f81cedecb 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1631,6 +1631,12 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can't have 2 fate actions in"
" same flow");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "queue action not supported for egress.");
+ if (mlx5_is_external_rxq(dev, queue->index))
+ return 0;
if (!priv->rxqs_n)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
@@ -1645,11 +1651,6 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&queue->index,
"queue is not configured");
- if (attr->egress)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
- "queue action not supported for "
- "egress");
return 0;
}
@@ -1664,7 +1665,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
* Size of the @p queues array.
* @param[out] error
* On error, filled with a textual error description.
- * @param[out] queue
+ * @param[out] queue_idx
* On error, filled with an offending queue index in @p queues array.
*
* @return
@@ -1677,17 +1678,27 @@ mlx5_validate_rss_queues(struct rte_eth_dev *dev,
{
const struct mlx5_priv *priv = dev->data->dev_private;
bool is_hairpin = false;
+ bool is_ext_rss = false;
uint32_t i;
for (i = 0; i != queues_n; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev,
- queues[i]);
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ if (mlx5_is_external_rxq(dev, queues[i])) {
+ is_ext_rss = true;
+ continue;
+ }
+ if (is_ext_rss) {
+ *error = "Combining external and regular RSS queues is not supported";
+ *queue_idx = i;
+ return -ENOTSUP;
+ }
if (queues[i] >= priv->rxqs_n) {
*error = "queue index out of range";
*queue_idx = i;
return -EINVAL;
}
+ rxq_ctrl = mlx5_rxq_ctrl_get(dev, queues[i]);
if (rxq_ctrl == NULL) {
*error = "queue is not configured";
*queue_idx = i;
@@ -1782,7 +1793,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
"L4 partial RSS requested but L4 RSS"
" type not specified");
- if (!priv->rxqs_n)
+ if (!priv->rxqs_n && priv->ext_rxqs == NULL)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "No Rx queues configured");
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 1e191a5704..353c7c05b2 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -18,6 +18,7 @@
#include "mlx5.h"
#include "mlx5_autoconf.h"
+#include "rte_pmd_mlx5.h"
/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10
@@ -218,8 +219,14 @@ uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_external_rxq *mlx5_ext_rxq_ref(struct rte_eth_dev *dev,
+ uint16_t idx);
+uint32_t mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_external_rxq *mlx5_ext_rxq_get(struct rte_eth_dev *dev,
+ uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
+int mlx5_ext_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
@@ -639,4 +646,27 @@ mlx5_mprq_enabled(struct rte_eth_dev *dev)
return n == n_ibv;
}
+/**
+ * Check whether given RxQ is external.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param queue_idx
+ * Rx queue index.
+ *
+ * @return
+ * True if it is an external RxQ, otherwise false.
+ */
+static __rte_always_inline bool
+mlx5_is_external_rxq(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_external_rxq *rxq;
+
+ if (!priv->ext_rxqs || queue_idx < MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
+ return false;
+ rxq = &priv->ext_rxqs[queue_idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
+ return __atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED);
+}
+
#endif /* RTE_PMD_MLX5_RX_H_ */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 93adc09369..720a98650c 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2090,6 +2090,65 @@ mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
return rxq == NULL ? NULL : &rxq->ctrl->rxq;
}
+/**
+ * Increase an external Rx queue reference count.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * External RX queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_external_rxq *
+mlx5_ext_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
+
+ __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ return rxq;
+}
+
+/**
+ * Decrease an external Rx queue reference count.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * External RX queue index.
+ *
+ * @return
+ * Updated reference count.
+ */
+uint32_t
+mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
+
+ return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+}
+
+/**
+ * Get an external Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * External Rx queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_external_rxq *
+mlx5_ext_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ MLX5_ASSERT(mlx5_is_external_rxq(dev, idx));
+ return &priv->ext_rxqs[idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
+}
+
/**
* Release a Rx queue.
*
@@ -2173,6 +2232,37 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
return ret;
}
+/**
+ * Verify the external Rx Queue list is empty.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of objects not released.
+ */
+int
+mlx5_ext_rxq_verify(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_external_rxq *rxq;
+ uint32_t i;
+ int ret = 0;
+
+ if (priv->ext_rxqs == NULL)
+ return 0;
+
+ for (i = MLX5_EXTERNAL_RX_QUEUE_ID_MIN; i <= UINT16_MAX; ++i) {
+ rxq = mlx5_ext_rxq_get(dev, i);
+ if (rxq->refcnt < 2)
+ continue;
+ DRV_LOG(DEBUG, "Port %u external RxQ %u still referenced.",
+ dev->data->port_id, i);
+ ++ret;
+ }
+ return ret;
+}
+
/**
* Check whether RxQ type is Hairpin.
*
@@ -2188,8 +2278,11 @@ bool
mlx5_rxq_is_hairpin(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ if (mlx5_is_external_rxq(dev, idx))
+ return false;
+ rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
return (idx < priv->rxqs_n && rxq_ctrl != NULL && rxq_ctrl->is_hairpin);
}
@@ -2367,9 +2460,16 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
if (ref_qs)
for (i = 0; i != queues_n; ++i) {
- if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
- ret = -rte_errno;
- goto error;
+ if (mlx5_is_external_rxq(dev, queues[i])) {
+ if (mlx5_ext_rxq_ref(dev, queues[i]) == NULL) {
+ ret = -rte_errno;
+ goto error;
+ }
+ } else {
+ if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
+ ret = -rte_errno;
+ goto error;
+ }
}
}
ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
@@ -2380,8 +2480,12 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
error:
if (ref_qs) {
err = rte_errno;
- for (j = 0; j < i; j++)
- mlx5_rxq_deref(dev, queues[j]);
+ for (j = 0; j < i; j++) {
+ if (mlx5_is_external_rxq(dev, queues[j]))
+ mlx5_ext_rxq_deref(dev, queues[j]);
+ else
+ mlx5_rxq_deref(dev, queues[j]);
+ }
rte_errno = err;
}
DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
--
2.25.1
next prev parent reply other threads:[~2022-02-22 21:05 UTC|newest]
Thread overview: 26+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-02-22 21:04 [PATCH 0/6] mlx5: external RxQ support Michael Baum
2022-02-22 21:04 ` [PATCH 1/6] common/mlx5: glue device and PD importation Michael Baum
2022-02-22 21:04 ` [PATCH 2/6] common/mlx5: add remote PD and CTX support Michael Baum
2022-02-22 21:04 ` [PATCH 3/6] net/mlx5: optimize RxQ/TxQ control structure Michael Baum
2022-02-22 21:04 ` [PATCH 4/6] net/mlx5: add external RxQ mapping API Michael Baum
2022-02-22 21:04 ` Michael Baum [this message]
2022-02-22 21:04 ` [PATCH 6/6] app/testpmd: add test for external RxQ Michael Baum
2022-02-23 18:48 ` [PATCH v2 0/6] mlx5: external RxQ support Michael Baum
2022-02-23 18:48 ` [PATCH v2 1/6] common/mlx5: consider local functions as internal Michael Baum
2022-02-23 18:48 ` [PATCH v2 2/6] common/mlx5: glue device and PD importation Michael Baum
2022-02-23 18:48 ` [PATCH v2 3/6] common/mlx5: add remote PD and CTX support Michael Baum
2022-02-23 18:48 ` [PATCH v2 4/6] net/mlx5: optimize RxQ/TxQ control structure Michael Baum
2022-02-23 18:48 ` [PATCH v2 5/6] net/mlx5: add external RxQ mapping API Michael Baum
2022-02-23 18:48 ` [PATCH v2 6/6] net/mlx5: support queue/RSS action for external RxQ Michael Baum
2022-02-24 8:38 ` [PATCH v2 0/6] mlx5: external RxQ support Matan Azrad
2022-02-24 23:25 ` [PATCH v3 " Michael Baum
2022-02-24 23:25 ` [PATCH v3 1/6] common/mlx5: consider local functions as internal Michael Baum
2022-02-25 18:01 ` Ferruh Yigit
2022-02-25 18:38 ` Thomas Monjalon
2022-02-25 19:13 ` Ferruh Yigit
2022-02-24 23:25 ` [PATCH v3 2/6] common/mlx5: glue device and PD importation Michael Baum
2022-02-24 23:25 ` [PATCH v3 3/6] common/mlx5: add remote PD and CTX support Michael Baum
2022-02-24 23:25 ` [PATCH v3 4/6] net/mlx5: optimize RxQ/TxQ control structure Michael Baum
2022-02-24 23:25 ` [PATCH v3 5/6] net/mlx5: add external RxQ mapping API Michael Baum
2022-02-24 23:25 ` [PATCH v3 6/6] net/mlx5: support queue/RSS action for external RxQ Michael Baum
2022-02-25 17:39 ` [PATCH v3 0/6] mlx5: external RxQ support Thomas Monjalon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220222210416.2669519-6-michaelba@nvidia.com \
--to=michaelba@nvidia.com \
--cc=dev@dpdk.org \
--cc=matan@nvidia.com \
--cc=rasland@nvidia.com \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).