From: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
To: <dev@dpdk.org>
Cc: David Marchand <david.marchand@redhat.com>,
Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,
Ori Kam <orika@nvidia.com>, <stable@dpdk.org>,
Matan Azrad <matan@nvidia.com>,
Shahaf Shuler <shahafs@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 2/3] net/mlx5: create drop queue using DevX
Date: Thu, 29 Jul 2021 17:00:19 +0300 [thread overview]
Message-ID: <20210729140020.1077946-3-dkozlyuk@nvidia.com> (raw)
In-Reply-To: <20210729140020.1077946-1-dkozlyuk@nvidia.com>
Drop queue creation and destruction were not implemented for the DevX
flow engine, so Verbs engine methods were used as a workaround.
Implement these methods for DevX so that there is a valid queue ID
that can be used regardless of queue configuration via API.
Cc: stable@dpdk.org
Signed-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_os.c | 4 -
drivers/net/mlx5/mlx5_devx.c | 204 ++++++++++++++++++++++++++-----
2 files changed, 176 insertions(+), 32 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 671dfface9..791024c2c6 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1756,10 +1756,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
}
if (config->devx && config->dv_flow_en && config->dest_tir) {
priv->obj_ops = devx_obj_ops;
- priv->obj_ops.drop_action_create =
- ibv_obj_ops.drop_action_create;
- priv->obj_ops.drop_action_destroy =
- ibv_obj_ops.drop_action_destroy;
#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
priv->obj_ops.txq_obj_modify = ibv_obj_ops.txq_obj_modify;
#else
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index a1db53577a..447d6bafb9 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -226,17 +226,17 @@ mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
*
* @param dev
* Pointer to Ethernet device.
- * @param idx
- * Queue index in DPDK Rx queue array.
+ * @param rxq_data
+ * RX queue data.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev,
+ struct mlx5_rxq_data *rxq_data)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct mlx5_devx_create_rq_attr rq_attr = { 0 };
@@ -289,20 +289,20 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
*
* @param dev
* Pointer to Ethernet device.
- * @param idx
- * Queue index in DPDK Rx queue array.
+ * @param rxq_data
+ * RX queue data.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,
+ struct mlx5_rxq_data *rxq_data)
{
struct mlx5_devx_cq *cq_obj = 0;
struct mlx5_devx_cq_attr cq_attr = { 0 };
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
@@ -497,13 +497,13 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
}
/* Create CQ using DevX API. */
- ret = mlx5_rxq_create_devx_cq_resources(dev, idx);
+ ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);
if (ret) {
DRV_LOG(ERR, "Failed to create CQ.");
goto error;
}
/* Create RQ using DevX API. */
- ret = mlx5_rxq_create_devx_rq_resources(dev, idx);
+ ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);
if (ret) {
DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
dev->data->port_id, idx);
@@ -536,6 +536,11 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
* Pointer to Ethernet device.
* @param log_n
* Log of number of queues in the array.
+ * @param queues
+ * List of RX queue indices or NULL, in which case
+ * the attribute will be filled by drop queue ID.
+ * @param queues_n
+ * Size of @p queues array or 0 if it is NULL.
* @param ind_tbl
* DevX indirection table object.
*
@@ -563,6 +568,11 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
}
rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
rqt_attr->rqt_actual_size = rqt_n;
+ if (queues == NULL) {
+ for (i = 0; i < rqt_n; i++)
+ rqt_attr->rq_list[i] = priv->drop_queue.rxq->rq->id;
+ return rqt_attr;
+ }
for (i = 0; i != queues_n; ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
struct mlx5_rxq_ctrl *rxq_ctrl =
@@ -670,7 +680,8 @@ mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
* @param[in] hash_fields
* Verbs protocol hash field to make the RSS on.
* @param[in] ind_tbl
- * Indirection table for TIR.
+ * Indirection table for TIR. If table queues array is NULL,
+ * a TIR for drop queue is assumed.
* @param[in] tunnel
* Tunnel type.
* @param[out] tir_attr
@@ -686,19 +697,27 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
int tunnel, struct mlx5_devx_tir_attr *tir_attr)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- enum mlx5_rxq_type rxq_obj_type = rxq_ctrl->type;
+ enum mlx5_rxq_type rxq_obj_type;
bool lro = true;
uint32_t i;
- /* Enable TIR LRO only if all the queues were configured for. */
- for (i = 0; i < ind_tbl->queues_n; ++i) {
- if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
- lro = false;
- break;
+ /* NULL queues designate drop queue. */
+ if (ind_tbl->queues != NULL) {
+ struct mlx5_rxq_data *rxq_data =
+ (*priv->rxqs)[ind_tbl->queues[0]];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ rxq_obj_type = rxq_ctrl->type;
+
+ /* Enable TIR LRO only if all the queues were configured for. */
+ for (i = 0; i < ind_tbl->queues_n; ++i) {
+ if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
+ lro = false;
+ break;
+ }
}
+ } else {
+ rxq_obj_type = priv->drop_queue.rxq->rxq_ctrl->type;
}
memset(tir_attr, 0, sizeof(*tir_attr));
tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
@@ -857,7 +876,7 @@ mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
}
/**
- * Create a DevX drop action for Rx Hash queue.
+ * Create a DevX drop Rx queue.
*
* @param dev
* Pointer to Ethernet device.
@@ -866,14 +885,99 @@ mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
+mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
{
- (void)dev;
- DRV_LOG(ERR, "DevX drop action is not supported yet.");
- rte_errno = ENOTSUP;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ int socket_id = dev->device->numa_node;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_data *rxq_data;
+ struct mlx5_rxq_obj *rxq = NULL;
+ int ret;
+
+ /*
+ * Initialize dummy control structures.
+ * They are required to hold pointers for cleanup
+ * and are only accessible via drop queue DevX objects.
+ */
+ rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl),
+ 0, socket_id);
+ if (rxq_ctrl == NULL) {
+ DRV_LOG(ERR, "Port %u could not allocate drop queue control",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);
+ if (rxq == NULL) {
+ DRV_LOG(ERR, "Port %u could not allocate drop queue object",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ rxq->rxq_ctrl = rxq_ctrl;
+ rxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD;
+ rxq_ctrl->priv = priv;
+ rxq_ctrl->obj = rxq;
+ rxq_data = &rxq_ctrl->rxq;
+ /* Create CQ using DevX API. */
+ ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Port %u drop queue CQ creation failed.",
+ dev->data->port_id);
+ goto error;
+ }
+ /* Create RQ using DevX API. */
+ ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Port %u drop queue RQ creation failed.",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ /* Change queue state to ready. */
+ ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
+ if (ret != 0)
+ goto error;
+ /* Initialize drop queue. */
+ priv->drop_queue.rxq = rxq;
+ return 0;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (rxq != NULL) {
+ if (rxq->rq_obj.rq != NULL)
+ mlx5_devx_rq_destroy(&rxq->rq_obj);
+ if (rxq->cq_obj.cq != NULL)
+ mlx5_devx_cq_destroy(&rxq->cq_obj);
+ if (rxq->devx_channel)
+ mlx5_os_devx_destroy_event_channel
+ (rxq->devx_channel);
+ mlx5_free(rxq);
+ }
+ if (rxq_ctrl != NULL)
+ mlx5_free(rxq_ctrl);
+ rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
+/**
+ * Release drop Rx queue resources.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
+ struct mlx5_rxq_ctrl *rxq_ctrl = rxq->rxq_ctrl;
+
+ mlx5_rxq_devx_obj_release(rxq);
+ mlx5_free(rxq);
+ mlx5_free(rxq_ctrl);
+ priv->drop_queue.rxq = NULL;
+}
+
/**
* Release a drop hash Rx queue.
*
@@ -883,9 +987,53 @@ mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
- (void)dev;
- DRV_LOG(ERR, "DevX drop action is not supported yet.");
- rte_errno = ENOTSUP;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
+
+ if (hrxq->tir != NULL)
+ mlx5_devx_tir_destroy(hrxq);
+ if (hrxq->ind_table->ind_table != NULL)
+ mlx5_devx_ind_table_destroy(hrxq->ind_table);
+ if (priv->drop_queue.rxq->rq != NULL)
+ mlx5_rxq_devx_obj_drop_release(dev);
+}
+
+/**
+ * Create a DevX drop action for Rx Hash queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
+ int ret;
+
+ ret = mlx5_rxq_devx_obj_drop_create(dev);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Cannot create drop RX queue");
+ return ret;
+ }
+ /* hrxq->ind_table queues are NULL, drop RX queue ID will be used */
+ ret = mlx5_devx_ind_table_new(dev, 0, hrxq->ind_table);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Cannot create drop hash RX queue indirection table");
+ goto error;
+ }
+ ret = mlx5_devx_hrxq_new(dev, hrxq, /* tunnel */ false);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Cannot create drop hash RX queue");
+ goto error;
+ }
+ return 0;
+error:
+ mlx5_devx_drop_action_destroy(dev);
+ return ret;
}
/**
--
2.25.1
next prev parent reply other threads:[~2021-07-29 14:00 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-07-27 7:31 [dpdk-dev] [PATCH 0/4] net/mlx5: keep indirect actions across port restart Dmitry Kozlyuk
2021-07-27 7:31 ` [dpdk-dev] [PATCH 1/4] net/mlx5: discover max flow priority using DevX Dmitry Kozlyuk
2021-07-27 7:31 ` [dpdk-dev] [PATCH 2/4] net/mlx5: create drop queue " Dmitry Kozlyuk
2021-07-27 7:31 ` [dpdk-dev] [PATCH 3/4] net/mlx5: preserve indirect actions across port restart Dmitry Kozlyuk
2021-07-27 7:31 ` [dpdk-dev] [PATCH 4/4] ethdev: document indirect flow action life cycle Dmitry Kozlyuk
2021-07-28 9:50 ` Ori Kam
2021-07-28 8:05 ` [dpdk-dev] [PATCH 0/4] net/mlx5: keep indirect actions across port restart Andrew Rybchenko
2021-07-28 11:18 ` Dmitry Kozlyuk
2021-07-28 12:07 ` Ori Kam
2021-07-28 12:26 ` Andrew Rybchenko
2021-07-28 14:08 ` Dmitry Kozlyuk
2021-07-28 17:07 ` Ori Kam
2021-07-29 14:00 ` [dpdk-dev] [PATCH v2 " Dmitry Kozlyuk
2021-07-29 14:00 ` [dpdk-dev] [PATCH v2 1/3] net/mlx5: discover max flow priority using DevX Dmitry Kozlyuk
2021-07-29 14:00 ` Dmitry Kozlyuk [this message]
2021-07-29 14:00 ` [dpdk-dev] [PATCH v2 3/3] net/mlx5: preserve indirect actions across port restart Dmitry Kozlyuk
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210729140020.1077946-3-dkozlyuk@nvidia.com \
--to=dkozlyuk@nvidia.com \
--cc=andrew.rybchenko@oktetlabs.ru \
--cc=david.marchand@redhat.com \
--cc=dev@dpdk.org \
--cc=matan@nvidia.com \
--cc=orika@nvidia.com \
--cc=shahafs@nvidia.com \
--cc=stable@dpdk.org \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).